code stringlengths 2k 1.04M | repo_path stringlengths 5 517 | parsed_code stringlengths 0 1.04M | quality_prob float64 0.02 0.95 | learning_prob float64 0.02 0.93 |
|---|---|---|---|---|
import logging
import threading
import functools
import coloredlogs
from qtpy.QtCore import QtInfoMsg, QtWarningMsg, QtCriticalMsg
def qt_message_handler(mode, context, message):
logger = logging.getLogger("QT Logger")
"""Qt errors handler"""
if mode == QtInfoMsg:
mode = 20
elif mode == QtWarningMsg:
mode = 30
elif mode == QtCriticalMsg:
mode = 40
elif mode == QtCriticalMsg:
mode = 50
else:
mode = 20
logger.log(mode, "(%s: %s): %s" % (context.file, context.line, message))
class Logger:
def __init__(self, ):
super(Logger, self).__init__()
self.logger = None
self.handler = None
self.formatter = None
def enable(self):
self.logger = logging.getLogger()
self.logger.setLevel(logging.NOTSET)
self.handler = logging.StreamHandler()
self.handler.setLevel(logging.NOTSET)
self.formatter = coloredlogs.ColoredFormatter("%(asctime)s "
"[%(threadName)s] "
"[%(name)s] "
"[%(levelname)s] "
"%(message)s")
self.handler.setFormatter(self.formatter)
self.logger.addHandler(self.handler)
self.logger.info("Logger enabled")
return self.logger
def set_level(self, level):
if self.logger and self.handler:
self.logger.setLevel(level)
self.handler.setLevel(level)
else:
raise Exception("Logger not enabled!")
class TaskThread(threading.Thread):
def __init__(self, group=None, target=None, name=None, args=(), kwargs=None, *, daemon=None):
super(TaskThread, self).__init__(group, target, name, args, kwargs, daemon=daemon)
self.result = None
self.exit_code = None
def run(self):
if self._target is not None:
try:
self.result = self._target(*self._args, **self._kwargs)
self.exit_code = 0
except Exception as e:
self.result = e
self.exit_code = 1
def join(self, timeout=None):
threading.Thread.join(self, timeout)
return {"result": self.result, "exit_code": self.exit_code}
def threaded(function):
"""Move function to thread.
functools.wraps copies __name__ and __doc__ from wrapped function. """
@functools.wraps(function)
def wrapper(*args, **kwargs):
thread = TaskThread(target=function, args=args, kwargs=kwargs)
thread.start()
return thread
return wrapper | lista3/utils.py | import logging
import threading
import functools
import coloredlogs
from qtpy.QtCore import QtInfoMsg, QtWarningMsg, QtCriticalMsg
def qt_message_handler(mode, context, message):
logger = logging.getLogger("QT Logger")
"""Qt errors handler"""
if mode == QtInfoMsg:
mode = 20
elif mode == QtWarningMsg:
mode = 30
elif mode == QtCriticalMsg:
mode = 40
elif mode == QtCriticalMsg:
mode = 50
else:
mode = 20
logger.log(mode, "(%s: %s): %s" % (context.file, context.line, message))
class Logger:
def __init__(self, ):
super(Logger, self).__init__()
self.logger = None
self.handler = None
self.formatter = None
def enable(self):
self.logger = logging.getLogger()
self.logger.setLevel(logging.NOTSET)
self.handler = logging.StreamHandler()
self.handler.setLevel(logging.NOTSET)
self.formatter = coloredlogs.ColoredFormatter("%(asctime)s "
"[%(threadName)s] "
"[%(name)s] "
"[%(levelname)s] "
"%(message)s")
self.handler.setFormatter(self.formatter)
self.logger.addHandler(self.handler)
self.logger.info("Logger enabled")
return self.logger
def set_level(self, level):
if self.logger and self.handler:
self.logger.setLevel(level)
self.handler.setLevel(level)
else:
raise Exception("Logger not enabled!")
class TaskThread(threading.Thread):
def __init__(self, group=None, target=None, name=None, args=(), kwargs=None, *, daemon=None):
super(TaskThread, self).__init__(group, target, name, args, kwargs, daemon=daemon)
self.result = None
self.exit_code = None
def run(self):
if self._target is not None:
try:
self.result = self._target(*self._args, **self._kwargs)
self.exit_code = 0
except Exception as e:
self.result = e
self.exit_code = 1
def join(self, timeout=None):
threading.Thread.join(self, timeout)
return {"result": self.result, "exit_code": self.exit_code}
def threaded(function):
"""Move function to thread.
functools.wraps copies __name__ and __doc__ from wrapped function. """
@functools.wraps(function)
def wrapper(*args, **kwargs):
thread = TaskThread(target=function, args=args, kwargs=kwargs)
thread.start()
return thread
return wrapper | 0.274935 | 0.058212 |
from __future__ import print_function
__all__ = ["getKeyword"]
import re
ptn = re.compile(r'\s*(?P<key>[a-zA-Z_][a-zA-Z0-9_]*)(?:\s*$|(?:\s*(?P<next>[=;])))')
def getKeyword(astr, begInd=0):
"""
Returns the next keyword from an APO format message. Keywords must start
with a letter or underscore and may contain those characters or digits thereafter.
Inputs:
astr: the string to parse
begInd: the starting index; must point to the beginning of the keyword
to be extracted, though leading white space is ignored.
Returns a duple containing:
the next keyword
the index to the next token (should be "=" or ";"), or None of end-of-string
Exceptions:
if the next non-whitespace thing is not a keyword, throws a SyntaxError
"""
mo = ptn.match(astr, begInd)
if mo is None:
raise SyntaxError("not a keyword starting at %d in :%s:" % \
(begInd,astr))
keyword = mo.group('key')
(nextInd, junk) = mo.span('next')
if nextInd < 0:
nextInd = None
return (keyword, nextInd)
if __name__ == '__main__':
# perform test
print("testing getKeyword\n")
testList = [
("text = 'test'", 0),
("text2 = 'test'", 0),
("skipme, text = 'test'", 8),
("text='test'", 0),
("text ;", 0),
("text;", 0),
("text=;", 0),
("text = ;", 0),
("text=", 0),
("text = ", 0),
("text", 0),
("_leadingUnderscore = 'test'", 0),
(" _leadingWhitespace = 'test'", 0),
("text x 'bad character after keyword'", 0),
("text , 'bad character after keyword'", 0),
("text, 'bad character immediately after keyword'", 0),
("0badKeyStart = 'test'", 0),
(", badFirstChar = 'test'", 0),
("; badFirstChar = 'test'", 0),
("'badKeyStart' = 'starts with single quote'", 0),
]
for (astr, nextInd) in testList:
try:
(adict, nextInd) = getKeyword(astr, nextInd)
print("getKeyword('%s') = \"%s\";" % (astr, adict), end=' ')
if nextInd is not None:
print("astr[%d] = \"%s\"" % (nextInd, astr[nextInd]))
else:
print("end of text")
except Exception as e:
print("failed with error: %s" % (e)) | python/opscore/RO/ParseMsg/GetKeyword.py | from __future__ import print_function
__all__ = ["getKeyword"]
import re
ptn = re.compile(r'\s*(?P<key>[a-zA-Z_][a-zA-Z0-9_]*)(?:\s*$|(?:\s*(?P<next>[=;])))')
def getKeyword(astr, begInd=0):
"""
Returns the next keyword from an APO format message. Keywords must start
with a letter or underscore and may contain those characters or digits thereafter.
Inputs:
astr: the string to parse
begInd: the starting index; must point to the beginning of the keyword
to be extracted, though leading white space is ignored.
Returns a duple containing:
the next keyword
the index to the next token (should be "=" or ";"), or None of end-of-string
Exceptions:
if the next non-whitespace thing is not a keyword, throws a SyntaxError
"""
mo = ptn.match(astr, begInd)
if mo is None:
raise SyntaxError("not a keyword starting at %d in :%s:" % \
(begInd,astr))
keyword = mo.group('key')
(nextInd, junk) = mo.span('next')
if nextInd < 0:
nextInd = None
return (keyword, nextInd)
if __name__ == '__main__':
# perform test
print("testing getKeyword\n")
testList = [
("text = 'test'", 0),
("text2 = 'test'", 0),
("skipme, text = 'test'", 8),
("text='test'", 0),
("text ;", 0),
("text;", 0),
("text=;", 0),
("text = ;", 0),
("text=", 0),
("text = ", 0),
("text", 0),
("_leadingUnderscore = 'test'", 0),
(" _leadingWhitespace = 'test'", 0),
("text x 'bad character after keyword'", 0),
("text , 'bad character after keyword'", 0),
("text, 'bad character immediately after keyword'", 0),
("0badKeyStart = 'test'", 0),
(", badFirstChar = 'test'", 0),
("; badFirstChar = 'test'", 0),
("'badKeyStart' = 'starts with single quote'", 0),
]
for (astr, nextInd) in testList:
try:
(adict, nextInd) = getKeyword(astr, nextInd)
print("getKeyword('%s') = \"%s\";" % (astr, adict), end=' ')
if nextInd is not None:
print("astr[%d] = \"%s\"" % (nextInd, astr[nextInd]))
else:
print("end of text")
except Exception as e:
print("failed with error: %s" % (e)) | 0.603932 | 0.303487 |
import unittest
import numpy as np
from numpy.random import random
from numpy.testing import assert_array_almost_equal, assert_array_equal
from structured import hmm
class TestHMM(unittest.TestCase):
def test_hmm_0(self):
# random markov model, deterministic output
nstates = 5
T = 100
pi = random(nstates)
tr = random((nstates, nstates))
def output_distr(y, i, t):
return np.array(y==i+2, 'd')[0]
# states
x = random(T)
x = np.floor(x*nstates)
# observations
y = np.transpose(np.array([x+2], 'd'))
# compute epsilon for this case
eps0 = np.zeros((T-1, nstates, nstates), 'd')
for t in range(T-1):
eps0[t, int(x[t]), int(x[t+1])] = 1.
gamma, L, eps = hmm.forward_backward(
y, pi, tr, output_distr, store_A=True)
for t in range(T):
x_t = int(y[t,0]-2)
self.assertEqual(sum(gamma[t, :]), 1.0)
self.assertEqual(gamma[t, x_t], 1.0)
assert_array_almost_equal(eps, eps0, 7)
gamma, L, eps = hmm.forward_backward(
y, pi, tr, output_distr, store_A=False)
for t in range(T):
x_t = int(y[t,0]-2)
self.assertEqual(sum(gamma[t, :]), 1.0)
self.assertEqual(gamma[t, x_t], 1.0)
assert_array_almost_equal(eps, eps0, 7)
def test_hmm_1(self):
# hand-verified markov model
pi = np.array([0.2, 0.8])
tr = np.array([[0.1, 0.9], [0.8, 0.2]])
# observations
obs = np.array([[0.5, 0.2, 0.3], [0.1, 0.8, 0.1]])
def output_distr(y, i ,t):
return obs[i, y[0]]
y = np.array([[2],[0],[1]], 'i')
# hand-computed probability of x_t being 1 and likelihood
p1 = [0.85970149253731343,
0.079601990049751242,
0.93532338308457708]
py = 0.02814
gamma, L, epsilon = hmm.forward_backward(y, pi, tr, output_distr)
# test that it returns a p.distr. for each time point
not_one = max(abs(np.sum(gamma, axis=1)-1.))
self.assertLess(not_one, 1e-7)
# test that the distr. is the right one
max_dist = max(abs(gamma[:,1]-p1))
self.assertLess(max_dist, 1e-7)
# test the value of the likelihood
self.assertLess(abs(np.log(py)-L), 1e-7)
self.assertEqual(sum(epsilon[0,:,:].ravel()), 1.0)
self.assertEqual(sum(epsilon[1,:,:].ravel()), 1.0)
def test_sample_chain(self):
one = 1.-1e-5
pi = [one, 0., 0.]
ttr = np.zeros((3,3), dtype='d')
ttr[0,1] = one
ttr[1,2] = one
ttr[2,0] = one
x = hmm.sample_chain(6, 3, pi, ttr)
for i in range(3):
assert_array_equal(x[:,i], [0.,1.,2.,0.,1.,2.])
def test_get_prior(self):
# hand-verified markov model (no observations)
pi = np.array([0.2, 0.8])
ttr = np.array([[0.1, 0.9], [0.8, 0.2]])
# hand-computed probability of x_t being 1
p1 = [0.8, 0.34, 0.662]
eps0 = [[[0.02, 0.18], [0.64, 0.16]], [[0.066, 0.594], [0.272, 0.068]]]
p, L, eps = hmm.get_prior(3, pi, ttr)
# test that it returns a p.distr. for each time point
not_one = max(abs(np.sum(p, axis=1)-1.))
self.assertLess(not_one, 1e-7)
# test that the distr. is the right one
assert_array_almost_equal(p1, p[:,1], 7)
assert_array_almost_equal(eps, eps0, 7) | structured/tests/test_hmm.py | import unittest
import numpy as np
from numpy.random import random
from numpy.testing import assert_array_almost_equal, assert_array_equal
from structured import hmm
class TestHMM(unittest.TestCase):
def test_hmm_0(self):
# random markov model, deterministic output
nstates = 5
T = 100
pi = random(nstates)
tr = random((nstates, nstates))
def output_distr(y, i, t):
return np.array(y==i+2, 'd')[0]
# states
x = random(T)
x = np.floor(x*nstates)
# observations
y = np.transpose(np.array([x+2], 'd'))
# compute epsilon for this case
eps0 = np.zeros((T-1, nstates, nstates), 'd')
for t in range(T-1):
eps0[t, int(x[t]), int(x[t+1])] = 1.
gamma, L, eps = hmm.forward_backward(
y, pi, tr, output_distr, store_A=True)
for t in range(T):
x_t = int(y[t,0]-2)
self.assertEqual(sum(gamma[t, :]), 1.0)
self.assertEqual(gamma[t, x_t], 1.0)
assert_array_almost_equal(eps, eps0, 7)
gamma, L, eps = hmm.forward_backward(
y, pi, tr, output_distr, store_A=False)
for t in range(T):
x_t = int(y[t,0]-2)
self.assertEqual(sum(gamma[t, :]), 1.0)
self.assertEqual(gamma[t, x_t], 1.0)
assert_array_almost_equal(eps, eps0, 7)
def test_hmm_1(self):
# hand-verified markov model
pi = np.array([0.2, 0.8])
tr = np.array([[0.1, 0.9], [0.8, 0.2]])
# observations
obs = np.array([[0.5, 0.2, 0.3], [0.1, 0.8, 0.1]])
def output_distr(y, i ,t):
return obs[i, y[0]]
y = np.array([[2],[0],[1]], 'i')
# hand-computed probability of x_t being 1 and likelihood
p1 = [0.85970149253731343,
0.079601990049751242,
0.93532338308457708]
py = 0.02814
gamma, L, epsilon = hmm.forward_backward(y, pi, tr, output_distr)
# test that it returns a p.distr. for each time point
not_one = max(abs(np.sum(gamma, axis=1)-1.))
self.assertLess(not_one, 1e-7)
# test that the distr. is the right one
max_dist = max(abs(gamma[:,1]-p1))
self.assertLess(max_dist, 1e-7)
# test the value of the likelihood
self.assertLess(abs(np.log(py)-L), 1e-7)
self.assertEqual(sum(epsilon[0,:,:].ravel()), 1.0)
self.assertEqual(sum(epsilon[1,:,:].ravel()), 1.0)
def test_sample_chain(self):
one = 1.-1e-5
pi = [one, 0., 0.]
ttr = np.zeros((3,3), dtype='d')
ttr[0,1] = one
ttr[1,2] = one
ttr[2,0] = one
x = hmm.sample_chain(6, 3, pi, ttr)
for i in range(3):
assert_array_equal(x[:,i], [0.,1.,2.,0.,1.,2.])
def test_get_prior(self):
# hand-verified markov model (no observations)
pi = np.array([0.2, 0.8])
ttr = np.array([[0.1, 0.9], [0.8, 0.2]])
# hand-computed probability of x_t being 1
p1 = [0.8, 0.34, 0.662]
eps0 = [[[0.02, 0.18], [0.64, 0.16]], [[0.066, 0.594], [0.272, 0.068]]]
p, L, eps = hmm.get_prior(3, pi, ttr)
# test that it returns a p.distr. for each time point
not_one = max(abs(np.sum(p, axis=1)-1.))
self.assertLess(not_one, 1e-7)
# test that the distr. is the right one
assert_array_almost_equal(p1, p[:,1], 7)
assert_array_almost_equal(eps, eps0, 7) | 0.67662 | 0.748214 |
from zope.interface import implements
from axiom.item import Item
from axiom.attributes import reference
from imaginary.iimaginary import ISittable, IContainer, IMovementRestriction
from imaginary.eimaginary import ActionFailure
from imaginary.events import ThatDoesntWork
from imaginary.language import Noun
from imaginary.action import Action, TargetAction
from imaginary.events import Success
from imaginary.enhancement import Enhancement
from imaginary.objects import Container
from imaginary.pyparsing import Literal, Optional, restOfLine
class Sit(TargetAction):
"""
An action allowing a player to sit down in a chair.
"""
expr = (Literal("sit") + Optional(Literal("on")) +
restOfLine.setResultsName("target"))
targetInterface = ISittable
def do(self, player, line, target):
"""
Do the action; sit down.
"""
target.seat(player)
actorMessage=["You sit in ",
Noun(target.thing).definiteNounPhrase(),"."]
otherMessage=[player.thing, " sits in ",
Noun(target.thing).definiteNounPhrase(),"."]
Success(actor=player.thing, location=player.thing.location,
actorMessage=actorMessage,
otherMessage=otherMessage).broadcast()
class Stand(Action):
"""
Stand up from a sitting position.
"""
expr = (Literal("stand") + Optional(Literal("up")))
def do(self, player, line):
"""
Do the action; stand up.
"""
# XXX This is wrong. I should be issuing an obtain() query to find
# something that qualifies as "my location" or "the thing I'm already
# sitting in".
chair = ISittable(player.thing.location, None)
if chair is None:
raise ActionFailure(ThatDoesntWork(
actor=player.thing,
actorMessage=["You're already standing."]))
chair.unseat(player)
Success(actor=player.thing, location=player.thing.location,
actorMessage=["You stand up."],
otherMessage=[player.thing, " stands up."]).broadcast()
class Chair(Enhancement, Item):
"""
A chair is a thing you can sit in.
"""
implements(ISittable, IMovementRestriction)
powerupInterfaces = [ISittable]
thing = reference()
container = reference()
def movementImminent(self, movee, destination):
"""
A player tried to move while they were seated. Prevent them from doing
so, noting that they must stand first.
(Assume the player was trying to move themselves, although there's no
way to know currently.)
"""
raise ActionFailure(ThatDoesntWork(
actor=movee,
actorMessage=u"You can't do that while sitting down."))
def applyEnhancement(self):
"""
Apply this enhancement to this L{Chair}'s thing, creating a
L{Container} to hold the seated player, if necessary.
"""
super(Chair, self).applyEnhancement()
container = IContainer(self.thing, None)
if container is None:
container = Container.createFor(self.thing, capacity=300)
self.container = container
def seat(self, player):
"""
The player sat down on this chair; place them into it and prevent them
from moving elsewhere until they stand up.
"""
player.thing.moveTo(self.container)
player.thing.powerUp(self, IMovementRestriction)
def unseat(self, player):
"""
The player stood up; remove them from this chair.
"""
player.thing.powerDown(self, IMovementRestriction)
player.thing.moveTo(self.container.thing.location) | ExampleGame/examplegame/furniture.py | from zope.interface import implements
from axiom.item import Item
from axiom.attributes import reference
from imaginary.iimaginary import ISittable, IContainer, IMovementRestriction
from imaginary.eimaginary import ActionFailure
from imaginary.events import ThatDoesntWork
from imaginary.language import Noun
from imaginary.action import Action, TargetAction
from imaginary.events import Success
from imaginary.enhancement import Enhancement
from imaginary.objects import Container
from imaginary.pyparsing import Literal, Optional, restOfLine
class Sit(TargetAction):
"""
An action allowing a player to sit down in a chair.
"""
expr = (Literal("sit") + Optional(Literal("on")) +
restOfLine.setResultsName("target"))
targetInterface = ISittable
def do(self, player, line, target):
"""
Do the action; sit down.
"""
target.seat(player)
actorMessage=["You sit in ",
Noun(target.thing).definiteNounPhrase(),"."]
otherMessage=[player.thing, " sits in ",
Noun(target.thing).definiteNounPhrase(),"."]
Success(actor=player.thing, location=player.thing.location,
actorMessage=actorMessage,
otherMessage=otherMessage).broadcast()
class Stand(Action):
"""
Stand up from a sitting position.
"""
expr = (Literal("stand") + Optional(Literal("up")))
def do(self, player, line):
"""
Do the action; stand up.
"""
# XXX This is wrong. I should be issuing an obtain() query to find
# something that qualifies as "my location" or "the thing I'm already
# sitting in".
chair = ISittable(player.thing.location, None)
if chair is None:
raise ActionFailure(ThatDoesntWork(
actor=player.thing,
actorMessage=["You're already standing."]))
chair.unseat(player)
Success(actor=player.thing, location=player.thing.location,
actorMessage=["You stand up."],
otherMessage=[player.thing, " stands up."]).broadcast()
class Chair(Enhancement, Item):
"""
A chair is a thing you can sit in.
"""
implements(ISittable, IMovementRestriction)
powerupInterfaces = [ISittable]
thing = reference()
container = reference()
def movementImminent(self, movee, destination):
"""
A player tried to move while they were seated. Prevent them from doing
so, noting that they must stand first.
(Assume the player was trying to move themselves, although there's no
way to know currently.)
"""
raise ActionFailure(ThatDoesntWork(
actor=movee,
actorMessage=u"You can't do that while sitting down."))
def applyEnhancement(self):
"""
Apply this enhancement to this L{Chair}'s thing, creating a
L{Container} to hold the seated player, if necessary.
"""
super(Chair, self).applyEnhancement()
container = IContainer(self.thing, None)
if container is None:
container = Container.createFor(self.thing, capacity=300)
self.container = container
def seat(self, player):
"""
The player sat down on this chair; place them into it and prevent them
from moving elsewhere until they stand up.
"""
player.thing.moveTo(self.container)
player.thing.powerUp(self, IMovementRestriction)
def unseat(self, player):
"""
The player stood up; remove them from this chair.
"""
player.thing.powerDown(self, IMovementRestriction)
player.thing.moveTo(self.container.thing.location) | 0.600305 | 0.210138 |
from typing import Dict, Tuple
import numpy as np
from ax.core.types import TParameterization
from ax.exceptions.core import OptimizationShouldStop
from ax.global_stopping.strategies.base import BaseGlobalStoppingStrategy
from ax.service.ax_client import AxClient
from ax.utils.common.testutils import TestCase
from ax.utils.measurement.synthetic_functions import branin
from ax.utils.testing.core_stubs import DummyGlobalStoppingStrategy
class TestGlobalStoppingIntegration(TestCase):
def get_ax_client_for_branin(
self,
global_stopping_strategy: BaseGlobalStoppingStrategy,
) -> AxClient:
"""
Instantiates an AxClient for the branin experiment with the specified
global stopping strategy.
"""
ax_client = AxClient(global_stopping_strategy=global_stopping_strategy)
ax_client.create_experiment(
name="branin_test_experiment",
parameters=[
{
"name": "x1",
"type": "range",
"bounds": [-5.0, 10.0],
},
{
"name": "x2",
"type": "range",
"bounds": [0.0, 15.0],
},
],
objective_name="branin",
minimize=True,
)
return ax_client
def evaluate(self, parameters: TParameterization) -> Dict[str, Tuple[float, float]]:
"""Evaluates the parameters for branin experiment."""
x = np.array([parameters.get(f"x{i+1}") for i in range(2)])
return {"branin": (branin(x), 0.0)}
def test_global_stopping_integration(self):
"""
Specifying a dummy global stopping strategy which stops
the optimization after 3 trials are completed.
"""
global_stopping_strategy = DummyGlobalStoppingStrategy(
min_trials=2, trial_to_stop=3
)
ax_client = self.get_ax_client_for_branin(
global_stopping_strategy=global_stopping_strategy
)
# Running the first 3 iterations.
for _ in range(3):
parameters, trial_index = ax_client.get_next_trial()
ax_client.complete_trial(
trial_index=trial_index, raw_data=self.evaluate(parameters)
)
# Trying to run the 4th iteration, which should raise
exception = OptimizationShouldStop(message="Stop the optimization.")
with self.assertRaises(OptimizationShouldStop) as cm:
parameters, trial_index = ax_client.get_next_trial()
# Assert Exception's message is unchanged.
self.assertEqual(cm.exception.message, exception.message)
# Trying to run the 4th iteration by overruling the stopping strategy.
parameters, trial_index = ax_client.get_next_trial(force=True)
self.assertIsNotNone(parameters)
def test_min_trials(self):
"""
Tests the min_trials mechanism of the stopping strategy; that is,
the stopping strategy should not take effect before min_trials trials
are completed.
"""
global_stopping_strategy = DummyGlobalStoppingStrategy(
min_trials=3, trial_to_stop=2
)
ax_client = self.get_ax_client_for_branin(
global_stopping_strategy=global_stopping_strategy
)
# Running the first 2 iterations.
for _ in range(2):
parameters, trial_index = ax_client.get_next_trial()
ax_client.complete_trial(
trial_index=trial_index, raw_data=self.evaluate(parameters)
)
# Since min_trials=3, GSS should not stop creating the 3rd iteration.
parameters, trial_index = ax_client.get_next_trial()
ax_client.complete_trial(
trial_index=trial_index, raw_data=self.evaluate(parameters)
)
self.assertIsNotNone(parameters)
# Now, GSS should stop creating the 4th iteration.
exception = OptimizationShouldStop(message="Stop the optimization.")
with self.assertRaises(OptimizationShouldStop) as cm:
parameters, trial_index = ax_client.get_next_trial()
# Assert Exception's message is unchanged.
self.assertEqual(cm.exception.message, exception.message) | ax/service/tests/test_global_stopping.py |
from typing import Dict, Tuple
import numpy as np
from ax.core.types import TParameterization
from ax.exceptions.core import OptimizationShouldStop
from ax.global_stopping.strategies.base import BaseGlobalStoppingStrategy
from ax.service.ax_client import AxClient
from ax.utils.common.testutils import TestCase
from ax.utils.measurement.synthetic_functions import branin
from ax.utils.testing.core_stubs import DummyGlobalStoppingStrategy
class TestGlobalStoppingIntegration(TestCase):
def get_ax_client_for_branin(
self,
global_stopping_strategy: BaseGlobalStoppingStrategy,
) -> AxClient:
"""
Instantiates an AxClient for the branin experiment with the specified
global stopping strategy.
"""
ax_client = AxClient(global_stopping_strategy=global_stopping_strategy)
ax_client.create_experiment(
name="branin_test_experiment",
parameters=[
{
"name": "x1",
"type": "range",
"bounds": [-5.0, 10.0],
},
{
"name": "x2",
"type": "range",
"bounds": [0.0, 15.0],
},
],
objective_name="branin",
minimize=True,
)
return ax_client
def evaluate(self, parameters: TParameterization) -> Dict[str, Tuple[float, float]]:
"""Evaluates the parameters for branin experiment."""
x = np.array([parameters.get(f"x{i+1}") for i in range(2)])
return {"branin": (branin(x), 0.0)}
def test_global_stopping_integration(self):
"""
Specifying a dummy global stopping strategy which stops
the optimization after 3 trials are completed.
"""
global_stopping_strategy = DummyGlobalStoppingStrategy(
min_trials=2, trial_to_stop=3
)
ax_client = self.get_ax_client_for_branin(
global_stopping_strategy=global_stopping_strategy
)
# Running the first 3 iterations.
for _ in range(3):
parameters, trial_index = ax_client.get_next_trial()
ax_client.complete_trial(
trial_index=trial_index, raw_data=self.evaluate(parameters)
)
# Trying to run the 4th iteration, which should raise
exception = OptimizationShouldStop(message="Stop the optimization.")
with self.assertRaises(OptimizationShouldStop) as cm:
parameters, trial_index = ax_client.get_next_trial()
# Assert Exception's message is unchanged.
self.assertEqual(cm.exception.message, exception.message)
# Trying to run the 4th iteration by overruling the stopping strategy.
parameters, trial_index = ax_client.get_next_trial(force=True)
self.assertIsNotNone(parameters)
def test_min_trials(self):
"""
Tests the min_trials mechanism of the stopping strategy; that is,
the stopping strategy should not take effect before min_trials trials
are completed.
"""
global_stopping_strategy = DummyGlobalStoppingStrategy(
min_trials=3, trial_to_stop=2
)
ax_client = self.get_ax_client_for_branin(
global_stopping_strategy=global_stopping_strategy
)
# Running the first 2 iterations.
for _ in range(2):
parameters, trial_index = ax_client.get_next_trial()
ax_client.complete_trial(
trial_index=trial_index, raw_data=self.evaluate(parameters)
)
# Since min_trials=3, GSS should not stop creating the 3rd iteration.
parameters, trial_index = ax_client.get_next_trial()
ax_client.complete_trial(
trial_index=trial_index, raw_data=self.evaluate(parameters)
)
self.assertIsNotNone(parameters)
# Now, GSS should stop creating the 4th iteration.
exception = OptimizationShouldStop(message="Stop the optimization.")
with self.assertRaises(OptimizationShouldStop) as cm:
parameters, trial_index = ax_client.get_next_trial()
# Assert Exception's message is unchanged.
self.assertEqual(cm.exception.message, exception.message) | 0.927986 | 0.284731 |
from __future__ import absolute_import, print_function
import imp
import sys
from collections import namedtuple
import pytest
from flask import Flask
from flask import current_app as flask_current_app
from flask import g
from flask_limiter import Limiter
from mock import patch
from pkg_resources import Distribution
from invenio_app import InvenioApp
from invenio_app.config import APP_DEFAULT_SECURE_HEADERS, set_rate_limit
from invenio_app.ext import useragent_and_ip_limit_key
from invenio_app.helpers import obj_or_import_string
@pytest.fixture()
def base_app():
"""Flask application fixture."""
app_ = Flask('testapp')
app_.config.update(
SECRET_KEY='SECRET_KEY',
TESTING=True,
)
app_.config['APP_DEFAULT_SECURE_HEADERS'] = APP_DEFAULT_SECURE_HEADERS
app_.config['APP_DEFAULT_SECURE_HEADERS']['force_https'] = False
@app_.route('/requestid')
def requestid():
from flask import g # Prevent pytest problems
return g.request_id if g and hasattr(g, 'request_id') else ''
@app_.route('/limited_rate')
def limited_rate():
return 'test'
@app_.route('/unlimited_rate')
def unlimited_rate():
return 'test'
return app_
@pytest.fixture()
def app_with_no_limiter(base_app):
"""Flask application fixture without limiter registered."""
with base_app.app_context():
yield base_app
@pytest.yield_fixture()
def app(base_app):
"""Flask application fixture."""
base_app.config.update(
APP_ALLOWED_HOSTS=['localhost'],
RATELIMIT_APPLICATION=set_rate_limit,
RATELIMIT_GUEST_USER='2 per second',
RATELIMIT_AUTHENTICATED_USER='5 per second',
RATELIMIT_PER_ENDPOINT={'unlimited_rate': '200 per second'},
RATELIMIT_HEADERS_ENABLED=True
)
Limiter(
base_app,
key_func=obj_or_import_string(
base_app.config.get('RATELIMIT_KEY_FUNC'),
default=useragent_and_ip_limit_key)
)
with base_app.app_context():
yield base_app
@pytest.fixture()
def wsgi_apps():
"""Wsgi app fixture."""
from invenio_base.app import create_app_factory
from invenio_base.wsgi import create_wsgi_factory, wsgi_proxyfix
def _config(app, **kwargs):
app.config.update(
SECRET_KEY='SECRET_KEY',
TESTING=True,
)
app.config['APP_DEFAULT_SECURE_HEADERS'] = APP_DEFAULT_SECURE_HEADERS
app.config['APP_DEFAULT_SECURE_HEADERS']['force_https'] = False
# API
create_api = create_app_factory(
'invenio',
config_loader=_config,
wsgi_factory=wsgi_proxyfix(),
)
# UI
create_ui = create_app_factory(
'invenio',
config_loader=_config,
wsgi_factory=wsgi_proxyfix(),
)
# Combined
create_app = create_app_factory(
'invenio',
config_loader=_config,
wsgi_factory=wsgi_proxyfix(create_wsgi_factory({'/api': create_api})),
)
return create_app, create_ui, create_api
@pytest.fixture()
def create_mocked_flask_security_with_user_init():
"""Create a function initializing flask security with a user."""
def mocked_flask_security(user):
"""Add mocked flask-security."""
module_name = 'flask_security'
test_api_module = imp.new_module(module_name)
test_api_module.current_user = \
namedtuple("User", user.keys())(*user.values())
sys.modules[module_name] = test_api_module
return test_api_module
return mocked_flask_security
@pytest.fixture()
def push_rate_limit_to_context():
"""Push a custom rate limit to the Flask global context."""
custom_rate_limit = '10 per second'
setattr(g, 'user_rate_limit', custom_rate_limit)
return custom_rate_limit | tests/conftest.py | from __future__ import absolute_import, print_function
import imp
import sys
from collections import namedtuple
import pytest
from flask import Flask
from flask import current_app as flask_current_app
from flask import g
from flask_limiter import Limiter
from mock import patch
from pkg_resources import Distribution
from invenio_app import InvenioApp
from invenio_app.config import APP_DEFAULT_SECURE_HEADERS, set_rate_limit
from invenio_app.ext import useragent_and_ip_limit_key
from invenio_app.helpers import obj_or_import_string
@pytest.fixture()
def base_app():
"""Flask application fixture."""
app_ = Flask('testapp')
app_.config.update(
SECRET_KEY='SECRET_KEY',
TESTING=True,
)
app_.config['APP_DEFAULT_SECURE_HEADERS'] = APP_DEFAULT_SECURE_HEADERS
app_.config['APP_DEFAULT_SECURE_HEADERS']['force_https'] = False
@app_.route('/requestid')
def requestid():
from flask import g # Prevent pytest problems
return g.request_id if g and hasattr(g, 'request_id') else ''
@app_.route('/limited_rate')
def limited_rate():
return 'test'
@app_.route('/unlimited_rate')
def unlimited_rate():
return 'test'
return app_
@pytest.fixture()
def app_with_no_limiter(base_app):
"""Flask application fixture without limiter registered."""
with base_app.app_context():
yield base_app
@pytest.yield_fixture()
def app(base_app):
"""Flask application fixture."""
base_app.config.update(
APP_ALLOWED_HOSTS=['localhost'],
RATELIMIT_APPLICATION=set_rate_limit,
RATELIMIT_GUEST_USER='2 per second',
RATELIMIT_AUTHENTICATED_USER='5 per second',
RATELIMIT_PER_ENDPOINT={'unlimited_rate': '200 per second'},
RATELIMIT_HEADERS_ENABLED=True
)
Limiter(
base_app,
key_func=obj_or_import_string(
base_app.config.get('RATELIMIT_KEY_FUNC'),
default=useragent_and_ip_limit_key)
)
with base_app.app_context():
yield base_app
@pytest.fixture()
def wsgi_apps():
"""Wsgi app fixture."""
from invenio_base.app import create_app_factory
from invenio_base.wsgi import create_wsgi_factory, wsgi_proxyfix
def _config(app, **kwargs):
app.config.update(
SECRET_KEY='SECRET_KEY',
TESTING=True,
)
app.config['APP_DEFAULT_SECURE_HEADERS'] = APP_DEFAULT_SECURE_HEADERS
app.config['APP_DEFAULT_SECURE_HEADERS']['force_https'] = False
# API
create_api = create_app_factory(
'invenio',
config_loader=_config,
wsgi_factory=wsgi_proxyfix(),
)
# UI
create_ui = create_app_factory(
'invenio',
config_loader=_config,
wsgi_factory=wsgi_proxyfix(),
)
# Combined
create_app = create_app_factory(
'invenio',
config_loader=_config,
wsgi_factory=wsgi_proxyfix(create_wsgi_factory({'/api': create_api})),
)
return create_app, create_ui, create_api
@pytest.fixture()
def create_mocked_flask_security_with_user_init():
"""Create a function initializing flask security with a user."""
def mocked_flask_security(user):
"""Add mocked flask-security."""
module_name = 'flask_security'
test_api_module = imp.new_module(module_name)
test_api_module.current_user = \
namedtuple("User", user.keys())(*user.values())
sys.modules[module_name] = test_api_module
return test_api_module
return mocked_flask_security
@pytest.fixture()
def push_rate_limit_to_context():
"""Push a custom rate limit to the Flask global context."""
custom_rate_limit = '10 per second'
setattr(g, 'user_rate_limit', custom_rate_limit)
return custom_rate_limit | 0.457137 | 0.072243 |
import datetime
import os
from test.splitgraph.conftest import INGESTION_RESOURCES
from unittest.mock import MagicMock
from click.testing import CliRunner
from splitgraph.commandline.ingestion import csv_import
from splitgraph.core.repository import Repository
from splitgraph.core.types import Credentials, Params
from splitgraph.engine import ResultShape
from splitgraph.engine.postgres.engine import PsycopgEngine
from splitgraph.ingestion.dbt.data_source import DBTDataSource
_REPO_PATH = "https://github.com/splitgraph/jaffle_shop_archive"
def test_dbt_data_source_params_parsing():
engine = MagicMock(PsycopgEngine)
source = DBTDataSource(
engine,
credentials=Credentials({"git_url": _REPO_PATH}),
params=Params(
{
"sources": [
{
"dbt_source_name": "raw_jaffle_shop",
"namespace": "ingestion-raw",
"repository": "jaffle-shop",
"hash_or_tag": "test-branch",
},
{
"dbt_source_name": "other_source",
"namespace": "other-ns",
"repository": "other-repo",
},
],
}
),
)
assert source.source_map == {
"other_source": (
"other-ns",
"other-repo",
"latest",
),
"raw_jaffle_shop": (
"ingestion-raw",
"jaffle-shop",
"test-branch",
),
}
assert source.git_branch == "master"
def test_dbt_data_source_introspection(local_engine_empty):
# We can do introspection without the source map defined, but we do need an engine connection.
# Use the branch with the v2 config version
source = DBTDataSource(
local_engine_empty,
credentials=Credentials({"git_url": _REPO_PATH}),
params=Params({"git_branch": "sg-integration-test"}),
)
result = source.introspect()
assert len(result) == 5
# We currently don't return a table schema (we can't know it) or params (pointless, as we
# don't let the user remap dbt model names to other table names).
assert result["customer_orders"] == ([], {})
def test_dbt_data_source_load(local_engine_empty):
# Make a local Splitgraph repo out of the CSV files
basedir = os.path.join(INGESTION_RESOURCES, "dbt", "jaffle_csv")
# Use two repositories to test out the source <> image remapper. In the integration test
# project, we use one source for the customers table and a different one for the orders/payments
# tables.
customers_repo = Repository("test", "raw-jaffle-data-customers")
orders_repo = Repository("test", "raw-jaffle-data-orders")
customers_repo.init()
orders_repo.init()
customers_repo.commit_engines()
tables = ["customers", "orders", "payments"]
for table in tables:
runner = CliRunner()
result = runner.invoke(
csv_import,
[
str(customers_repo) if table == "customers" else str(orders_repo),
table,
"-f",
os.path.join(basedir, f"raw_{table}.csv"),
],
catch_exceptions=False,
)
assert result.exit_code == 0
customers_repo.commit()
orders_repo.commit()
assert sorted(customers_repo.images["latest"].get_tables()) == ["customers"]
assert sorted(orders_repo.images["latest"].get_tables()) == ["orders", "payments"]
# Set up the data source
source = DBTDataSource(
local_engine_empty,
credentials=Credentials({"git_url": _REPO_PATH}),
params=Params(
{
"sources": [
{
"dbt_source_name": "raw_jaffle_shop_customers",
"namespace": customers_repo.namespace,
"repository": customers_repo.repository,
},
{
"dbt_source_name": "raw_jaffle_shop_orders",
"namespace": orders_repo.namespace,
"repository": orders_repo.repository,
},
],
"git_branch": "sg-integration-test",
}
),
)
assert sorted(source.get_required_images()) == sorted(
[
(customers_repo.namespace, customers_repo.repository, "latest"),
(orders_repo.namespace, orders_repo.repository, "latest"),
]
)
target_repo = Repository("test", "jaffle-processed")
# Test build of one model (including its parents)
source.load(repository=target_repo, tables=["fct_orders"])
result = target_repo.images["latest"]
# fct_orders depends on order_payments, so we pull it here too
assert sorted(result.get_tables()) == ["fct_orders", "order_payments"]
with result.query_schema() as s:
assert (
result.engine.run_sql_in(
s, "SELECT COUNT(1) FROM fct_orders", return_shape=ResultShape.ONE_ONE
)
== 99
)
assert result.engine.run_sql_in(
s, "SELECT * FROM fct_orders ORDER BY order_date DESC LIMIT 1"
) == [
(
99,
85,
datetime.date(2018, 4, 9),
"placed",
24,
0,
0,
0,
24,
),
]
assert (
result.engine.run_sql_in(
s, "SELECT COUNT(1) FROM order_payments", return_shape=ResultShape.ONE_ONE
)
== 99
)
# Test build of all models
source.load(repository=target_repo)
result = target_repo.images["latest"]
assert sorted(result.get_tables()) == [
"customer_orders",
"customer_payments",
"dim_customers",
"fct_orders",
"order_payments",
] | test/splitgraph/ingestion/test_dbt_data_source.py | import datetime
import os
from test.splitgraph.conftest import INGESTION_RESOURCES
from unittest.mock import MagicMock
from click.testing import CliRunner
from splitgraph.commandline.ingestion import csv_import
from splitgraph.core.repository import Repository
from splitgraph.core.types import Credentials, Params
from splitgraph.engine import ResultShape
from splitgraph.engine.postgres.engine import PsycopgEngine
from splitgraph.ingestion.dbt.data_source import DBTDataSource
_REPO_PATH = "https://github.com/splitgraph/jaffle_shop_archive"
def test_dbt_data_source_params_parsing():
engine = MagicMock(PsycopgEngine)
source = DBTDataSource(
engine,
credentials=Credentials({"git_url": _REPO_PATH}),
params=Params(
{
"sources": [
{
"dbt_source_name": "raw_jaffle_shop",
"namespace": "ingestion-raw",
"repository": "jaffle-shop",
"hash_or_tag": "test-branch",
},
{
"dbt_source_name": "other_source",
"namespace": "other-ns",
"repository": "other-repo",
},
],
}
),
)
assert source.source_map == {
"other_source": (
"other-ns",
"other-repo",
"latest",
),
"raw_jaffle_shop": (
"ingestion-raw",
"jaffle-shop",
"test-branch",
),
}
assert source.git_branch == "master"
def test_dbt_data_source_introspection(local_engine_empty):
# We can do introspection without the source map defined, but we do need an engine connection.
# Use the branch with the v2 config version
source = DBTDataSource(
local_engine_empty,
credentials=Credentials({"git_url": _REPO_PATH}),
params=Params({"git_branch": "sg-integration-test"}),
)
result = source.introspect()
assert len(result) == 5
# We currently don't return a table schema (we can't know it) or params (pointless, as we
# don't let the user remap dbt model names to other table names).
assert result["customer_orders"] == ([], {})
def test_dbt_data_source_load(local_engine_empty):
# Make a local Splitgraph repo out of the CSV files
basedir = os.path.join(INGESTION_RESOURCES, "dbt", "jaffle_csv")
# Use two repositories to test out the source <> image remapper. In the integration test
# project, we use one source for the customers table and a different one for the orders/payments
# tables.
customers_repo = Repository("test", "raw-jaffle-data-customers")
orders_repo = Repository("test", "raw-jaffle-data-orders")
customers_repo.init()
orders_repo.init()
customers_repo.commit_engines()
tables = ["customers", "orders", "payments"]
for table in tables:
runner = CliRunner()
result = runner.invoke(
csv_import,
[
str(customers_repo) if table == "customers" else str(orders_repo),
table,
"-f",
os.path.join(basedir, f"raw_{table}.csv"),
],
catch_exceptions=False,
)
assert result.exit_code == 0
customers_repo.commit()
orders_repo.commit()
assert sorted(customers_repo.images["latest"].get_tables()) == ["customers"]
assert sorted(orders_repo.images["latest"].get_tables()) == ["orders", "payments"]
# Set up the data source
source = DBTDataSource(
local_engine_empty,
credentials=Credentials({"git_url": _REPO_PATH}),
params=Params(
{
"sources": [
{
"dbt_source_name": "raw_jaffle_shop_customers",
"namespace": customers_repo.namespace,
"repository": customers_repo.repository,
},
{
"dbt_source_name": "raw_jaffle_shop_orders",
"namespace": orders_repo.namespace,
"repository": orders_repo.repository,
},
],
"git_branch": "sg-integration-test",
}
),
)
assert sorted(source.get_required_images()) == sorted(
[
(customers_repo.namespace, customers_repo.repository, "latest"),
(orders_repo.namespace, orders_repo.repository, "latest"),
]
)
target_repo = Repository("test", "jaffle-processed")
# Test build of one model (including its parents)
source.load(repository=target_repo, tables=["fct_orders"])
result = target_repo.images["latest"]
# fct_orders depends on order_payments, so we pull it here too
assert sorted(result.get_tables()) == ["fct_orders", "order_payments"]
with result.query_schema() as s:
assert (
result.engine.run_sql_in(
s, "SELECT COUNT(1) FROM fct_orders", return_shape=ResultShape.ONE_ONE
)
== 99
)
assert result.engine.run_sql_in(
s, "SELECT * FROM fct_orders ORDER BY order_date DESC LIMIT 1"
) == [
(
99,
85,
datetime.date(2018, 4, 9),
"placed",
24,
0,
0,
0,
24,
),
]
assert (
result.engine.run_sql_in(
s, "SELECT COUNT(1) FROM order_payments", return_shape=ResultShape.ONE_ONE
)
== 99
)
# Test build of all models
source.load(repository=target_repo)
result = target_repo.images["latest"]
assert sorted(result.get_tables()) == [
"customer_orders",
"customer_payments",
"dim_customers",
"fct_orders",
"order_payments",
] | 0.63409 | 0.334739 |
import random
import argparse
import sys
import os
import json
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..')))
from common.pytorch.ner.model import Tagger
def lines(path):
with open(path) as f:
return [l[:-1] for l in f.readlines()]
def invert(xs):
return { t: i for i, t in enumerate(xs) }
def harmonic_mean(a, b):
if a == 0 or b == 0:
return 0
m = ((1 / a) + (1 / b)) / 2
return 1 / m
def print_stat(name, value):
print('%s: %.2f%%' % (name, (100 * value)))
def run_epoch(model, criterion, optimizer, data, eos, sos_tag):
words = data['words']
tags = data['tags']
sos_offset = 1 if sos_tag == None else 2
print('Training...')
count, epoch_loss = 0, 0
for i, j in zip(eos, eos[1:]):
print('%s/%s' % (count, len(eos)-1), end='\r')
count += 1
# <EOS>, <SOS>, ..., <EOS>, <SOS>, ...
sentence = words[i+sos_offset:j]
sentence_tags = tags[i+sos_offset:j]
optimizer.zero_grad()
loss = criterion(sentence, sentence_tags)
epoch_loss += loss.item()
loss.backward()
optimizer.step()
print('Epoch avg loss: %.6f' % (epoch_loss / count))
def compute_stats(model, data, eos, nop_tag, sos_tag):
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# removes initial <SOS> tag if present
sos_offset = 1 if sos_tag == None else 2
words = data['words']
tags = data['tags']
print('Computing accuracy...')
count = 0
correct = 0
nop_predicted_as_nop = 0
nop_predicted_as_tag = 0
tag_predicted_correctly = 0
tag_predicted_as_nop = 0
tag_predicted_as_other_tag = 0
for i, j in zip(eos, eos[1:]):
print('%s/%s' % (count, len(eos)-1), end='\r')
count += 1
sentence = words[i+sos_offset:j]
real_tags = tags[i+sos_offset:j]
model.zero_grad()
_, predicted_tags = model(sentence)
predicted_tags = torch.tensor(predicted_tags).to(device)
real_tags_nop = real_tags == nop_tag
predicted_tags_nop = predicted_tags == nop_tag
matches = real_tags == predicted_tags
nop_predicted_as_nop += (real_tags_nop * matches).sum().item()
nop_predicted_as_tag += (real_tags_nop * (1 - matches)).sum().item()
tag_predicted_correctly += ((1 - real_tags_nop) * matches).sum().item()
tag_predicted_as_nop += ((1 - real_tags_nop) * (1 - matches) * predicted_tags_nop).sum().item()
tag_predicted_as_other_tag += ((1 - real_tags_nop) * (1 - matches) * (1 - predicted_tags_nop)).sum().item()
#print(tag_predicted_correctly, nop_predicted_as_tag, nop_predicted_as_nop, tag_predicted_as_other_tag, tag_predicted_as_nop)
predicted_as_tag = tag_predicted_correctly + nop_predicted_as_tag + tag_predicted_as_other_tag
actual_tags = tag_predicted_correctly + tag_predicted_as_nop + tag_predicted_as_other_tag
precision = tag_predicted_correctly / predicted_as_tag if (predicted_as_tag > 0) else 0
recall = tag_predicted_correctly / actual_tags if (actual_tags > 0) else 0
f1 = harmonic_mean(precision, recall)
#SOS and EOS are not tags to be predicted
tags_to_predict = tag_predicted_correctly + tag_predicted_as_nop + nop_predicted_as_tag + nop_predicted_as_nop + tag_predicted_as_other_tag
accuracy = (nop_predicted_as_nop + tag_predicted_correctly) / tags_to_predict
print_stat('Accuracy', accuracy)
print_stat('Precision', precision)
print_stat('Recall', recall)
print_stat('F1-score', f1)
return {'accuracy': accuracy, 'precision': precision, 'recall': recall, 'f1': f1}
def show_example(model, data, eos, indices, sos_tag):
device = 'cuda' if torch.cuda.is_available() else 'cpu'
words = data['words']
tags = data['tags']
word_index = indices['words']
tag_index = indices['tags']
i = random.randint(0, len(eos)-2)
sos_offset = 1 if sos_tag==None else 2
start, end = eos[i], eos[i+1]
sentence = words[start+sos_offset:end]
real_tags = tags[start+sos_offset:end]
text = ' '.join(word_index[i] for i in sentence.data)
real_tag_text = ' '.join(tag_index[i] for i in real_tags.data)
print('> ' + text)
print('Actual tags:')
print('> ' + real_tag_text)
_, predicted_tags = model(sentence)
predicted_tags = torch.tensor(predicted_tags).to(device)
predicted_tags_text = ' '.join(tag_index[i] for i in predicted_tags)
print('Predicted tags:')
print('> ' + predicted_tags_text)
def write_results(stats, options, epoch):
if options.results is not None:
results = {
'epoch': epoch,
'params': {
'num-epochs': options.num_epochs,
'model': options.model,
'train-words': options.train_words,
'train-tags': options.train_tags,
'test-words': options.test_words,
'test-tags': options.test_tags,
'embedding': options.embeddings,
'learning-rate': options.learning_rate,
'momentum': options.momentum,
'dropout': options.dropout,
'num-layers': options.num_layers,
'hidden-dim': options.hidden_dim,
'bidirectional': not options.unidirectional
},
'metrics': stats
}
with open(options.results, 'w') as f:
json.dump(results, f)
def parse_options():
parser = argparse.ArgumentParser(description='Run LSTM')
parser.add_argument('--train-words', required=True, help='the file that contains the tensor with the training inputs')
parser.add_argument('--train-tags', required=True, help='the file that contains the tensor with the training labels')
parser.add_argument('--test-words', required=True, help='the file that contains the tensor with the test inputs')
parser.add_argument('--test-tags', required=True, help='the file that contains the tensor with the test labels')
parser.add_argument('--eos-limit', type=int, default=None, help='number of sentences to use for train and test. Tipically used during debug to reduce epoch time.')
parser.add_argument('--word-index', required=True, help='the file that contains the word index')
parser.add_argument('--tag-index', required=True, help='the file that contains the tag index')
parser.add_argument('--model', required=True, help='the model file')
parser.add_argument('--results', help='the file where the performances of the saved model will be written')
parser.add_argument('--embeddings', help='optional word embeddings')
parser.add_argument('--num-epochs', type=int, default=30, help='number of training epochs')
parser.add_argument('--num-layers', type=int, default=1, help='number of RNN layers')
parser.add_argument('--hidden-dim', type=int, default=300, help='number of neurons of each RNN hidden layer')
parser.add_argument('--unidirectional', action='store_true', default=False, help='if this option is given, unidirectional (not bidirectiona) RNN is created')
parser.add_argument('--learning-rate', type=float, default=0.1, help='learning rate')
parser.add_argument('--momentum', type=float, default=0.8, help='momentum')
parser.add_argument('--dropout', default=0, type=float, help='dropout')
parser.add_argument('--resume', action='store_true', default=False, help='if True model is loaded from model path, else a new model is created')
return parser.parse_args()
def main():
torch.manual_seed(1)
options = parse_options()
device = 'cuda' if torch.cuda.is_available() else 'cpu'
train_words = torch.load(options.train_words).to(device)
train_tags = torch.load(options.train_tags).to(device)
test_words = torch.load(options.test_words).to(device)
test_tags = torch.load(options.test_tags).to(device)
word_index = lines(options.word_index)
tag_index = lines(options.tag_index)
if options.embeddings is not None:
embeddings = torch.load(options.embeddings).to(device)
embedding_len, embedding_dim = embeddings.shape
if embedding_len!=len(word_index):
raise Exception("number of words vectors in embedding %d != number of words in index %s" %(embedding_len, len(word_index)))
else:
embeddings = None
embedding_dim = 300
sos_tag = tag_index.index('<SOS>') if '<SOS>' in tag_index else None
eos_tag = tag_index.index('<EOS>')
nop_tag = tag_index.index('O')
train_eos = (train_tags == eos_tag).nonzero().squeeze().tolist()
test_eos = (test_tags == eos_tag).nonzero().squeeze().tolist()
train_eos = train_eos if options.eos_limit==None else train_eos[:options.eos_limit]
test_eos = test_eos if options.eos_limit==None else test_eos[:options.eos_limit]
print('Number of training sentences: %s' % (len(train_eos) - 1))
print('Number of test sentences: %s' % (len(test_eos) - 1))
if options.resume:
with open(options.model, 'rb') as f:
model = torch.load(f)
print('model resumed')
else:
model = Tagger(
vocab_size=len(word_index),
tag_index=tag_index,
embedding_dim=embedding_dim,
hidden_dim=options.hidden_dim,
num_layers=options.num_layers,
dropout=options.dropout,
bidirectional=not options.unidirectional
)
model = model.to(device)
criterion = model.neg_log_likelihood
optimizer = optim.SGD(model.parameters(), lr=options.learning_rate, momentum=options.momentum)
train_data = {
'words': train_words,
'tags': train_tags
}
test_data = {
'words': test_words,
'tags': test_tags
}
indices = {
'words': word_index,
'tags': tag_index
}
best_f1 = 0
for epoch in range(options.num_epochs):
print('====Epoch %s of %s====' % (epoch + 1, options.num_epochs))
run_epoch(model, criterion, optimizer, train_data, train_eos, sos_tag)
show_example(model, train_data, train_eos, indices, sos_tag)
stats = compute_stats(model, test_data, test_eos, nop_tag, sos_tag)
f1 = stats['f1']
if f1 > best_f1:
best_f1 = f1
with open(options.model, 'wb') as f:
torch.save(model, options.model)
write_results(stats, options, epoch)
if __name__ == '__main__':
main() | src/training/pytorch/ner/train.py | import random
import argparse
import sys
import os
import json
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..')))
from common.pytorch.ner.model import Tagger
def lines(path):
with open(path) as f:
return [l[:-1] for l in f.readlines()]
def invert(xs):
return { t: i for i, t in enumerate(xs) }
def harmonic_mean(a, b):
if a == 0 or b == 0:
return 0
m = ((1 / a) + (1 / b)) / 2
return 1 / m
def print_stat(name, value):
print('%s: %.2f%%' % (name, (100 * value)))
def run_epoch(model, criterion, optimizer, data, eos, sos_tag):
words = data['words']
tags = data['tags']
sos_offset = 1 if sos_tag == None else 2
print('Training...')
count, epoch_loss = 0, 0
for i, j in zip(eos, eos[1:]):
print('%s/%s' % (count, len(eos)-1), end='\r')
count += 1
# <EOS>, <SOS>, ..., <EOS>, <SOS>, ...
sentence = words[i+sos_offset:j]
sentence_tags = tags[i+sos_offset:j]
optimizer.zero_grad()
loss = criterion(sentence, sentence_tags)
epoch_loss += loss.item()
loss.backward()
optimizer.step()
print('Epoch avg loss: %.6f' % (epoch_loss / count))
def compute_stats(model, data, eos, nop_tag, sos_tag):
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# removes initial <SOS> tag if present
sos_offset = 1 if sos_tag == None else 2
words = data['words']
tags = data['tags']
print('Computing accuracy...')
count = 0
correct = 0
nop_predicted_as_nop = 0
nop_predicted_as_tag = 0
tag_predicted_correctly = 0
tag_predicted_as_nop = 0
tag_predicted_as_other_tag = 0
for i, j in zip(eos, eos[1:]):
print('%s/%s' % (count, len(eos)-1), end='\r')
count += 1
sentence = words[i+sos_offset:j]
real_tags = tags[i+sos_offset:j]
model.zero_grad()
_, predicted_tags = model(sentence)
predicted_tags = torch.tensor(predicted_tags).to(device)
real_tags_nop = real_tags == nop_tag
predicted_tags_nop = predicted_tags == nop_tag
matches = real_tags == predicted_tags
nop_predicted_as_nop += (real_tags_nop * matches).sum().item()
nop_predicted_as_tag += (real_tags_nop * (1 - matches)).sum().item()
tag_predicted_correctly += ((1 - real_tags_nop) * matches).sum().item()
tag_predicted_as_nop += ((1 - real_tags_nop) * (1 - matches) * predicted_tags_nop).sum().item()
tag_predicted_as_other_tag += ((1 - real_tags_nop) * (1 - matches) * (1 - predicted_tags_nop)).sum().item()
#print(tag_predicted_correctly, nop_predicted_as_tag, nop_predicted_as_nop, tag_predicted_as_other_tag, tag_predicted_as_nop)
predicted_as_tag = tag_predicted_correctly + nop_predicted_as_tag + tag_predicted_as_other_tag
actual_tags = tag_predicted_correctly + tag_predicted_as_nop + tag_predicted_as_other_tag
precision = tag_predicted_correctly / predicted_as_tag if (predicted_as_tag > 0) else 0
recall = tag_predicted_correctly / actual_tags if (actual_tags > 0) else 0
f1 = harmonic_mean(precision, recall)
#SOS and EOS are not tags to be predicted
tags_to_predict = tag_predicted_correctly + tag_predicted_as_nop + nop_predicted_as_tag + nop_predicted_as_nop + tag_predicted_as_other_tag
accuracy = (nop_predicted_as_nop + tag_predicted_correctly) / tags_to_predict
print_stat('Accuracy', accuracy)
print_stat('Precision', precision)
print_stat('Recall', recall)
print_stat('F1-score', f1)
return {'accuracy': accuracy, 'precision': precision, 'recall': recall, 'f1': f1}
def show_example(model, data, eos, indices, sos_tag):
device = 'cuda' if torch.cuda.is_available() else 'cpu'
words = data['words']
tags = data['tags']
word_index = indices['words']
tag_index = indices['tags']
i = random.randint(0, len(eos)-2)
sos_offset = 1 if sos_tag==None else 2
start, end = eos[i], eos[i+1]
sentence = words[start+sos_offset:end]
real_tags = tags[start+sos_offset:end]
text = ' '.join(word_index[i] for i in sentence.data)
real_tag_text = ' '.join(tag_index[i] for i in real_tags.data)
print('> ' + text)
print('Actual tags:')
print('> ' + real_tag_text)
_, predicted_tags = model(sentence)
predicted_tags = torch.tensor(predicted_tags).to(device)
predicted_tags_text = ' '.join(tag_index[i] for i in predicted_tags)
print('Predicted tags:')
print('> ' + predicted_tags_text)
def write_results(stats, options, epoch):
if options.results is not None:
results = {
'epoch': epoch,
'params': {
'num-epochs': options.num_epochs,
'model': options.model,
'train-words': options.train_words,
'train-tags': options.train_tags,
'test-words': options.test_words,
'test-tags': options.test_tags,
'embedding': options.embeddings,
'learning-rate': options.learning_rate,
'momentum': options.momentum,
'dropout': options.dropout,
'num-layers': options.num_layers,
'hidden-dim': options.hidden_dim,
'bidirectional': not options.unidirectional
},
'metrics': stats
}
with open(options.results, 'w') as f:
json.dump(results, f)
def parse_options():
parser = argparse.ArgumentParser(description='Run LSTM')
parser.add_argument('--train-words', required=True, help='the file that contains the tensor with the training inputs')
parser.add_argument('--train-tags', required=True, help='the file that contains the tensor with the training labels')
parser.add_argument('--test-words', required=True, help='the file that contains the tensor with the test inputs')
parser.add_argument('--test-tags', required=True, help='the file that contains the tensor with the test labels')
parser.add_argument('--eos-limit', type=int, default=None, help='number of sentences to use for train and test. Tipically used during debug to reduce epoch time.')
parser.add_argument('--word-index', required=True, help='the file that contains the word index')
parser.add_argument('--tag-index', required=True, help='the file that contains the tag index')
parser.add_argument('--model', required=True, help='the model file')
parser.add_argument('--results', help='the file where the performances of the saved model will be written')
parser.add_argument('--embeddings', help='optional word embeddings')
parser.add_argument('--num-epochs', type=int, default=30, help='number of training epochs')
parser.add_argument('--num-layers', type=int, default=1, help='number of RNN layers')
parser.add_argument('--hidden-dim', type=int, default=300, help='number of neurons of each RNN hidden layer')
parser.add_argument('--unidirectional', action='store_true', default=False, help='if this option is given, unidirectional (not bidirectiona) RNN is created')
parser.add_argument('--learning-rate', type=float, default=0.1, help='learning rate')
parser.add_argument('--momentum', type=float, default=0.8, help='momentum')
parser.add_argument('--dropout', default=0, type=float, help='dropout')
parser.add_argument('--resume', action='store_true', default=False, help='if True model is loaded from model path, else a new model is created')
return parser.parse_args()
def main():
torch.manual_seed(1)
options = parse_options()
device = 'cuda' if torch.cuda.is_available() else 'cpu'
train_words = torch.load(options.train_words).to(device)
train_tags = torch.load(options.train_tags).to(device)
test_words = torch.load(options.test_words).to(device)
test_tags = torch.load(options.test_tags).to(device)
word_index = lines(options.word_index)
tag_index = lines(options.tag_index)
if options.embeddings is not None:
embeddings = torch.load(options.embeddings).to(device)
embedding_len, embedding_dim = embeddings.shape
if embedding_len!=len(word_index):
raise Exception("number of words vectors in embedding %d != number of words in index %s" %(embedding_len, len(word_index)))
else:
embeddings = None
embedding_dim = 300
sos_tag = tag_index.index('<SOS>') if '<SOS>' in tag_index else None
eos_tag = tag_index.index('<EOS>')
nop_tag = tag_index.index('O')
train_eos = (train_tags == eos_tag).nonzero().squeeze().tolist()
test_eos = (test_tags == eos_tag).nonzero().squeeze().tolist()
train_eos = train_eos if options.eos_limit==None else train_eos[:options.eos_limit]
test_eos = test_eos if options.eos_limit==None else test_eos[:options.eos_limit]
print('Number of training sentences: %s' % (len(train_eos) - 1))
print('Number of test sentences: %s' % (len(test_eos) - 1))
if options.resume:
with open(options.model, 'rb') as f:
model = torch.load(f)
print('model resumed')
else:
model = Tagger(
vocab_size=len(word_index),
tag_index=tag_index,
embedding_dim=embedding_dim,
hidden_dim=options.hidden_dim,
num_layers=options.num_layers,
dropout=options.dropout,
bidirectional=not options.unidirectional
)
model = model.to(device)
criterion = model.neg_log_likelihood
optimizer = optim.SGD(model.parameters(), lr=options.learning_rate, momentum=options.momentum)
train_data = {
'words': train_words,
'tags': train_tags
}
test_data = {
'words': test_words,
'tags': test_tags
}
indices = {
'words': word_index,
'tags': tag_index
}
best_f1 = 0
for epoch in range(options.num_epochs):
print('====Epoch %s of %s====' % (epoch + 1, options.num_epochs))
run_epoch(model, criterion, optimizer, train_data, train_eos, sos_tag)
show_example(model, train_data, train_eos, indices, sos_tag)
stats = compute_stats(model, test_data, test_eos, nop_tag, sos_tag)
f1 = stats['f1']
if f1 > best_f1:
best_f1 = f1
with open(options.model, 'wb') as f:
torch.save(model, options.model)
write_results(stats, options, epoch)
if __name__ == '__main__':
main() | 0.445771 | 0.313906 |
import sys
import MySQLdb
import argparse
import progressbar
import pandas as pd
from collections import OrderedDict
OUTPUT_FILE = 'db_conflicts.csv'
def main(user, passwd, database):
# Open database connection
db = MySQLdb.connect("localhost", user, passwd, database)
# Prepare a cursor object using cursor() method.
cursor = db.cursor()
# Retrieve all created conflicts.
query = """SELECT con_id, conf_id, clause_id_1, clause_id_2, type_id
FROM conflicts
WHERE classifier_id is NULL"""
cursor.execute(query)
clauses_tup = cursor.fetchall()
# Open file to write.
w_file = open(OUTPUT_FILE, 'w')
# Write header.
d = OrderedDict()
d['conflict_id'] = list()
d['contract_id'] = list()
d['norm_id_1'] = list()
d['norm_id_2'] = list()
d['norm1'] = list()
d['norm2'] = list()
d['conf_type'] = list()
# Fetch a single row using fetchone() method.
for tup in clauses_tup:
con_id = tup[0]
conf_id = tup[1]
clause_id_1 = tup[2]
clause_id_2 = tup[3]
type_id = tup[4]
if not type_id:
type_id = 1
elif int(type_id) == 2:
continue
# Get contract path.
cntrct_path_query = """SELECT path_to_file
FROM contracts
WHERE con_id=%d""" % con_id
cursor.execute(cntrct_path_query)
contract_path = cursor.fetchone()[0]
# Get contract text.
contract_text = open(contract_path, 'r').read()
# Get the range for clause 1.
rng_1_query = """SELECT clause_range
FROM clauses
WHERE clause_id=%d""" % clause_id_1
cursor.execute(rng_1_query)
clause_1_range = cursor.fetchone()[0]
clause_1_range = clause_1_range.strip('()').split(',')
# Get the range for clause 2.
rng_2_query = """SELECT clause_range
FROM clauses
WHERE clause_id=%d""" % clause_id_2
cursor.execute(rng_2_query)
clause_2_range = cursor.fetchone()[0]
clause_2_range = clause_2_range.strip('()').split(',')
# Get clause texts.
clause_1 = contract_text[int(clause_1_range[0]):int(clause_1_range[1])]
clause_2 = contract_text[int(clause_2_range[0]):int(clause_2_range[1])]
# Store clause pair to a list.
if clause_1 and clause_2:
d['contract_id'].append(con_id)
d['conflict_id'].append(conf_id)
d['norm_id_1'].append(clause_id_1)
d['norm_id_2'].append(clause_id_2)
d['norm1'].append(clause_1)
d['norm2'].append(clause_2)
d['conf_type'].append(type_id)
# Disconnect from database.
db.close()
df = pd.DataFrame(data=d)
df.to_csv(OUTPUT_FILE, index=False)
print "Conflicts gathered and saved at %s" % OUTPUT_FILE
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("db_user", help="DB username.")
parser.add_argument("passwd", help="<PASSWORD>.")
parser.add_argument("database", help="DB name to connect.")
parser.add_argument("--output_file", help="Path name to the output file.")
args = parser.parse_args()
if args.output_file:
OUTPUT_FILE = args.output_file
main(args.db_user, args.passwd, args.database) | codes/scripts/get_db_conflicts.py | import sys
import MySQLdb
import argparse
import progressbar
import pandas as pd
from collections import OrderedDict
OUTPUT_FILE = 'db_conflicts.csv'
def main(user, passwd, database):
# Open database connection
db = MySQLdb.connect("localhost", user, passwd, database)
# Prepare a cursor object using cursor() method.
cursor = db.cursor()
# Retrieve all created conflicts.
query = """SELECT con_id, conf_id, clause_id_1, clause_id_2, type_id
FROM conflicts
WHERE classifier_id is NULL"""
cursor.execute(query)
clauses_tup = cursor.fetchall()
# Open file to write.
w_file = open(OUTPUT_FILE, 'w')
# Write header.
d = OrderedDict()
d['conflict_id'] = list()
d['contract_id'] = list()
d['norm_id_1'] = list()
d['norm_id_2'] = list()
d['norm1'] = list()
d['norm2'] = list()
d['conf_type'] = list()
# Fetch a single row using fetchone() method.
for tup in clauses_tup:
con_id = tup[0]
conf_id = tup[1]
clause_id_1 = tup[2]
clause_id_2 = tup[3]
type_id = tup[4]
if not type_id:
type_id = 1
elif int(type_id) == 2:
continue
# Get contract path.
cntrct_path_query = """SELECT path_to_file
FROM contracts
WHERE con_id=%d""" % con_id
cursor.execute(cntrct_path_query)
contract_path = cursor.fetchone()[0]
# Get contract text.
contract_text = open(contract_path, 'r').read()
# Get the range for clause 1.
rng_1_query = """SELECT clause_range
FROM clauses
WHERE clause_id=%d""" % clause_id_1
cursor.execute(rng_1_query)
clause_1_range = cursor.fetchone()[0]
clause_1_range = clause_1_range.strip('()').split(',')
# Get the range for clause 2.
rng_2_query = """SELECT clause_range
FROM clauses
WHERE clause_id=%d""" % clause_id_2
cursor.execute(rng_2_query)
clause_2_range = cursor.fetchone()[0]
clause_2_range = clause_2_range.strip('()').split(',')
# Get clause texts.
clause_1 = contract_text[int(clause_1_range[0]):int(clause_1_range[1])]
clause_2 = contract_text[int(clause_2_range[0]):int(clause_2_range[1])]
# Store clause pair to a list.
if clause_1 and clause_2:
d['contract_id'].append(con_id)
d['conflict_id'].append(conf_id)
d['norm_id_1'].append(clause_id_1)
d['norm_id_2'].append(clause_id_2)
d['norm1'].append(clause_1)
d['norm2'].append(clause_2)
d['conf_type'].append(type_id)
# Disconnect from database.
db.close()
df = pd.DataFrame(data=d)
df.to_csv(OUTPUT_FILE, index=False)
print "Conflicts gathered and saved at %s" % OUTPUT_FILE
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("db_user", help="DB username.")
parser.add_argument("passwd", help="<PASSWORD>.")
parser.add_argument("database", help="DB name to connect.")
parser.add_argument("--output_file", help="Path name to the output file.")
args = parser.parse_args()
if args.output_file:
OUTPUT_FILE = args.output_file
main(args.db_user, args.passwd, args.database) | 0.265785 | 0.099996 |
import hashlib
import time
DISTRIBUTION_NAME = 'sawtooth-payment'
DEFAULT_URL = 'http://127.0.0.1:8009'
TP_FAMILYNAME = 'payment'
TP_VERSION = '1.0'
PAYMENT_ENTITY_CODE = '01'
PATIENT_ENTITY_CODE = '02'
CONTRACT_ENTITY_CODE = '03'
CONTRACT_PAYMENT__RELATION_CODE = "51"
PAYMENT_CONTRACT__RELATION_CODE = "52"
PATIENT_PAYMENT__RELATION_CODE = "61"
PAYMENT_PATIENT__RELATION_CODE = "62"
def _hash(identifier):
return hashlib.sha512(identifier.encode('utf-8')).hexdigest()
TP_PREFFIX_HEX6 = _hash(TP_FAMILYNAME)[0:6]
# Payment entity
def make_payment_address(payment_id):
return TP_PREFFIX_HEX6 + PAYMENT_ENTITY_CODE + _hash(payment_id)[:62]
def make_payment_list_address():
return TP_PREFFIX_HEX6 + PAYMENT_ENTITY_CODE
# Contract <-> Payment relation
def make_contract_payment__relation_address(contract_id, payment_id):
return TP_PREFFIX_HEX6 + CONTRACT_PAYMENT__RELATION_CODE + \
CONTRACT_ENTITY_CODE + _hash(contract_id)[:30] + \
PAYMENT_ENTITY_CODE + _hash(payment_id)[:28]
def make_payment_list_by_contract_address(contract_id):
return TP_PREFFIX_HEX6 + CONTRACT_PAYMENT__RELATION_CODE + CONTRACT_ENTITY_CODE + _hash(contract_id)[:30]
# Payment <-> Contract relation
def make_payment_contract__relation_address(payment_id, contract_id):
return TP_PREFFIX_HEX6 + PAYMENT_CONTRACT__RELATION_CODE + \
PAYMENT_ENTITY_CODE + _hash(payment_id)[:30] + \
CONTRACT_ENTITY_CODE + _hash(contract_id)[:28]
def make_contract_list_by_payment_address(payment_id):
return TP_PREFFIX_HEX6 + PAYMENT_CONTRACT__RELATION_CODE + PAYMENT_ENTITY_CODE + _hash(payment_id)[:30]
# Patient <-> Payment relation
def make_patient_payment__relation_address(patient_pkey, payment_id):
return TP_PREFFIX_HEX6 + PATIENT_PAYMENT__RELATION_CODE + \
PATIENT_ENTITY_CODE + _hash(patient_pkey)[:30] + \
PAYMENT_ENTITY_CODE + _hash(payment_id)[:28]
def make_payment_list_by_patient_address(patient_pkey):
return TP_PREFFIX_HEX6 + PATIENT_PAYMENT__RELATION_CODE + PATIENT_ENTITY_CODE + _hash(patient_pkey)[:30]
# Payment <-> Patient relation
def make_payment_patient__relation_address(payment_id, patient_pkey):
return TP_PREFFIX_HEX6 + PAYMENT_PATIENT__RELATION_CODE + \
PAYMENT_ENTITY_CODE + _hash(payment_id)[:30] + \
PATIENT_ENTITY_CODE + _hash(patient_pkey)[:28]
def make_patient_list_by_payment_address(payment_id):
return TP_PREFFIX_HEX6 + PAYMENT_PATIENT__RELATION_CODE + PAYMENT_ENTITY_CODE + _hash(payment_id)[:30]
def get_current_timestamp():
return int(round(time.time() * 1000)) | payment_common/helper.py | import hashlib
import time
DISTRIBUTION_NAME = 'sawtooth-payment'
DEFAULT_URL = 'http://127.0.0.1:8009'
TP_FAMILYNAME = 'payment'
TP_VERSION = '1.0'
PAYMENT_ENTITY_CODE = '01'
PATIENT_ENTITY_CODE = '02'
CONTRACT_ENTITY_CODE = '03'
CONTRACT_PAYMENT__RELATION_CODE = "51"
PAYMENT_CONTRACT__RELATION_CODE = "52"
PATIENT_PAYMENT__RELATION_CODE = "61"
PAYMENT_PATIENT__RELATION_CODE = "62"
def _hash(identifier):
return hashlib.sha512(identifier.encode('utf-8')).hexdigest()
TP_PREFFIX_HEX6 = _hash(TP_FAMILYNAME)[0:6]
# Payment entity
def make_payment_address(payment_id):
return TP_PREFFIX_HEX6 + PAYMENT_ENTITY_CODE + _hash(payment_id)[:62]
def make_payment_list_address():
return TP_PREFFIX_HEX6 + PAYMENT_ENTITY_CODE
# Contract <-> Payment relation
def make_contract_payment__relation_address(contract_id, payment_id):
return TP_PREFFIX_HEX6 + CONTRACT_PAYMENT__RELATION_CODE + \
CONTRACT_ENTITY_CODE + _hash(contract_id)[:30] + \
PAYMENT_ENTITY_CODE + _hash(payment_id)[:28]
def make_payment_list_by_contract_address(contract_id):
return TP_PREFFIX_HEX6 + CONTRACT_PAYMENT__RELATION_CODE + CONTRACT_ENTITY_CODE + _hash(contract_id)[:30]
# Payment <-> Contract relation
def make_payment_contract__relation_address(payment_id, contract_id):
return TP_PREFFIX_HEX6 + PAYMENT_CONTRACT__RELATION_CODE + \
PAYMENT_ENTITY_CODE + _hash(payment_id)[:30] + \
CONTRACT_ENTITY_CODE + _hash(contract_id)[:28]
def make_contract_list_by_payment_address(payment_id):
return TP_PREFFIX_HEX6 + PAYMENT_CONTRACT__RELATION_CODE + PAYMENT_ENTITY_CODE + _hash(payment_id)[:30]
# Patient <-> Payment relation
def make_patient_payment__relation_address(patient_pkey, payment_id):
return TP_PREFFIX_HEX6 + PATIENT_PAYMENT__RELATION_CODE + \
PATIENT_ENTITY_CODE + _hash(patient_pkey)[:30] + \
PAYMENT_ENTITY_CODE + _hash(payment_id)[:28]
def make_payment_list_by_patient_address(patient_pkey):
return TP_PREFFIX_HEX6 + PATIENT_PAYMENT__RELATION_CODE + PATIENT_ENTITY_CODE + _hash(patient_pkey)[:30]
# Payment <-> Patient relation
def make_payment_patient__relation_address(payment_id, patient_pkey):
return TP_PREFFIX_HEX6 + PAYMENT_PATIENT__RELATION_CODE + \
PAYMENT_ENTITY_CODE + _hash(payment_id)[:30] + \
PATIENT_ENTITY_CODE + _hash(patient_pkey)[:28]
def make_patient_list_by_payment_address(payment_id):
return TP_PREFFIX_HEX6 + PAYMENT_PATIENT__RELATION_CODE + PAYMENT_ENTITY_CODE + _hash(payment_id)[:30]
def get_current_timestamp():
return int(round(time.time() * 1000)) | 0.427875 | 0.04798 |
import argparse
import os
import json
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--wiki', type=str, help="The file containing the annotated wiki samples.", default='wiki-anno-samples.jsonl')
parser.add_argument('--bbc', type=str, help="The file containing the annotated bbc samples.", default='bbc-anno-samples.jsonl')
parser.add_argument('--ext', type=str, help="A file extension that should be added to the modified input files.", default='-simple')
args = parser.parse_args()
wiki_samples_fn = args.wiki
bbc_samples_fn = args.bbc
extension = args.ext
# the mapping from old to new labels
mapping = {
0: 0,
5: 1,
4: 2,
3: 3,
2: 4
}
if not os.path.isfile(wiki_samples_fn):
raise(Exception('The file ' + wiki_samples_fn + ' does not exists'))
if not os.path.isfile(bbc_samples_fn):
raise(Exception('The file ' + bbc_samples_fn + ' does not exists'))
# output file names
wiki_fn, wiki_ext = os.path.splitext(wiki_samples_fn)
wiki_out_fn = wiki_fn + extension + wiki_ext
bbc_fn, bbc_ext = os.path.splitext(bbc_samples_fn)
bbc_out_fn = bbc_fn + extension + bbc_ext
print('The output file for wiki samples will be ' + wiki_out_fn)
if os.path.isfile(wiki_out_fn):
print('The file %s already exists and will be overwritten.' % (wiki_out_fn))
print('The output file for bbc samples will be ' + bbc_out_fn)
if os.path.isfile(bbc_out_fn):
print('The file %s already exists and will be overwritten.' % (bbc_out_fn))
# read samples
with open(wiki_samples_fn, 'r') as f:
wiki_samples = [json.loads(line) for line in f]
with open(bbc_samples_fn, 'r') as f:
bbc_samples = [json.loads(line) for line in f]
print()
print('Read %d annotated bbc samples and %d annotated wiki samples.' % (len(bbc_samples), len(wiki_samples)))
# Write samples that have valid MI samples (all except those having 1, 6 or 7 as label).
# All other labels get asssigned to their new labeling according to the dict "mapping".
skipped = 0
wiki_n = 0
bbc_n = 0
with open(bbc_out_fn, 'w') as f:
for d in bbc_samples:
if d['annotation']['mi'] < 6 and d['annotation']['mi'] != 1:
bbc_n += 1
d['annotation']['mi'] = mapping[d['annotation']['mi']]
jsonLine = json.dumps(d)
f.write(jsonLine + '\n')
else:
skipped += 1
with open(wiki_out_fn, 'w') as f:
for d in wiki_samples:
if d['annotation']['mi'] < 6 and d['annotation']['mi'] != 1:
wiki_n += 1
d['annotation']['mi'] = mapping[d['annotation']['mi']]
jsonLine = json.dumps(d)
f.write(jsonLine + '\n')
else:
skipped += 1
print('Output contains %d annotated bbc samples and %d annotated wiki samples.' % (bbc_n, wiki_n))
print('Skipped %d samples in total.' % (skipped)) | annotations/simplify_annotations.py |
import argparse
import os
import json
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--wiki', type=str, help="The file containing the annotated wiki samples.", default='wiki-anno-samples.jsonl')
parser.add_argument('--bbc', type=str, help="The file containing the annotated bbc samples.", default='bbc-anno-samples.jsonl')
parser.add_argument('--ext', type=str, help="A file extension that should be added to the modified input files.", default='-simple')
args = parser.parse_args()
wiki_samples_fn = args.wiki
bbc_samples_fn = args.bbc
extension = args.ext
# the mapping from old to new labels
mapping = {
0: 0,
5: 1,
4: 2,
3: 3,
2: 4
}
if not os.path.isfile(wiki_samples_fn):
raise(Exception('The file ' + wiki_samples_fn + ' does not exists'))
if not os.path.isfile(bbc_samples_fn):
raise(Exception('The file ' + bbc_samples_fn + ' does not exists'))
# output file names
wiki_fn, wiki_ext = os.path.splitext(wiki_samples_fn)
wiki_out_fn = wiki_fn + extension + wiki_ext
bbc_fn, bbc_ext = os.path.splitext(bbc_samples_fn)
bbc_out_fn = bbc_fn + extension + bbc_ext
print('The output file for wiki samples will be ' + wiki_out_fn)
if os.path.isfile(wiki_out_fn):
print('The file %s already exists and will be overwritten.' % (wiki_out_fn))
print('The output file for bbc samples will be ' + bbc_out_fn)
if os.path.isfile(bbc_out_fn):
print('The file %s already exists and will be overwritten.' % (bbc_out_fn))
# read samples
with open(wiki_samples_fn, 'r') as f:
wiki_samples = [json.loads(line) for line in f]
with open(bbc_samples_fn, 'r') as f:
bbc_samples = [json.loads(line) for line in f]
print()
print('Read %d annotated bbc samples and %d annotated wiki samples.' % (len(bbc_samples), len(wiki_samples)))
# Write samples that have valid MI samples (all except those having 1, 6 or 7 as label).
# All other labels get asssigned to their new labeling according to the dict "mapping".
skipped = 0
wiki_n = 0
bbc_n = 0
with open(bbc_out_fn, 'w') as f:
for d in bbc_samples:
if d['annotation']['mi'] < 6 and d['annotation']['mi'] != 1:
bbc_n += 1
d['annotation']['mi'] = mapping[d['annotation']['mi']]
jsonLine = json.dumps(d)
f.write(jsonLine + '\n')
else:
skipped += 1
with open(wiki_out_fn, 'w') as f:
for d in wiki_samples:
if d['annotation']['mi'] < 6 and d['annotation']['mi'] != 1:
wiki_n += 1
d['annotation']['mi'] = mapping[d['annotation']['mi']]
jsonLine = json.dumps(d)
f.write(jsonLine + '\n')
else:
skipped += 1
print('Output contains %d annotated bbc samples and %d annotated wiki samples.' % (bbc_n, wiki_n))
print('Skipped %d samples in total.' % (skipped)) | 0.319971 | 0.174235 |
import sys
class Node(object):
""" Abstract base class for AST nodes.
"""
def children(self):
""" A sequence of all children that are Nodes
"""
pass
def __str__(self):
return self.show()
def __repr__(self):
return str(self.to_tuples())
def to_tuples(self):
result = [self.__class__.__name__]
attr_list = [getattr(self, n) for n in self.attr_names]
result.extend(attr_list)
for (child_name, child) in self.children():
result.append( child.to_tuples() )
return tuple(result)
def show(self,
buf=None,
offset=0,
attrnames=False,
nodenames=False,
showcoord=False,
_my_node_name=None):
""" Pretty print the Node and all its attributes and
children (recursively) to a buffer.
buf:
Open IO buffer into which the Node is printed.
If it is None or let empty, instead a string
is returned
offset:
Initial offset (amount of leading spaces)
attrnames:
True if you want to see the attribute names in
name=value pairs. False to only see the values.
nodenames:
True if you want to see the actual node names
within their parents.
showcoord:
Do you want the coordinates of each Node to be
displayed.
"""
s = ''
lead = ' ' * offset
if nodenames and _my_node_name is not None:
s += lead + self.__class__.__name__+ ' <' + _my_node_name + '>: '
else:
s += lead + self.__class__.__name__+ ': '
if self.attr_names:
if attrnames:
nvlist = [(n, getattr(self,n)) for n in self.attr_names]
attrstr = ', '.join('%s=%s' % nv for nv in nvlist)
else:
vlist = [getattr(self, n) for n in self.attr_names]
attrstr = ', '.join('%s' % v for v in vlist)
s += attrstr
if showcoord: s += ' (at %s)' % self.coord
s += '\n'
for (child_name, child) in self.children():
s += child.show(
buf,
offset=offset + 2,
attrnames=attrnames,
nodenames=nodenames,
showcoord=showcoord,
_my_node_name=child_name)
if buf is None: return s
else: buf.write(s)
class NodeVisitor(object):
""" A base NodeVisitor class for visiting c_ast nodes.
Subclass it and define your own visit_XXX methods, where
XXX is the class name you want to visit with these
methods.
For example:
class ConstantVisitor(NodeVisitor):
def __init__(self):
self.values = []
def visit_Constant(self, node):
self.values.append(node.value)
Creates a list of values of all the constant nodes
encountered below the given node. To use it:
cv = ConstantVisitor()
cv.visit(node)
Notes:
* generic_visit() will be called for AST nodes for which
no visit_XXX method was defined.
* The children of nodes for which a visit_XXX was
defined will not be visited - if you need this, call
generic_visit() on the node.
You can use:
NodeVisitor.generic_visit(self, node)
* Modeled after Python's own AST visiting facilities
(the ast module of Python 3.0)
"""
def visit(self, node):
""" Visit a node.
"""
method = 'visit_' + node.__class__.__name__
visitor = getattr(self, method, self.generic_visit)
return visitor(node)
def generic_visit(self, node):
""" Called if no explicit visitor function exists for a
node. Implements preorder visiting of the node.
"""
for c_name, c in node.children():
self.visit(c)
class As (Node):
def __init__(self, expr, coord=None):
self.tags = []
self.expr = expr
self.coord = coord
def children(self):
nodelist = []
if self.expr is not None: nodelist.append(("expr", self.expr))
return tuple(nodelist)
attr_names = ()
class Assert(Node):
def __init__(self, cond, expr, coord=None):
self.tags = []
self.cond = cond
self.expr = expr
self.coord = coord
def children(self):
nodelist = []
if self.cond is not None: nodelist.append(("cond", self.cond))
if self.expr is not None: nodelist.append(("expr", self.expr))
return tuple(nodelist)
attr_names = ()
class ArgumentList (Node):
def __init__(self, arguments, coord=None):
self.tags = []
self.arguments = arguments
self.coord = coord
def children(self):
nodelist = []
for i, child in enumerate(self.arguments or []):
nodelist.append(("arguments[%d]" % i, child))
return tuple(nodelist)
attr_names = ()
class Assignment(Node):
def __init__(self, op, target, right, coord=None):
self.tags = []
self.op = op
self.target = target
self.right = right
self.coord = coord
def children(self):
nodelist = []
if self.target is not None: nodelist.append(("target", self.target))
if self.right is not None: nodelist.append(("right", self.right))
return tuple(nodelist)
attr_names = ('op',)
class AttributeRef (Node):
def __init__(self, obj, field, coord=None):
self.tags = []
self.obj = obj
self.field = field
self.coord = coord
def children(self):
nodelist = []
if self.obj is not None: nodelist.append(("obj", self.obj))
if self.field is not None: nodelist.append(("field", self.field))
return tuple(nodelist)
attr_names = ()
class BinaryOp(Node):
def __init__(self, op, left, right, coord=None):
self.tags = []
self.op = op
self.left = left
self.right = right
self.coord = coord
def children(self):
nodelist = []
if self.left is not None: nodelist.append(("left", self.left))
if self.right is not None: nodelist.append(("right", self.right))
return tuple(nodelist)
attr_names = ('op',)
class Backtrack(Node):
def __init__(self, coord=None):
self.tags = []
self.coord = coord
def children(self):
return ()
attr_names = ()
class Block(Node):
def __init__(self, stmts, coord=None):
self.tags = []
self.stmts = stmts
self.coord = coord
def children(self):
nodelist = []
for i, child in enumerate(self.stmts or []):
nodelist.append(("stmts[%d]" % i, child))
return tuple(nodelist)
attr_names = ()
class Break(Node):
def __init__(self, coord=None):
self.tags = []
self.coord = coord
def children(self):
return ()
attr_names = ()
class Call (Node):
def __init__(self, name, args, coord=None):
self.tags = []
self.name = name
self.args = args
self.coord = coord
def children(self):
nodelist = []
if self.name is not None: nodelist.append(("name", self.name))
if self.args is not None: nodelist.append(("args", self.args))
return tuple(nodelist)
attr_names = ()
class Case(Node):
def __init__(self, cond, body, coord=None):
self.tags = []
self.cond = cond
self.body = body
self.coord = coord
def children(self):
nodelist = []
if self.cond is not None: nodelist.append(("cond", self.cond))
if self.body is not None: nodelist.append(("body", self.body))
return tuple(nodelist)
attr_names = ()
class CaseList (Node):
def __init__(self, cases, coord=None):
self.tags = []
self.cases = cases
self.coord = coord
def children(self):
nodelist = []
for i, child in enumerate(self.cases or []):
nodelist.append(("cases[%d]" % i, child))
return tuple(nodelist)
attr_names = ()
class CatchClause(Node):
def __init__(self, type, name, block, coord=None):
self.tags = []
self.type = type
self.name = name
self.block = block
self.coord = coord
def children(self):
nodelist = []
if self.name is not None: nodelist.append(("name", self.name))
if self.block is not None: nodelist.append(("block", self.block))
return tuple(nodelist)
attr_names = ('type',)
class Catches (Node):
def __init__(self, clauses, coord=None):
self.tags = []
self.clauses = clauses
self.coord = coord
def children(self):
nodelist = []
for i, child in enumerate(self.clauses or []):
nodelist.append(("clauses[%d]" % i, child))
return tuple(nodelist)
attr_names = ()
class Check(Node):
def __init__(self, block, coord=None):
self.tags = []
self.block = block
self.coord = coord
def children(self):
nodelist = []
if self.block is not None: nodelist.append(("block", self.block))
return tuple(nodelist)
attr_names = ()
class Class(Node):
def __init__(self, name, params, block, static, coord=None):
self.tags = []
self.name = name
self.params = params
self.block = block
self.static = static
self.coord = coord
def children(self):
nodelist = []
if self.name is not None: nodelist.append(("name", self.name))
if self.params is not None: nodelist.append(("params", self.params))
if self.block is not None: nodelist.append(("block", self.block))
if self.static is not None: nodelist.append(("static", self.static))
return tuple(nodelist)
attr_names = ()
class Comprehension (Node):
def __init__(self, klass, expr, iterators, cond, coord=None):
self.tags = []
self.klass = klass
self.expr = expr
self.iterators = iterators
self.cond = cond
self.coord = coord
def children(self):
nodelist = []
if self.expr is not None: nodelist.append(("expr", self.expr))
if self.iterators is not None: nodelist.append(("iterators", self.iterators))
if self.cond is not None: nodelist.append(("cond", self.cond))
return tuple(nodelist)
attr_names = ('klass',)
class Continue(Node):
def __init__(self, coord=None):
self.tags = []
self.coord = coord
def children(self):
return ()
attr_names = ()
class Constant(Node):
def __init__(self, klass, value, coord=None):
self.tags = []
self.klass = klass
self.value = value
self.coord = coord
def children(self):
nodelist = []
return tuple(nodelist)
attr_names = ('klass','value',)
class Default(Node):
def __init__(self, body, coord=None):
self.tags = []
self.body = body
self.coord = coord
def children(self):
nodelist = []
if self.body is not None: nodelist.append(("body", self.body))
return tuple(nodelist)
attr_names = ()
class DoWhile(Node):
def __init__(self, cond, body, coord=None):
self.tags = []
self.cond = cond
self.body = body
self.coord = coord
def children(self):
nodelist = []
if self.cond is not None: nodelist.append(("cond", self.cond))
if self.body is not None: nodelist.append(("body", self.body))
return tuple(nodelist)
attr_names = ()
class Exit(Node):
def __init__(self, coord=None):
self.tags = []
self.coord = coord
def children(self):
return ()
attr_names = ()
class ExprList(Node):
def __init__(self, exprs, coord=None):
self.tags = []
self.exprs = exprs
self.coord = coord
def children(self):
nodelist = []
for i, child in enumerate(self.exprs or []):
nodelist.append(("exprs[%d]" % i, child))
return tuple(nodelist)
attr_names = ()
class FileAST(Node):
def __init__(self, stmts, coord=None):
self.tags = []
self.stmts = stmts
self.coord = coord
def children(self):
nodelist = []
for i, child in enumerate(self.stmts or []):
nodelist.append(("stmts[%d]" % i, child))
return tuple(nodelist)
attr_names = ()
class For(Node):
def __init__(self, iterators, body, coord=None):
self.tags = []
self.iterators = iterators
self.body = body
self.coord = coord
def children(self):
nodelist = []
if self.iterators is not None: nodelist.append(("iterators", self.iterators))
if self.body is not None: nodelist.append(("body", self.body))
return tuple(nodelist)
attr_names = ()
class Identifier(Node):
def __init__(self, name, coord=None):
self.tags = []
self.name = name
self.coord = coord
def children(self):
nodelist = []
return tuple(nodelist)
attr_names = ('name',)
class If(Node):
def __init__(self, cond, iftrue, iffalse, coord=None):
self.tags = []
self.cond = cond
self.iftrue = iftrue
self.iffalse = iffalse
self.coord = coord
def children(self):
nodelist = []
if self.cond is not None: nodelist.append(("cond", self.cond))
if self.iftrue is not None: nodelist.append(("iftrue", self.iftrue))
if self.iffalse is not None: nodelist.append(("iffalse", self.iffalse))
return tuple(nodelist)
attr_names = ()
class Interpolation(Node):
def __init__(self, format_string, expressions, coord=None):
self.tags = []
self.format_string = format_string
self.expressions = expressions
self.coord = coord
def children(self):
nodelist = []
if self.format_string is not None: nodelist.append(("format_string", self.format_string))
if self.expressions is not None: nodelist.append(("expressions", self.expressions))
return tuple(nodelist)
attr_names = ()
class Iterator(Node):
def __init__(self, assignable, expression, coord=None):
self.tags = []
self.assignable = assignable
self.expression = expression
self.coord = coord
def children(self):
nodelist = []
if self.assignable is not None: nodelist.append(("assignable", self.assignable))
if self.expression is not None: nodelist.append(("expression", self.expression))
return tuple(nodelist)
attr_names = ()
class IteratorChain(Node):
def __init__(self, mode, iterators, coord=None):
self.tags = []
self.mode = mode
self.iterators = iterators
self.coord = coord
def children(self):
nodelist = []
for i, child in enumerate(self.iterators or []):
nodelist.append(("iterators[%d]" % i, child))
return tuple(nodelist)
attr_names = ('mode',)
class Lambda(Node):
def __init__(self, params, body, coord=None):
self.tags = []
self.params = params
self.body = body
self.coord = coord
def children(self):
nodelist = []
if self.params is not None: nodelist.append(("params", self.params))
if self.body is not None: nodelist.append(("body", self.body))
return tuple(nodelist)
attr_names = ()
class List(Node):
def __init__(self, items, coord=None):
self.tags = []
self.items = items
self.coord = coord
def children(self):
nodelist = []
for i, child in enumerate(self.items or []):
nodelist.append(("items[%d]" % i, child))
return tuple(nodelist)
attr_names = ()
class Match(Node):
def __init__(self, matchee, case_list, default, coord=None):
self.tags = []
self.matchee = matchee
self.case_list = case_list
self.default = default
self.coord = coord
def children(self):
nodelist = []
if self.matchee is not None: nodelist.append(("matchee", self.matchee))
if self.case_list is not None: nodelist.append(("case_list", self.case_list))
if self.default is not None: nodelist.append(("default", self.default))
return tuple(nodelist)
attr_names = ()
class MatchCase(Node):
def __init__(self, pattern, cond, body, coord=None):
self.tags = []
self.pattern = pattern
self.cond = cond
self.body = body
self.coord = coord
def children(self):
nodelist = []
if self.pattern is not None: nodelist.append(("pattern", self.pattern))
if self.cond is not None: nodelist.append(("cond", self.cond))
if self.body is not None: nodelist.append(("body", self.body))
return tuple(nodelist)
attr_names = ()
class Param (Node):
def __init__(self, name, coord=None):
self.tags = []
self.name = name
self.coord = coord
def children(self):
nodelist = []
return tuple(nodelist)
attr_names = ('name',)
class ParamList (Node):
def __init__(self, params, coord=None):
self.tags = []
self.params = params
self.coord = coord
def children(self):
nodelist = []
for i, child in enumerate(self.params or []):
nodelist.append(("params[%d]" % i, child))
return tuple(nodelist)
attr_names = ()
class Pattern(Node):
def __init__(self, head, tail, coord=None):
self.tags = []
self.head = head
self.tail = tail
self.coord = coord
def children(self):
nodelist = []
if self.head is not None: nodelist.append(("head", self.head))
if self.tail is not None: nodelist.append(("tail", self.tail))
return tuple(nodelist)
attr_names = ()
class Procedure(Node):
def __init__(self, name, clazz, params, body, coord=None):
self.tags = []
self.name = name
self.clazz = clazz
self.params = params
self.body = body
self.coord = coord
def children(self):
nodelist = []
if self.params is not None: nodelist.append(("params", self.params))
if self.body is not None: nodelist.append(("body", self.body))
return tuple(nodelist)
attr_names = ('name','clazz',)
class Quantor(Node):
def __init__(self, name, iterators, cond, coord=None):
self.tags = []
self.name = name
self.iterators = iterators
self.cond = cond
self.coord = coord
def children(self):
nodelist = []
if self.iterators is not None: nodelist.append(("iterators", self.iterators))
if self.cond is not None: nodelist.append(("cond", self.cond))
return tuple(nodelist)
attr_names = ('name',)
class Range (Node):
def __init__(self, klass, a, b, c, coord=None):
self.tags = []
self.klass = klass
self.a = a
self.b = b
self.c = c
self.coord = coord
def children(self):
nodelist = []
if self.a is not None: nodelist.append(("a", self.a))
if self.b is not None: nodelist.append(("b", self.b))
if self.c is not None: nodelist.append(("c", self.c))
return tuple(nodelist)
attr_names = ('klass',)
class Regex (Node):
def __init__(self, expr, as_expr, cond, block, coord=None):
self.tags = []
self.expr = expr
self.as_expr = as_expr
self.cond = cond
self.block = block
self.coord = coord
def children(self):
nodelist = []
if self.expr is not None: nodelist.append(("expr", self.expr))
if self.as_expr is not None: nodelist.append(("as_expr", self.as_expr))
if self.cond is not None: nodelist.append(("cond", self.cond))
if self.block is not None: nodelist.append(("block", self.block))
return tuple(nodelist)
attr_names = ()
class Return (Node):
def __init__(self, expr, coord=None):
self.tags = []
self.expr = expr
self.coord = coord
def children(self):
nodelist = []
if self.expr is not None: nodelist.append(("expr", self.expr))
return tuple(nodelist)
attr_names = ()
class Scan(Node):
def __init__(self, expr, using, regex_list, default, coord=None):
self.tags = []
self.expr = expr
self.using = using
self.regex_list = regex_list
self.default = default
self.coord = coord
def children(self):
nodelist = []
if self.expr is not None: nodelist.append(("expr", self.expr))
if self.using is not None: nodelist.append(("using", self.using))
if self.regex_list is not None: nodelist.append(("regex_list", self.regex_list))
if self.default is not None: nodelist.append(("default", self.default))
return tuple(nodelist)
attr_names = ()
class Set(Node):
def __init__(self, items, coord=None):
self.tags = []
self.items = items
self.coord = coord
def children(self):
nodelist = []
for i, child in enumerate(self.items or []):
nodelist.append(("items[%d]" % i, child))
return tuple(nodelist)
attr_names = ()
class Slice (Node):
def __init__(self, obj, lower, upper, coord=None):
self.tags = []
self.obj = obj
self.lower = lower
self.upper = upper
self.coord = coord
def children(self):
nodelist = []
if self.obj is not None: nodelist.append(("obj", self.obj))
if self.lower is not None: nodelist.append(("lower", self.lower))
if self.upper is not None: nodelist.append(("upper", self.upper))
return tuple(nodelist)
attr_names = ()
class Subscription(Node):
def __init__(self, obj, subscript, coord=None):
self.tags = []
self.obj = obj
self.subscript = subscript
self.coord = coord
def children(self):
nodelist = []
if self.obj is not None: nodelist.append(("obj", self.obj))
if self.subscript is not None: nodelist.append(("subscript", self.subscript))
return tuple(nodelist)
attr_names = ()
class Switch (Node):
def __init__(self, case_list, default, coord=None):
self.tags = []
self.case_list = case_list
self.default = default
self.coord = coord
def children(self):
nodelist = []
if self.case_list is not None: nodelist.append(("case_list", self.case_list))
if self.default is not None: nodelist.append(("default", self.default))
return tuple(nodelist)
attr_names = ()
class Term(Node):
    """AST node: named term applied to an argument list."""
    attr_names = ('name',)

    def __init__(self, name, args, coord=None):
        self.tags = []
        self.name = name
        self.args = args
        self.coord = coord

    def children(self):
        # `name` is a scalar attribute; only `args` is a child node.
        return (("args", self.args),) if self.args is not None else ()


class Try(Node):
    """AST node: try block with its catch clauses."""
    attr_names = ()

    def __init__(self, block, catches, coord=None):
        self.tags = []
        self.block = block
        self.catches = catches
        self.coord = coord

    def children(self):
        slots = (("block", self.block), ("catches", self.catches))
        return tuple(s for s in slots if s[1] is not None)


class UnaryOp(Node):
    """AST node: unary operation `op expr`."""
    attr_names = ('op',)

    def __init__(self, op, expr, coord=None):
        self.tags = []
        self.op = op
        self.expr = expr
        self.coord = coord

    def children(self):
        return (("expr", self.expr),) if self.expr is not None else ()


class While(Node):
    """AST node: while loop with condition and body."""
    attr_names = ()

    def __init__(self, cond, body, coord=None):
        self.tags = []
        self.cond = cond
        self.body = body
        self.coord = coord

    def children(self):
        slots = (("cond", self.cond), ("body", self.body))
        return tuple(s for s in slots if s[1] is not None)
import sys
class Node(object):
    """Abstract base class for AST nodes.

    Subclasses define ``attr_names`` (names of scalar attributes shown by
    ``show``/``to_tuples``) and override ``children`` to yield
    ``(slot_name, child_node)`` pairs.
    """

    def children(self):
        """A sequence of all children that are Nodes."""
        pass

    def __str__(self):
        # Pretty-printed tree, same as show() with default options.
        return self.show()

    def __repr__(self):
        # Compact nested-tuple form, convenient for tests and comparison.
        return str(self.to_tuples())

    def to_tuples(self):
        """Serialize the subtree as nested tuples:
        ``(ClassName, *attr_values, *child_tuples)``.
        """
        result = [self.__class__.__name__]
        result.extend(getattr(self, n) for n in self.attr_names)
        for _child_name, child in self.children():
            result.append(child.to_tuples())
        return tuple(result)

    def show(self,
             buf=None,
             offset=0,
             attrnames=False,
             nodenames=False,
             showcoord=False,
             _my_node_name=None):
        """ Pretty print the Node and all its attributes and
            children (recursively) to a buffer.
            buf:
                Open IO buffer into which the Node is printed.
                If it is None or let empty, instead a string
                is returned
            offset:
                Initial offset (amount of leading spaces)
            attrnames:
                True if you want to see the attribute names in
                name=value pairs. False to only see the values.
            nodenames:
                True if you want to see the actual node names
                within their parents.
            showcoord:
                Do you want the coordinates of each Node to be
                displayed.
        """
        s = ''
        lead = ' ' * offset
        # Header line: class name, optionally the child-slot name.
        if nodenames and _my_node_name is not None:
            s += lead + self.__class__.__name__ + ' <' + _my_node_name + '>: '
        else:
            s += lead + self.__class__.__name__ + ': '
        if self.attr_names:
            if attrnames:
                nvlist = [(n, getattr(self, n)) for n in self.attr_names]
                attrstr = ', '.join('%s=%s' % nv for nv in nvlist)
            else:
                vlist = [getattr(self, n) for n in self.attr_names]
                attrstr = ', '.join('%s' % v for v in vlist)
            s += attrstr
        if showcoord:
            s += ' (at %s)' % self.coord
        s += '\n'
        # BUGFIX: recurse with buf=None so each child RETURNS its string.
        # Previously `buf` was passed through: with a real buffer the child
        # wrote itself to the buffer early (out of order) and returned None,
        # so `s += None` raised TypeError.
        for child_name, child in self.children():
            s += child.show(
                None,
                offset=offset + 2,
                attrnames=attrnames,
                nodenames=nodenames,
                showcoord=showcoord,
                _my_node_name=child_name)
        if buf is None:
            return s
        buf.write(s)
class NodeVisitor(object):
    """Base visitor for AST nodes (modeled after Python's own ``ast`` module).

    Subclass it and define ``visit_XXX`` methods, where XXX is the class
    name of the node type you want to handle::

        class ConstantVisitor(NodeVisitor):
            def __init__(self):
                self.values = []
            def visit_Constant(self, node):
                self.values.append(node.value)

    Nodes without a matching ``visit_XXX`` handler fall back to
    ``generic_visit``, which recurses into the children in preorder.
    Defining ``visit_XXX`` suppresses that recursion for those nodes
    unless the handler calls ``NodeVisitor.generic_visit(self, node)``
    itself.
    """

    def visit(self, node):
        """Dispatch *node* to its ``visit_<ClassName>`` handler (or to
        ``generic_visit`` when none exists) and return the result."""
        handler = getattr(self, 'visit_' + node.__class__.__name__,
                          self.generic_visit)
        return handler(node)

    def generic_visit(self, node):
        """Fallback handler: preorder-visit every child of *node*."""
        for _name, child in node.children():
            self.visit(child)
class As(Node):
    """AST node: ``as`` wrapper around a single expression."""
    attr_names = ()

    def __init__(self, expr, coord=None):
        self.tags = []
        self.expr = expr
        self.coord = coord

    def children(self):
        return (("expr", self.expr),) if self.expr is not None else ()


class Assert(Node):
    """AST node: assert statement with condition and message expression."""
    attr_names = ()

    def __init__(self, cond, expr, coord=None):
        self.tags = []
        self.cond = cond
        self.expr = expr
        self.coord = coord

    def children(self):
        slots = (("cond", self.cond), ("expr", self.expr))
        return tuple(s for s in slots if s[1] is not None)


class ArgumentList(Node):
    """AST node: ordered list of call arguments."""
    attr_names = ()

    def __init__(self, arguments, coord=None):
        self.tags = []
        self.arguments = arguments
        self.coord = coord

    def children(self):
        return tuple(("arguments[%d]" % i, a)
                     for i, a in enumerate(self.arguments or []))


class Assignment(Node):
    """AST node: assignment of `right` to `target` via operator `op`."""
    attr_names = ('op',)

    def __init__(self, op, target, right, coord=None):
        self.tags = []
        self.op = op
        self.target = target
        self.right = right
        self.coord = coord

    def children(self):
        slots = (("target", self.target), ("right", self.right))
        return tuple(s for s in slots if s[1] is not None)


class AttributeRef(Node):
    """AST node: attribute access ``obj.field``."""
    attr_names = ()

    def __init__(self, obj, field, coord=None):
        self.tags = []
        self.obj = obj
        self.field = field
        self.coord = coord

    def children(self):
        slots = (("obj", self.obj), ("field", self.field))
        return tuple(s for s in slots if s[1] is not None)


class BinaryOp(Node):
    """AST node: binary operation ``left op right``."""
    attr_names = ('op',)

    def __init__(self, op, left, right, coord=None):
        self.tags = []
        self.op = op
        self.left = left
        self.right = right
        self.coord = coord

    def children(self):
        slots = (("left", self.left), ("right", self.right))
        return tuple(s for s in slots if s[1] is not None)
class Backtrack(Node):
    """AST node: backtrack statement (leaf, no children)."""
    attr_names = ()

    def __init__(self, coord=None):
        self.tags = []
        self.coord = coord

    def children(self):
        return ()


class Block(Node):
    """AST node: ordered sequence of statements."""
    attr_names = ()

    def __init__(self, stmts, coord=None):
        self.tags = []
        self.stmts = stmts
        self.coord = coord

    def children(self):
        return tuple(("stmts[%d]" % i, s)
                     for i, s in enumerate(self.stmts or []))


class Break(Node):
    """AST node: break statement (leaf, no children)."""
    attr_names = ()

    def __init__(self, coord=None):
        self.tags = []
        self.coord = coord

    def children(self):
        return ()


class Call(Node):
    """AST node: call of `name` with `args`; both are child nodes."""
    attr_names = ()

    def __init__(self, name, args, coord=None):
        self.tags = []
        self.name = name
        self.args = args
        self.coord = coord

    def children(self):
        slots = (("name", self.name), ("args", self.args))
        return tuple(s for s in slots if s[1] is not None)


class Case(Node):
    """AST node: one case of a switch/match: condition plus body."""
    attr_names = ()

    def __init__(self, cond, body, coord=None):
        self.tags = []
        self.cond = cond
        self.body = body
        self.coord = coord

    def children(self):
        slots = (("cond", self.cond), ("body", self.body))
        return tuple(s for s in slots if s[1] is not None)


class CaseList(Node):
    """AST node: ordered list of Case nodes."""
    attr_names = ()

    def __init__(self, cases, coord=None):
        self.tags = []
        self.cases = cases
        self.coord = coord

    def children(self):
        return tuple(("cases[%d]" % i, c)
                     for i, c in enumerate(self.cases or []))
class CatchClause(Node):
    """AST node: catch clause; `type` is a scalar attribute,
    `name` and `block` are child nodes."""
    attr_names = ('type',)

    def __init__(self, type, name, block, coord=None):
        self.tags = []
        self.type = type
        self.name = name
        self.block = block
        self.coord = coord

    def children(self):
        slots = (("name", self.name), ("block", self.block))
        return tuple(s for s in slots if s[1] is not None)


class Catches(Node):
    """AST node: ordered list of catch clauses."""
    attr_names = ()

    def __init__(self, clauses, coord=None):
        self.tags = []
        self.clauses = clauses
        self.coord = coord

    def children(self):
        return tuple(("clauses[%d]" % i, c)
                     for i, c in enumerate(self.clauses or []))


class Check(Node):
    """AST node: check statement wrapping a block."""
    attr_names = ()

    def __init__(self, block, coord=None):
        self.tags = []
        self.block = block
        self.coord = coord

    def children(self):
        return (("block", self.block),) if self.block is not None else ()


class Class(Node):
    """AST node: class definition; name, params, body block and
    static section are all child nodes."""
    attr_names = ()

    def __init__(self, name, params, block, static, coord=None):
        self.tags = []
        self.name = name
        self.params = params
        self.block = block
        self.static = static
        self.coord = coord

    def children(self):
        slots = (("name", self.name), ("params", self.params),
                 ("block", self.block), ("static", self.static))
        return tuple(s for s in slots if s[1] is not None)


class Comprehension(Node):
    """AST node: comprehension of kind `klass` with expression,
    iterators and optional condition."""
    attr_names = ('klass',)

    def __init__(self, klass, expr, iterators, cond, coord=None):
        self.tags = []
        self.klass = klass
        self.expr = expr
        self.iterators = iterators
        self.cond = cond
        self.coord = coord

    def children(self):
        slots = (("expr", self.expr), ("iterators", self.iterators),
                 ("cond", self.cond))
        return tuple(s for s in slots if s[1] is not None)
class Continue(Node):
    """AST node: continue statement (leaf, no children)."""
    attr_names = ()

    def __init__(self, coord=None):
        self.tags = []
        self.coord = coord

    def children(self):
        return ()


class Constant(Node):
    """AST node: literal constant; `klass` and `value` are scalar
    attributes, there are no children."""
    attr_names = ('klass', 'value',)

    def __init__(self, klass, value, coord=None):
        self.tags = []
        self.klass = klass
        self.value = value
        self.coord = coord

    def children(self):
        return ()


class Default(Node):
    """AST node: default branch of a switch/match."""
    attr_names = ()

    def __init__(self, body, coord=None):
        self.tags = []
        self.body = body
        self.coord = coord

    def children(self):
        return (("body", self.body),) if self.body is not None else ()


class DoWhile(Node):
    """AST node: do-while loop with condition and body."""
    attr_names = ()

    def __init__(self, cond, body, coord=None):
        self.tags = []
        self.cond = cond
        self.body = body
        self.coord = coord

    def children(self):
        slots = (("cond", self.cond), ("body", self.body))
        return tuple(s for s in slots if s[1] is not None)


class Exit(Node):
    """AST node: exit statement (leaf, no children)."""
    attr_names = ()

    def __init__(self, coord=None):
        self.tags = []
        self.coord = coord

    def children(self):
        return ()


class ExprList(Node):
    """AST node: ordered list of expressions."""
    attr_names = ()

    def __init__(self, exprs, coord=None):
        self.tags = []
        self.exprs = exprs
        self.coord = coord

    def children(self):
        return tuple(("exprs[%d]" % i, e)
                     for i, e in enumerate(self.exprs or []))
class FileAST(Node):
    """AST node: root of a parsed file — the top-level statement list."""
    attr_names = ()

    def __init__(self, stmts, coord=None):
        self.tags = []
        self.stmts = stmts
        self.coord = coord

    def children(self):
        return tuple(("stmts[%d]" % i, s)
                     for i, s in enumerate(self.stmts or []))


class For(Node):
    """AST node: for loop over iterators with a body."""
    attr_names = ()

    def __init__(self, iterators, body, coord=None):
        self.tags = []
        self.iterators = iterators
        self.body = body
        self.coord = coord

    def children(self):
        slots = (("iterators", self.iterators), ("body", self.body))
        return tuple(s for s in slots if s[1] is not None)


class Identifier(Node):
    """AST node: identifier; `name` is a scalar attribute (leaf)."""
    attr_names = ('name',)

    def __init__(self, name, coord=None):
        self.tags = []
        self.name = name
        self.coord = coord

    def children(self):
        return ()


class If(Node):
    """AST node: conditional with then-branch and optional else-branch."""
    attr_names = ()

    def __init__(self, cond, iftrue, iffalse, coord=None):
        self.tags = []
        self.cond = cond
        self.iftrue = iftrue
        self.iffalse = iffalse
        self.coord = coord

    def children(self):
        slots = (("cond", self.cond), ("iftrue", self.iftrue),
                 ("iffalse", self.iffalse))
        return tuple(s for s in slots if s[1] is not None)


class Interpolation(Node):
    """AST node: string interpolation — format string plus expressions."""
    attr_names = ()

    def __init__(self, format_string, expressions, coord=None):
        self.tags = []
        self.format_string = format_string
        self.expressions = expressions
        self.coord = coord

    def children(self):
        slots = (("format_string", self.format_string),
                 ("expressions", self.expressions))
        return tuple(s for s in slots if s[1] is not None)


class Iterator(Node):
    """AST node: single loop iterator — assignable bound to an expression."""
    attr_names = ()

    def __init__(self, assignable, expression, coord=None):
        self.tags = []
        self.assignable = assignable
        self.expression = expression
        self.coord = coord

    def children(self):
        slots = (("assignable", self.assignable),
                 ("expression", self.expression))
        return tuple(s for s in slots if s[1] is not None)
class IteratorChain(Node):
    """AST node: chain of iterators; `mode` is a scalar attribute."""
    attr_names = ('mode',)

    def __init__(self, mode, iterators, coord=None):
        self.tags = []
        self.mode = mode
        self.iterators = iterators
        self.coord = coord

    def children(self):
        return tuple(("iterators[%d]" % i, it)
                     for i, it in enumerate(self.iterators or []))


class Lambda(Node):
    """AST node: anonymous function — parameter list plus body."""
    attr_names = ()

    def __init__(self, params, body, coord=None):
        self.tags = []
        self.params = params
        self.body = body
        self.coord = coord

    def children(self):
        slots = (("params", self.params), ("body", self.body))
        return tuple(s for s in slots if s[1] is not None)


class List(Node):
    """AST node: list literal holding item nodes."""
    attr_names = ()

    def __init__(self, items, coord=None):
        self.tags = []
        self.items = items
        self.coord = coord

    def children(self):
        return tuple(("items[%d]" % i, item)
                     for i, item in enumerate(self.items or []))


class Match(Node):
    """AST node: match statement — matchee, case list, optional default."""
    attr_names = ()

    def __init__(self, matchee, case_list, default, coord=None):
        self.tags = []
        self.matchee = matchee
        self.case_list = case_list
        self.default = default
        self.coord = coord

    def children(self):
        slots = (("matchee", self.matchee), ("case_list", self.case_list),
                 ("default", self.default))
        return tuple(s for s in slots if s[1] is not None)


class MatchCase(Node):
    """AST node: one match case — pattern, optional guard, body."""
    attr_names = ()

    def __init__(self, pattern, cond, body, coord=None):
        self.tags = []
        self.pattern = pattern
        self.cond = cond
        self.body = body
        self.coord = coord

    def children(self):
        slots = (("pattern", self.pattern), ("cond", self.cond),
                 ("body", self.body))
        return tuple(s for s in slots if s[1] is not None)


class Param(Node):
    """AST node: formal parameter; `name` is a scalar attribute (leaf)."""
    attr_names = ('name',)

    def __init__(self, name, coord=None):
        self.tags = []
        self.name = name
        self.coord = coord

    def children(self):
        return ()


class ParamList(Node):
    """AST node: ordered list of formal parameters."""
    attr_names = ()

    def __init__(self, params, coord=None):
        self.tags = []
        self.params = params
        self.coord = coord

    def children(self):
        return tuple(("params[%d]" % i, p)
                     for i, p in enumerate(self.params or []))
class Pattern(Node):
    """AST node: list pattern split into head and tail parts."""
    attr_names = ()

    def __init__(self, head, tail, coord=None):
        self.tags = []
        self.head = head
        self.tail = tail
        self.coord = coord

    def children(self):
        slots = (("head", self.head), ("tail", self.tail))
        return tuple(s for s in slots if s[1] is not None)


class Procedure(Node):
    """AST node: procedure definition; `name`/`clazz` are scalar
    attributes, `params` and `body` are child nodes."""
    attr_names = ('name', 'clazz',)

    def __init__(self, name, clazz, params, body, coord=None):
        self.tags = []
        self.name = name
        self.clazz = clazz
        self.params = params
        self.body = body
        self.coord = coord

    def children(self):
        slots = (("params", self.params), ("body", self.body))
        return tuple(s for s in slots if s[1] is not None)


class Quantor(Node):
    """AST node: quantified expression (`name` names the quantor) over
    iterators with a condition."""
    attr_names = ('name',)

    def __init__(self, name, iterators, cond, coord=None):
        self.tags = []
        self.name = name
        self.iterators = iterators
        self.cond = cond
        self.coord = coord

    def children(self):
        slots = (("iterators", self.iterators), ("cond", self.cond))
        return tuple(s for s in slots if s[1] is not None)


class Range(Node):
    """AST node: range of kind `klass` with up to three bound expressions."""
    attr_names = ('klass',)

    def __init__(self, klass, a, b, c, coord=None):
        self.tags = []
        self.klass = klass
        self.a = a
        self.b = b
        self.c = c
        self.coord = coord

    def children(self):
        slots = (("a", self.a), ("b", self.b), ("c", self.c))
        return tuple(s for s in slots if s[1] is not None)


class Regex(Node):
    """AST node: regex clause of a scan statement — pattern expression,
    optional as-binding, optional guard, and body block."""
    attr_names = ()

    def __init__(self, expr, as_expr, cond, block, coord=None):
        self.tags = []
        self.expr = expr
        self.as_expr = as_expr
        self.cond = cond
        self.block = block
        self.coord = coord

    def children(self):
        slots = (("expr", self.expr), ("as_expr", self.as_expr),
                 ("cond", self.cond), ("block", self.block))
        return tuple(s for s in slots if s[1] is not None)


class Return(Node):
    """AST node: return statement with optional value expression."""
    attr_names = ()

    def __init__(self, expr, coord=None):
        self.tags = []
        self.expr = expr
        self.coord = coord

    def children(self):
        return (("expr", self.expr),) if self.expr is not None else ()
class Scan(Node):
    """AST node: scan statement — scanned expression, optional `using`
    binding, regex clause list, and optional default."""
    attr_names = ()

    def __init__(self, expr, using, regex_list, default, coord=None):
        self.tags = []
        self.expr = expr
        self.using = using
        self.regex_list = regex_list
        self.default = default
        self.coord = coord

    def children(self):
        slots = (("expr", self.expr), ("using", self.using),
                 ("regex_list", self.regex_list), ("default", self.default))
        return tuple(s for s in slots if s[1] is not None)


class Set(Node):
    """AST node: set literal holding item nodes."""
    attr_names = ()

    def __init__(self, items, coord=None):
        self.tags = []
        self.items = items
        self.coord = coord

    def children(self):
        return tuple(("items[%d]" % i, item)
                     for i, item in enumerate(self.items or []))


class Slice(Node):
    """AST node: slice access over `obj` with optional `lower`/`upper` bounds."""
    attr_names = ()

    def __init__(self, obj, lower, upper, coord=None):
        self.tags = []
        self.obj = obj
        self.lower = lower
        self.upper = upper
        self.coord = coord

    def children(self):
        slots = (("obj", self.obj), ("lower", self.lower), ("upper", self.upper))
        return tuple(s for s in slots if s[1] is not None)


class Subscription(Node):
    """AST node: subscript access ``obj[subscript]``."""
    attr_names = ()

    def __init__(self, obj, subscript, coord=None):
        self.tags = []
        self.obj = obj
        self.subscript = subscript
        self.coord = coord

    def children(self):
        slots = (("obj", self.obj), ("subscript", self.subscript))
        return tuple(s for s in slots if s[1] is not None)


class Switch(Node):
    """AST node: switch statement with a case list and optional default."""
    attr_names = ()

    def __init__(self, case_list, default, coord=None):
        self.tags = []
        self.case_list = case_list
        self.default = default
        self.coord = coord

    def children(self):
        slots = (("case_list", self.case_list), ("default", self.default))
        return tuple(s for s in slots if s[1] is not None)
class Term(Node):
    """AST node: named term applied to an argument list."""
    attr_names = ('name',)

    def __init__(self, name, args, coord=None):
        self.tags = []
        self.name = name
        self.args = args
        self.coord = coord

    def children(self):
        # `name` is a scalar attribute; only `args` is a child node.
        return (("args", self.args),) if self.args is not None else ()


class Try(Node):
    """AST node: try block with its catch clauses."""
    attr_names = ()

    def __init__(self, block, catches, coord=None):
        self.tags = []
        self.block = block
        self.catches = catches
        self.coord = coord

    def children(self):
        slots = (("block", self.block), ("catches", self.catches))
        return tuple(s for s in slots if s[1] is not None)


class UnaryOp(Node):
    """AST node: unary operation ``op expr``."""
    attr_names = ('op',)

    def __init__(self, op, expr, coord=None):
        self.tags = []
        self.op = op
        self.expr = expr
        self.coord = coord

    def children(self):
        return (("expr", self.expr),) if self.expr is not None else ()


class While(Node):
    """AST node: while loop with condition and body."""
    attr_names = ()

    def __init__(self, cond, body, coord=None):
        self.tags = []
        self.cond = cond
        self.body = body
        self.coord = coord

    def children(self):
        slots = (("cond", self.cond), ("body", self.body))
        return tuple(s for s in slots if s[1] is not None)
import networkx as nx
assert int(nx.__version__.split('.')[0]) >= 2
# This class is responsible for deadlock detecting using a "wait-for" dependency graph.
# In real-life case we might find a more-efficient solution, using properties of that
# graph (for example: out degree = 1).
class DeadlockDetector:
    """Deadlock detection via a "wait-for" dependency graph.

    An edge ``u -> v`` records that transaction ``u`` waits for
    transaction ``v``; a directed cycle means a deadlock.  In a
    real-life system one might exploit graph properties (e.g. the
    out-degree being 1) for a more efficient solution.
    """

    def __init__(self):
        # Directed wait-for graph (networkx).
        self._wait_for_graph = nx.DiGraph()

    @property
    def wait_for_graph(self):
        return self._wait_for_graph

    def wait_for(self, waiting_transaction_id, waiting_for_transaction_id):
        """Register that one transaction waits for another.

        Tentatively adds the edge and checks for a deadlock cycle.
        If a cycle is created, the edge is removed again and the cycle
        is returned; otherwise the edge stays and None is returned.
        """
        graph = self._wait_for_graph
        for node in (waiting_transaction_id, waiting_for_transaction_id):
            if not graph.has_node(node):
                graph.add_node(node)
        graph.add_edge(waiting_transaction_id, waiting_for_transaction_id)
        cycle = self.find_deadlock_cycle()
        if cycle is None:
            return None
        graph.remove_edge(waiting_transaction_id, waiting_for_transaction_id)
        return cycle

    def transaction_ended(self, ended_transaction_id):
        """Drop a finished transaction (and all its incident edges)."""
        if self._wait_for_graph.has_node(ended_transaction_id):
            # remove_node also removes every edge touching the node.
            self._wait_for_graph.remove_node(ended_transaction_id)

    def find_deadlock_cycle(self):
        """Return a cycle in the wait-for graph, or None if acyclic."""
        try:
            return nx.find_cycle(self._wait_for_graph, orientation='original')
        except nx.NetworkXNoCycle:
            return None
assert int(nx.__version__.split('.')[0]) >= 2
# This class is responsible for deadlock detecting using a "wait-for" dependency graph.
# In real-life case we might find a more-efficient solution, using properties of that
# graph (for example: out degree = 1).
class DeadlockDetector:
    """Deadlock detection via a "wait-for" dependency graph.

    An edge ``u -> v`` records that transaction ``u`` waits for
    transaction ``v``; a directed cycle means a deadlock.  In a
    real-life system one might exploit graph properties (e.g. the
    out-degree being 1) for a more efficient solution.
    """

    def __init__(self):
        # Directed wait-for graph (networkx).
        self._wait_for_graph = nx.DiGraph()

    @property
    def wait_for_graph(self):
        return self._wait_for_graph

    def wait_for(self, waiting_transaction_id, waiting_for_transaction_id):
        """Register that one transaction waits for another.

        Tentatively adds the edge and checks for a deadlock cycle.
        If a cycle is created, the edge is removed again and the cycle
        is returned; otherwise the edge stays and None is returned.
        """
        graph = self._wait_for_graph
        for node in (waiting_transaction_id, waiting_for_transaction_id):
            if not graph.has_node(node):
                graph.add_node(node)
        graph.add_edge(waiting_transaction_id, waiting_for_transaction_id)
        cycle = self.find_deadlock_cycle()
        if cycle is None:
            return None
        graph.remove_edge(waiting_transaction_id, waiting_for_transaction_id)
        return cycle

    def transaction_ended(self, ended_transaction_id):
        """Drop a finished transaction (and all its incident edges)."""
        if self._wait_for_graph.has_node(ended_transaction_id):
            # remove_node also removes every edge touching the node.
            self._wait_for_graph.remove_node(ended_transaction_id)

    def find_deadlock_cycle(self):
        """Return a cycle in the wait-for graph, or None if acyclic."""
        try:
            return nx.find_cycle(self._wait_for_graph, orientation='original')
        except nx.NetworkXNoCycle:
            return None
import json
from common.logger import get_logger
from constants.entity import EventConsumerEntity, EthereumEventConsumerEntities, CardanoEventConsumer, \
ConverterBridgeEntities
from constants.error_details import ErrorCode, ErrorDetails
from constants.general import BlockchainName
from utils.exceptions import InternalServerErrorException
logger = get_logger(__name__)
def format_ethereum_event(event) -> list:
    """Wrap a raw Ethereum event into the uniform consumer format.

    Returns a single-element list when the event carries both a name
    and a data field; otherwise an empty list.
    """
    formatted = []
    event_name = event.get(EthereumEventConsumerEntities.NAME.value, None)
    event_data = event.get(EthereumEventConsumerEntities.DATA.value, None)
    if event_name and event_data:
        formatted.append(consumer_required_format(
            blockchain_name=BlockchainName.ETHEREUM.value,
            blockchain_event=event))
    return formatted
def convert_consumer_event(event) -> list:
    """Convert a Cardano queue event into the uniform consumer format.

    Each record body is JSON-decoded; when the body carries a nested
    ``Message`` it is decoded too and wrapped via consumer_required_format,
    otherwise the decoded body itself is kept.  Any parsing failure is
    logged and re-raised as InternalServerErrorException.
    """
    formatted = []
    records = event.get(CardanoEventConsumer.RECORDS.value, [])
    try:
        for record in records:
            body = record.get(CardanoEventConsumer.BODY.value)
            if not body:
                continue
            parsed_body = json.loads(body)
            message = parsed_body.get(CardanoEventConsumer.MESSAGE.value)
            if message:
                formatted.append(consumer_required_format(
                    blockchain_name=BlockchainName.CARDANO.value,
                    blockchain_event=json.loads(message)))
            else:
                formatted.append(parsed_body)
    except Exception as e:
        logger.info(f"Error while trying to parse the input={json.dumps(event)} with error of {e}")
        raise InternalServerErrorException(
            error_code=ErrorCode.UNABLE_TO_PARSE_THE_INPUT_EVENT.value,
            error_details=ErrorDetails[ErrorCode.UNABLE_TO_PARSE_THE_INPUT_EVENT.value].value)
    return formatted
def consumer_required_format(blockchain_name, blockchain_event):
    """Assemble the uniform event payload: blockchain name plus raw event."""
    name_key = EventConsumerEntity.BLOCKCHAIN_NAME.value
    event_key = EventConsumerEntity.BLOCKCHAIN_EVENT.value
    return {name_key: blockchain_name, event_key: blockchain_event}
def convert_converter_bridge_event(event) -> list:
    """JSON-decode the body of every record in a converter-bridge event.

    Parsing failures are logged and re-raised as
    InternalServerErrorException.
    """
    formatted = []
    records = event.get(ConverterBridgeEntities.RECORDS.value, [])
    try:
        for record in records:
            body = record.get(ConverterBridgeEntities.BODY.value)
            if not body:
                continue
            formatted.append(json.loads(body))
    except Exception as e:
        logger.info(f"Error while trying to parse the input={json.dumps(event)} with error of {e}")
        raise InternalServerErrorException(
            error_code=ErrorCode.UNABLE_TO_PARSE_THE_INPUT_EVENT.value,
            error_details=ErrorDetails[ErrorCode.UNABLE_TO_PARSE_THE_INPUT_EVENT.value].value)
    return formatted
from common.logger import get_logger
from constants.entity import EventConsumerEntity, EthereumEventConsumerEntities, CardanoEventConsumer, \
ConverterBridgeEntities
from constants.error_details import ErrorCode, ErrorDetails
from constants.general import BlockchainName
from utils.exceptions import InternalServerErrorException
logger = get_logger(__name__)
def format_ethereum_event(event) -> list:
    """Wrap a raw Ethereum event into the uniform consumer format.

    Returns a single-element list when the event carries both a name
    and a data field; otherwise an empty list.
    """
    formatted = []
    event_name = event.get(EthereumEventConsumerEntities.NAME.value, None)
    event_data = event.get(EthereumEventConsumerEntities.DATA.value, None)
    if event_name and event_data:
        formatted.append(consumer_required_format(
            blockchain_name=BlockchainName.ETHEREUM.value,
            blockchain_event=event))
    return formatted
def convert_consumer_event(event) -> list:
    """Convert a Cardano queue event into the uniform consumer format.

    Each record body is JSON-decoded; when the body carries a nested
    ``Message`` it is decoded too and wrapped via consumer_required_format,
    otherwise the decoded body itself is kept.  Any parsing failure is
    logged and re-raised as InternalServerErrorException.
    """
    formatted = []
    records = event.get(CardanoEventConsumer.RECORDS.value, [])
    try:
        for record in records:
            body = record.get(CardanoEventConsumer.BODY.value)
            if not body:
                continue
            parsed_body = json.loads(body)
            message = parsed_body.get(CardanoEventConsumer.MESSAGE.value)
            if message:
                formatted.append(consumer_required_format(
                    blockchain_name=BlockchainName.CARDANO.value,
                    blockchain_event=json.loads(message)))
            else:
                formatted.append(parsed_body)
    except Exception as e:
        logger.info(f"Error while trying to parse the input={json.dumps(event)} with error of {e}")
        raise InternalServerErrorException(
            error_code=ErrorCode.UNABLE_TO_PARSE_THE_INPUT_EVENT.value,
            error_details=ErrorDetails[ErrorCode.UNABLE_TO_PARSE_THE_INPUT_EVENT.value].value)
    return formatted
def consumer_required_format(blockchain_name, blockchain_event):
    """Assemble the uniform event payload: blockchain name plus raw event."""
    name_key = EventConsumerEntity.BLOCKCHAIN_NAME.value
    event_key = EventConsumerEntity.BLOCKCHAIN_EVENT.value
    return {name_key: blockchain_name, event_key: blockchain_event}
def convert_converter_bridge_event(event) -> list:
    """JSON-decode the body of every record in a converter-bridge event.

    Parsing failures are logged and re-raised as
    InternalServerErrorException.
    """
    formatted = []
    records = event.get(ConverterBridgeEntities.RECORDS.value, [])
    try:
        for record in records:
            body = record.get(ConverterBridgeEntities.BODY.value)
            if not body:
                continue
            formatted.append(json.loads(body))
    except Exception as e:
        logger.info(f"Error while trying to parse the input={json.dumps(event)} with error of {e}")
        raise InternalServerErrorException(
            error_code=ErrorCode.UNABLE_TO_PARSE_THE_INPUT_EVENT.value,
            error_details=ErrorDetails[ErrorCode.UNABLE_TO_PARSE_THE_INPUT_EVENT.value].value)
    return formatted
import numpy as np
from numpy import pi
def spec_var(model, ph):
    """Compute variance of ``p`` from Fourier coefficients ``ph``.

    Parameters
    ----------
    model : pyqg.Model instance
        The model object from which `ph` originates (only ``model.M``
        is used here).
    ph : complex array
        The field on which to compute the variance

    Returns
    -------
    var_dens : float
        The variance of `ph`
    """
    density = 2. * np.abs(ph) ** 2 / model.M ** 2
    # The first and last wavenumber columns of a real FFT have no
    # complex-conjugate partner, so they contribute only half.
    density[..., 0] *= 0.5
    density[..., -1] *= 0.5
    return density.sum(axis=(-1, -2))
def spec_sum(ph2):
    """Compute total spectral sum of the real spectral quantity ``ph2``.

    Parameters
    ----------
    ph2 : real array
        The squared-modulus spectral field on which to compute the sum.
        (Docstring previously documented a nonexistent ``model``
        parameter; this function takes only ``ph2``.)

    Returns
    -------
    total : float
        The sum of `ph2`, doubled to account for the complex-conjugate
        half of a real FFT, with the first and last wavenumber columns
        counted only once.
    """
    # Rebind to a new array so the caller's input is never mutated.
    doubled = 2. * ph2
    doubled[..., 0] = doubled[..., 0] / 2.
    doubled[..., -1] = doubled[..., -1] / 2.
    return doubled.sum(axis=(-1, -2))
def calc_ispec(model, _var_dens, averaging = True, truncate=True, nd_wavenumber=False, nfactor = 1):
    """Compute isotropic spectrum `phr` from 2D spectrum of variable `signal2d`
    such that `signal2d.var() = phr.sum() * (kr[1] - kr[0])`.

    Parameters
    ----------
    model : pyqg.Model instance
        The model object from which `_var_dens` originates (uses
        ``ll``, ``kk``, ``dk``, ``dl`` and ``wv``).
    _var_dens : array
        Squared modulus of fourier coefficients like this:
        `np.abs(signal2d_fft)**2/m.M**2`
    averaging : bool
        If True, spectral density is estimated with averaging over circles,
        otherwise summation is used and Parseval identity holds
    truncate : bool
        If True, maximum wavenumber corresponds to inner circle in Fourier
        space, otherwise - outer circle
    nd_wavenumber : bool
        If True, wavenumber is nondimensional: minimum wavenumber is 1 and
        corresponds to domain length/width, otherwise - wavenumber is
        dimensional [m^-1]
    nfactor : int
        width of the bin in sqrt(dk^2+dl^2) units

    Returns
    -------
    kr : array
        isotropic wavenumber
    phr : array
        isotropic spectrum
    """
    # Work on a copy so the caller's spectrum is not mutated;
    # account for complex conjugate: the first and last columns of a
    # real FFT appear once, so they contribute only half.
    var_dens = np.copy(_var_dens)
    var_dens[...,0] /= 2
    var_dens[...,-1] /= 2
    ll_max = np.abs(model.ll).max()
    kk_max = np.abs(model.kk).max()
    # Radial extent of the binning: inner (inscribed) or outer circle.
    if truncate:
        kmax = np.minimum(ll_max, kk_max)
    else:
        kmax = np.sqrt(ll_max**2 + kk_max**2)
    kmin = 0
    # Radial bin width.
    dkr = np.sqrt(model.dk**2 + model.dl**2) * nfactor
    # left border of bins
    kr = np.arange(kmin, kmax, dkr)
    phr = np.zeros(kr.size)
    for i in range(kr.size):
        # Mask of 2D wavenumbers in this annulus; the last bin is closed
        # on the right so kmax itself is included.
        if i == kr.size-1:
            fkr = (model.wv>=kr[i]) & (model.wv<=kr[i]+dkr)
        else:
            fkr = (model.wv>=kr[i]) & (model.wv<kr[i+1])
        if averaging:
            # Mean density on the annulus times the circle circumference,
            # normalized by the spectral cell area dk*dl.
            phr[i] = var_dens[fkr].mean() * (kr[i]+dkr/2) * pi / (model.dk * model.dl)
        else:
            phr[i] = var_dens[fkr].sum() / dkr
        phr[i] *= 2 # include full circle
    # convert left border of the bin to center
    kr = kr + dkr/2
    # convert to non-dimensional wavenumber
    # preserving integral over spectrum
    if nd_wavenumber:
        # NOTE(review): kmin is 0 above, so this divides by zero (inf
        # wavenumbers, zero spectrum). Presumably kmin should be the
        # smallest nonzero wavenumber (e.g. min(model.dk, model.dl)) —
        # confirm before relying on nd_wavenumber=True.
        kr = kr / kmin
        phr = phr * kmin
    return kr, phr
def diagnostic_differences(m1, m2, reduction='rmse', instantaneous=False):
    """Compute a dictionary of differences in the diagnostics of two models at
    possibly different resolutions (e.g. for quantifying the effects of
    parameterizations). Applies normalization/isotropization to certain
    diagnostics before comparing them and skips others. Also computes
    differences for each vertical layer separately.

    Parameters
    ----------
    m1 : pyqg.Model instance
        The first model to compare
    m2 : pyqg.Model instance
        The second model to compare
    reduction : string or function
        A function that takes two arrays of diagnostics and computes a distance
        metric. Defaults to the root mean squared difference ('rmse').
    instantaneous : boolean
        If true, compute difference metrics for the instantaneous values of a
        diagnostic, rather than its time average. Defaults to false.

    Returns
    -------
    diffs : dict
        A dictionary of diagnostic name => distance. If the diagnostic is
        defined over multiple layers, separate keys are included with an
        appended (1-based) z index.
    """
    diffs = {}
    # Compute the minimum common wavenumber in case we're comparing two
    # models with different resolutions; isotropic spectra are truncated to
    # this common length before differencing.
    kr1, _ = calc_ispec(m1, m1.diagnostics['KEspec']['function'](m1)[0])
    kr2, _ = calc_ispec(m2, m2.diagnostics['KEspec']['function'](m2)[0])
    min_kr_length = min(len(kr1), len(kr2))

    # Helper to get a normalized version of diagnostics
    def get_normalized_diagnostic(model, diag_name, layer=None):
        # Get the raw diagnostic: instantaneous value or accumulated average
        attrs = model.diagnostics[diag_name]
        if instantaneous:
            diag = attrs['function'](model)
        else:
            diag = model.get_diagnostic(diag_name)
        # Check if we need to add other terms to this diagnostic (e.g.
        # KEflux + paramspec_KEflux)
        # NOTE(review): `+=` mutates `diag` in place when it is an ndarray;
        # this assumes the accessors above return a fresh array each call --
        # confirm against pyqg's diagnostic implementation.
        for diag_name2 in attrs.get('sums_with', []):
            if instantaneous:
                diag += model.diagnostics[diag_name2]['function'](model)
            else:
                diag += model.get_diagnostic(diag_name2)
        # Potentially limit to a layer
        if layer is not None:
            diag = diag[layer]
        # Potentially convert to isotropic spectrum, keeping only the
        # wavenumbers common to both models
        # NOTE(review): this equality only holds if 'dims' is a tuple; a
        # list would never compare equal to ('l','k') -- confirm.
        if attrs['dims'][-2:] == ('l','k'):
            kr, diag = calc_ispec(model, diag)
            diag = diag[:min_kr_length]
        # Return the normalized diagnostic
        return diag

    # Loop through all diagnostics
    for diag_name, attrs in m1.diagnostics.items():
        # Skip diagnostics flagged as not for comparison (TODO: diagnostics
        # should be objects and this should be a method, rather than a
        # dictionary key)
        if attrs.get('skip_comparison', False):
            continue
        # Skip diagnostics not present in the second model (usually not
        # necessary)
        if diag_name not in m2.diagnostics:
            continue
        # If we have multiple layers in this diagnostic, we want to consider
        # them separately with different keys
        if attrs['dims'][0] == 'lev':
            layers = range(m1.nz)
        elif attrs['dims'][0] == 'lev_mid':
            # interface-defined quantities have one fewer level
            layers = range(m1.nz - 1)
        else:
            layers = [None]
        for layer in layers:
            diag1 = get_normalized_diagnostic(m1, diag_name, layer)
            diag2 = get_normalized_diagnostic(m2, diag_name, layer)
            # 0-based layer index becomes a 1-based suffix in the key
            label = f"{diag_name}{'' if layer is None else layer+1}"
            # Compute the error
            if reduction == 'rmse':
                diff = np.sqrt(np.mean((diag1-diag2)**2))
            else:
                diff = reduction(diag1, diag2)
            diffs[label] = diff
    return diffs
def diagnostic_similarities(model, target, baseline, **kw):
    """Score how much closer `model` is to `target` than `baseline` is.

    Wraps `diagnostic_differences`: for each diagnostic key the score is
    ``1 - d(model, target) / d(baseline, target)``. Scores approach 1 when
    the model is much closer to the target than the baseline, are 0 when it
    is no closer, and are negative (unbounded below) when it is farther
    away. Keyword arguments are forwarded to `diagnostic_differences`.

    Parameters
    ----------
    model : pyqg.Model instance
        The model for which we want to compute similarity scores (e.g. a
        parameterized low resolution model)
    target : pyqg.Model instance
        The target model (e.g. a high resolution model)
    baseline : pyqg.Model instance
        The baseline against which we check for improvement or degradation
        (e.g. an unparameterized low resolution model)

    Returns
    -------
    sims : dict
        Diagnostic name => similarity score; layered diagnostics appear as
        separate keys with an appended z index.
    """
    model_dists = diagnostic_differences(model, target, **kw)
    baseline_dists = diagnostic_differences(baseline, target, **kw)
    return {key: 1 - model_dists[key] / baseline_dists[key] for key in model_dists}
import numpy as np
from numpy import pi
def spec_var(model, ph):
    """Return the variance of the field whose real-FFT coefficients are ``ph``.

    Parameters
    ----------
    model : pyqg.Model instance
        Only ``model.M`` is read (normalization factor).
    ph : complex array
        Fourier coefficients of the field.

    Returns
    -------
    var_dens : float or array
        Variance, summed over the last two (spectral) axes.
    """
    # Double every coefficient to count the conjugate half of the real FFT.
    density = 2. * np.abs(ph)**2 / model.M**2
    # The first and last wavenumber columns have no conjugate partner in a
    # real fft2, so halve them back.
    density[..., 0] /= 2
    density[..., -1] /= 2
    return density.sum(axis=(-2, -1))
def spec_sum(ph2):
    """Return the total spectral sum of the real spectral quantity ``ph2``.

    Every coefficient is doubled to account for the conjugate half of the
    real FFT, except the first and last wavenumber columns, which have no
    conjugate partner. The caller's array is not modified.

    Parameters
    ----------
    ph2 : real array
        The field on which to compute the sum.

    Returns
    -------
    var_dens : float or array
        The sum over the last two axes.
    """
    # `2.*ph2` allocates a new array, so the in-place halving below never
    # touches the caller's data.
    total = 2. * ph2
    total[..., 0] /= 2.
    total[..., -1] /= 2.
    return total.sum(axis=(-2, -1))
def calc_ispec(model, _var_dens, averaging = True, truncate=True, nd_wavenumber=False, nfactor = 1):
    """Compute isotropic spectrum `phr` from 2D spectrum of variable `signal2d`
    such that `signal2d.var() = phr.sum() * (kr[1] - kr[0])`.

    Parameters
    ----------
    model : pyqg.Model instance
        The model object from which `var_dens` originates; only the grid
        attributes ``ll``, ``kk``, ``dk``, ``dl`` and ``wv`` are read.
    _var_dens : array
        Squared modulus of fourier coefficients like this:
        `np.abs(signal2d_fft)**2/m.M**2`. Not modified in place.
    averaging : bool
        If True, spectral density is estimated with averaging over circles,
        otherwise summation is used and Parseval identity holds.
    truncate : bool
        If True, maximum wavenumber corresponds to inner circle in Fourier
        space, otherwise - outer circle.
    nd_wavenumber : bool
        If True, wavenumber is nondimensional: the minimum wavenumber maps
        to ~1 and corresponds to domain length/width, otherwise the
        wavenumber is dimensional [m^-1].
    nfactor : int
        Width of the bin in sqrt(dk^2+dl^2) units.

    Returns
    -------
    kr : array
        isotropic wavenumber (bin centers)
    phr : array
        isotropic spectrum
    """
    # Work on a copy so the caller's array is untouched; halve the first and
    # last wavenumber columns to account for the complex-conjugate symmetry
    # of the real FFT.
    var_dens = np.copy(_var_dens)
    var_dens[...,0] /= 2
    var_dens[...,-1] /= 2

    ll_max = np.abs(model.ll).max()
    kk_max = np.abs(model.kk).max()

    if truncate:
        kmax = np.minimum(ll_max, kk_max)
    else:
        kmax = np.sqrt(ll_max**2 + kk_max**2)

    kmin = 0
    dkr = np.sqrt(model.dk**2 + model.dl**2) * nfactor

    # left border of bins
    kr = np.arange(kmin, kmax, dkr)
    phr = np.zeros(kr.size)

    for i in range(kr.size):
        if i == kr.size-1:
            # last bin is closed on the right so the outermost wavenumbers
            # are not dropped
            fkr = (model.wv>=kr[i]) & (model.wv<=kr[i]+dkr)
        else:
            fkr = (model.wv>=kr[i]) & (model.wv<kr[i+1])
        if averaging:
            # NOTE(review): mean() of an empty bin yields NaN; bins are
            # assumed non-empty for the grids used here -- confirm.
            phr[i] = var_dens[fkr].mean() * (kr[i]+dkr/2) * pi / (model.dk * model.dl)
        else:
            phr[i] = var_dens[fkr].sum() / dkr

        phr[i] *= 2 # include full circle

    # convert left border of the bin to center
    kr = kr + dkr/2

    # convert to non-dimensional wavenumber preserving the integral over the
    # spectrum.
    # BUGFIX: the previous code divided by `kmin`, which is always 0 here and
    # produced a division by zero; normalize by the smallest resolved
    # wavenumber increment instead so the minimum wavenumber maps to ~1.
    if nd_wavenumber:
        k0 = np.minimum(model.dk, model.dl)
        kr = kr / k0
        phr = phr * k0

    return kr, phr
def diagnostic_differences(m1, m2, reduction='rmse', instantaneous=False):
    """Compute a dictionary of differences in the diagnostics of two models at
    possibly different resolutions (e.g. for quantifying the effects of
    parameterizations). Applies normalization/isotropization to certain
    diagnostics before comparing them and skips others. Also computes
    differences for each vertical layer separately.

    Parameters
    ----------
    m1 : pyqg.Model instance
        The first model to compare
    m2 : pyqg.Model instance
        The second model to compare
    reduction : string or function
        A function that takes two arrays of diagnostics and computes a distance
        metric. Defaults to the root mean squared difference ('rmse').
    instantaneous : boolean
        If true, compute difference metrics for the instantaneous values of a
        diagnostic, rather than its time average. Defaults to false.

    Returns
    -------
    diffs : dict
        A dictionary of diagnostic name => distance. If the diagnostic is
        defined over multiple layers, separate keys are included with an
        appended (1-based) z index.
    """
    diffs = {}
    # Compute the minimum common wavenumber in case we're comparing two
    # models with different resolutions; isotropic spectra are truncated to
    # this common length before differencing.
    kr1, _ = calc_ispec(m1, m1.diagnostics['KEspec']['function'](m1)[0])
    kr2, _ = calc_ispec(m2, m2.diagnostics['KEspec']['function'](m2)[0])
    min_kr_length = min(len(kr1), len(kr2))

    # Helper to get a normalized version of diagnostics
    def get_normalized_diagnostic(model, diag_name, layer=None):
        # Get the raw diagnostic: instantaneous value or accumulated average
        attrs = model.diagnostics[diag_name]
        if instantaneous:
            diag = attrs['function'](model)
        else:
            diag = model.get_diagnostic(diag_name)
        # Check if we need to add other terms to this diagnostic (e.g.
        # KEflux + paramspec_KEflux)
        # NOTE(review): `+=` mutates `diag` in place when it is an ndarray;
        # this assumes the accessors above return a fresh array each call --
        # confirm against pyqg's diagnostic implementation.
        for diag_name2 in attrs.get('sums_with', []):
            if instantaneous:
                diag += model.diagnostics[diag_name2]['function'](model)
            else:
                diag += model.get_diagnostic(diag_name2)
        # Potentially limit to a layer
        if layer is not None:
            diag = diag[layer]
        # Potentially convert to isotropic spectrum, keeping only the
        # wavenumbers common to both models
        # NOTE(review): this equality only holds if 'dims' is a tuple; a
        # list would never compare equal to ('l','k') -- confirm.
        if attrs['dims'][-2:] == ('l','k'):
            kr, diag = calc_ispec(model, diag)
            diag = diag[:min_kr_length]
        # Return the normalized diagnostic
        return diag

    # Loop through all diagnostics
    for diag_name, attrs in m1.diagnostics.items():
        # Skip diagnostics flagged as not for comparison (TODO: diagnostics
        # should be objects and this should be a method, rather than a
        # dictionary key)
        if attrs.get('skip_comparison', False):
            continue
        # Skip diagnostics not present in the second model (usually not
        # necessary)
        if diag_name not in m2.diagnostics:
            continue
        # If we have multiple layers in this diagnostic, we want to consider
        # them separately with different keys
        if attrs['dims'][0] == 'lev':
            layers = range(m1.nz)
        elif attrs['dims'][0] == 'lev_mid':
            # interface-defined quantities have one fewer level
            layers = range(m1.nz - 1)
        else:
            layers = [None]
        for layer in layers:
            diag1 = get_normalized_diagnostic(m1, diag_name, layer)
            diag2 = get_normalized_diagnostic(m2, diag_name, layer)
            # 0-based layer index becomes a 1-based suffix in the key
            label = f"{diag_name}{'' if layer is None else layer+1}"
            # Compute the error
            if reduction == 'rmse':
                diff = np.sqrt(np.mean((diag1-diag2)**2))
            else:
                diff = reduction(diag1, diag2)
            diffs[label] = diff
    return diffs
def diagnostic_similarities(model, target, baseline, **kw):
    """Score how much closer `model` is to `target` than `baseline` is.

    Wraps `diagnostic_differences`: for each diagnostic key the score is
    ``1 - d(model, target) / d(baseline, target)``. Scores approach 1 when
    the model is much closer to the target than the baseline, are 0 when it
    is no closer, and are negative (unbounded below) when it is farther
    away. Keyword arguments are forwarded to `diagnostic_differences`.

    Parameters
    ----------
    model : pyqg.Model instance
        The model for which we want to compute similarity scores (e.g. a
        parameterized low resolution model)
    target : pyqg.Model instance
        The target model (e.g. a high resolution model)
    baseline : pyqg.Model instance
        The baseline against which we check for improvement or degradation
        (e.g. an unparameterized low resolution model)

    Returns
    -------
    sims : dict
        Diagnostic name => similarity score; layered diagnostics appear as
        separate keys with an appended z index.
    """
    model_dists = diagnostic_differences(model, target, **kw)
    baseline_dists = diagnostic_differences(baseline, target, **kw)
    return {key: 1 - model_dists[key] / baseline_dists[key] for key in model_dists}
import os
import re
import sys
def getExecutionPath(argv):
    """Determine the working directory for the script.

    If ``--workingDir <path>`` is present in argv, return that path;
    otherwise fall back to the directory containing the script itself.
    Returns None when the flag is given without a following value.
    """
    if "--workingDir" in argv:
        index = argv.index("--workingDir")
        if index < len(argv) - 1:
            # BUGFIX: take the token immediately after the flag; the old
            # code read argv[len(argv) - 1], i.e. whatever argument happened
            # to be last on the command line.
            value = argv[index + 1]
            print("Using workingDir value of " + value + ".")
            return value
        else:
            print("No value provided for parameter --workingDir.")
            return None
    else:
        return os.path.dirname(os.path.realpath(argv[0]))
# Top-level driver: for every directory under <EXECUTION_PATH>/phrag, fill the
# "<def>.template" file's {{marker}} placeholders with the contents of the
# sibling files (falling back to "<name>.default" files) and write the result
# to <EXECUTION_PATH>/<def>, unless --dryRun is passed.
EXECUTION_PATH = getExecutionPath(sys.argv)
IS_DRY_RUN = "--dryRun" in sys.argv

if EXECUTION_PATH is None:
    print("Could not properly set execution path. Exiting...")
    exit()

phragDirPath = os.path.join(EXECUTION_PATH, "phrag")
if not os.path.isdir(phragDirPath):
    print("Execution path " + EXECUTION_PATH + " does not contain phrag dir. Exiting...")
    exit()

phragDefs = os.listdir(phragDirPath)
for phragDef in phragDefs:
    # Collect every file belonging to this phrag definition.
    phragDefDir = os.path.join(phragDirPath, phragDef)
    fileRefs = [
        dict(path=os.path.join(phragDefDir, fileName), name=fileName)
        for fileName in os.listdir(phragDefDir)
    ]

    # The template for a definition is named "<def>.template".
    templateFileRef = next((fr for fr in fileRefs if fr["name"] == phragDef + "." + "template"), None)
    # BUGFIX: a missing template used to crash with a TypeError when
    # indexing None; report and skip the definition instead.
    if templateFileRef is None:
        print("No template file found for " + phragDef + ". Skipping...")
        continue
    with open(templateFileRef["path"], "r") as file:
        templateContent = file.read()

    # BUGFIX: use a non-greedy match; the greedy '{{.*}}' merged two markers
    # appearing on the same line into a single bogus marker.
    phragMarkers = re.findall(r'{{.*?}}', templateContent)

    # Candidate phrag files: everything except templates. A "<name>.default"
    # file stands in for "<name>" when the latter is absent.
    phragsFileRefs = [fr for fr in fileRefs if ".template" not in fr["name"]]
    defaultPhragFileRefs = [fr for fr in phragsFileRefs if ".default" in fr["name"]]
    for phragFileRef in defaultPhragFileRefs:
        bareName = phragFileRef["name"].replace(".default", "")
        if not any(fr["name"] == bareName for fr in phragsFileRefs):
            dictCopy = dict(phragFileRef)
            dictCopy["name"] = bareName
            phragsFileRefs.append(dictCopy)
    phragsFileRefs = [fr for fr in phragsFileRefs if ".default" not in fr["name"]]

    for phragMarker in phragMarkers:
        bareName = phragMarker.replace("{{", "").replace("}}", "")
        matchingFileRef = next((fr for fr in phragsFileRefs if fr["name"] == bareName), None)
        if matchingFileRef is None:
            print("No matching file (or default file) found for marker " + phragMarker + ".")
            continue
        phragFileContent = ""
        # BUGFIX: the old handler caught the undefined name `Error` (itself a
        # NameError at runtime) and closed the handle manually; use a context
        # manager and catch OSError, which open()/read() actually raise.
        try:
            with open(matchingFileRef["path"], "r") as file:
                phragFileContent = file.read()
        except OSError:
            print("Could not open file " + matchingFileRef["path"])
        # An unreadable file degrades to replacing the marker with "".
        templateContent = templateContent.replace(phragMarker, phragFileContent)

    if IS_DRY_RUN:
        print("WARNING: --dryRun flag passed. The file will not be written")
        print(templateContent)
    else:
        print("Writing file " + phragDef + "...")
        with open(os.path.join(EXECUTION_PATH, phragDef), "w") as file:
            file.write(templateContent)
print("Done.") | phrag.py | import os
import re
import sys
def getExecutionPath(argv):
    """Determine the working directory for the script.

    If ``--workingDir <path>`` is present in argv, return that path;
    otherwise fall back to the directory containing the script itself.
    Returns None when the flag is given without a following value.
    """
    if "--workingDir" in argv:
        index = argv.index("--workingDir")
        if index < len(argv) - 1:
            # BUGFIX: take the token immediately after the flag; the old
            # code read argv[len(argv) - 1], i.e. whatever argument happened
            # to be last on the command line.
            value = argv[index + 1]
            print("Using workingDir value of " + value + ".")
            return value
        else:
            print("No value provided for parameter --workingDir.")
            return None
    else:
        return os.path.dirname(os.path.realpath(argv[0]))
# Top-level driver: for every directory under <EXECUTION_PATH>/phrag, fill the
# "<def>.template" file's {{marker}} placeholders with the contents of the
# sibling files (falling back to "<name>.default" files) and write the result
# to <EXECUTION_PATH>/<def>, unless --dryRun is passed.
EXECUTION_PATH = getExecutionPath(sys.argv)
IS_DRY_RUN = "--dryRun" in sys.argv

if EXECUTION_PATH is None:
    print("Could not properly set execution path. Exiting...")
    exit()

phragDirPath = os.path.join(EXECUTION_PATH, "phrag")
if not os.path.isdir(phragDirPath):
    print("Execution path " + EXECUTION_PATH + " does not contain phrag dir. Exiting...")
    exit()

phragDefs = os.listdir(phragDirPath)
for phragDef in phragDefs:
    # Collect every file belonging to this phrag definition.
    phragDefDir = os.path.join(phragDirPath, phragDef)
    fileRefs = [
        dict(path=os.path.join(phragDefDir, fileName), name=fileName)
        for fileName in os.listdir(phragDefDir)
    ]

    # The template for a definition is named "<def>.template".
    templateFileRef = next((fr for fr in fileRefs if fr["name"] == phragDef + "." + "template"), None)
    # BUGFIX: a missing template used to crash with a TypeError when
    # indexing None; report and skip the definition instead.
    if templateFileRef is None:
        print("No template file found for " + phragDef + ". Skipping...")
        continue
    with open(templateFileRef["path"], "r") as file:
        templateContent = file.read()

    # BUGFIX: use a non-greedy match; the greedy '{{.*}}' merged two markers
    # appearing on the same line into a single bogus marker.
    phragMarkers = re.findall(r'{{.*?}}', templateContent)

    # Candidate phrag files: everything except templates. A "<name>.default"
    # file stands in for "<name>" when the latter is absent.
    phragsFileRefs = [fr for fr in fileRefs if ".template" not in fr["name"]]
    defaultPhragFileRefs = [fr for fr in phragsFileRefs if ".default" in fr["name"]]
    for phragFileRef in defaultPhragFileRefs:
        bareName = phragFileRef["name"].replace(".default", "")
        if not any(fr["name"] == bareName for fr in phragsFileRefs):
            dictCopy = dict(phragFileRef)
            dictCopy["name"] = bareName
            phragsFileRefs.append(dictCopy)
    phragsFileRefs = [fr for fr in phragsFileRefs if ".default" not in fr["name"]]

    for phragMarker in phragMarkers:
        bareName = phragMarker.replace("{{", "").replace("}}", "")
        matchingFileRef = next((fr for fr in phragsFileRefs if fr["name"] == bareName), None)
        if matchingFileRef is None:
            print("No matching file (or default file) found for marker " + phragMarker + ".")
            continue
        phragFileContent = ""
        # BUGFIX: the old handler caught the undefined name `Error` (itself a
        # NameError at runtime) and closed the handle manually; use a context
        # manager and catch OSError, which open()/read() actually raise.
        try:
            with open(matchingFileRef["path"], "r") as file:
                phragFileContent = file.read()
        except OSError:
            print("Could not open file " + matchingFileRef["path"])
        # An unreadable file degrades to replacing the marker with "".
        templateContent = templateContent.replace(phragMarker, phragFileContent)

    if IS_DRY_RUN:
        print("WARNING: --dryRun flag passed. The file will not be written")
        print(templateContent)
    else:
        print("Writing file " + phragDef + "...")
        with open(os.path.join(EXECUTION_PATH, phragDef), "w") as file:
            file.write(templateContent)
print("Done.")
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class SecurityScanConfig(pulumi.CustomResource):
    """A Cloud Security Scanner ScanConfig resource.

    NOTE(review): this class follows the auto-generated Pulumi provider SDK
    pattern (declared outputs, __init__/get constructors, translate_*
    property-name mappers); it is normally maintained by code generation.
    """
    # Resource output properties, populated by the provider after
    # creation/refresh.
    authentication: pulumi.Output[dict]
    blacklist_patterns: pulumi.Output[list]
    display_name: pulumi.Output[str]
    export_to_security_command_center: pulumi.Output[str]
    max_qps: pulumi.Output[float]
    name: pulumi.Output[str]
    project: pulumi.Output[str]
    """
    The ID of the project in which the resource belongs.
    If it is not provided, the provider project is used.
    """
    schedule: pulumi.Output[dict]
    starting_urls: pulumi.Output[list]
    target_platforms: pulumi.Output[list]
    user_agent: pulumi.Output[str]
    def __init__(__self__, resource_name, opts=None, authentication=None, blacklist_patterns=None, display_name=None, export_to_security_command_center=None, max_qps=None, project=None, schedule=None, starting_urls=None, target_platforms=None, user_agent=None, __props__=None, __name__=None, __opts__=None):
        """
        Create a SecurityScanConfig resource with the given unique name, props, and options.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] project: The ID of the project in which the resource belongs.
               If it is not provided, the provider project is used.

        The **authentication** object supports the following:

          * `customAccount` (`pulumi.Input[dict]`)
            * `loginUrl` (`pulumi.Input[str]`)
            * `password` (`pulumi.Input[str]`)
            * `username` (`pulumi.Input[str]`)
          * `googleAccount` (`pulumi.Input[dict]`)
            * `password` (`pulumi.Input[str]`)
            * `username` (`pulumi.Input[str]`)

        The **schedule** object supports the following:

          * `intervalDurationDays` (`pulumi.Input[float]`)
          * `scheduleTime` (`pulumi.Input[str]`)

        > This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/r/security_scanner_scan_config.html.markdown.
        """
        # Backward-compatibility shims: __name__/__opts__ are the deprecated
        # spellings of resource_name/opts.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = utilities.get_version()
        # No opts.id => registering a new resource: build the input property
        # bag and enforce the required inputs.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()

            __props__['authentication'] = authentication
            __props__['blacklist_patterns'] = blacklist_patterns
            if display_name is None:
                raise TypeError("Missing required property 'display_name'")
            __props__['display_name'] = display_name
            __props__['export_to_security_command_center'] = export_to_security_command_center
            __props__['max_qps'] = max_qps
            __props__['project'] = project
            __props__['schedule'] = schedule
            if starting_urls is None:
                raise TypeError("Missing required property 'starting_urls'")
            __props__['starting_urls'] = starting_urls
            __props__['target_platforms'] = target_platforms
            __props__['user_agent'] = user_agent
            # 'name' is an output-only property computed by the provider.
            __props__['name'] = None
        super(SecurityScanConfig, __self__).__init__(
            'gcp:compute/securityScanConfig:SecurityScanConfig',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name, id, opts=None, authentication=None, blacklist_patterns=None, display_name=None, export_to_security_command_center=None, max_qps=None, name=None, project=None, schedule=None, starting_urls=None, target_platforms=None, user_agent=None):
        """
        Get an existing SecurityScanConfig resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param str id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] project: The ID of the project in which the resource belongs.
               If it is not provided, the provider project is used.

        The **authentication** object supports the following:

          * `customAccount` (`pulumi.Input[dict]`)
            * `loginUrl` (`pulumi.Input[str]`)
            * `password` (`pulumi.Input[str]`)
            * `username` (`pulumi.Input[str]`)
          * `googleAccount` (`pulumi.Input[dict]`)
            * `password` (`pulumi.Input[str]`)
            * `username` (`pulumi.Input[str]`)

        The **schedule** object supports the following:

          * `intervalDurationDays` (`pulumi.Input[float]`)
          * `scheduleTime` (`pulumi.Input[str]`)

        > This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/r/security_scanner_scan_config.html.markdown.
        """
        # Attaching the provider id makes the engine read existing state
        # instead of creating a new resource.
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        __props__ = dict()

        __props__["authentication"] = authentication
        __props__["blacklist_patterns"] = blacklist_patterns
        __props__["display_name"] = display_name
        __props__["export_to_security_command_center"] = export_to_security_command_center
        __props__["max_qps"] = max_qps
        __props__["name"] = name
        __props__["project"] = project
        __props__["schedule"] = schedule
        __props__["starting_urls"] = starting_urls
        __props__["target_platforms"] = target_platforms
        __props__["user_agent"] = user_agent
        return SecurityScanConfig(resource_name, opts=opts, __props__=__props__)
    def translate_output_property(self, prop):
        # Map camelCase provider property names to snake_case Python names.
        return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
    def translate_input_property(self, prop):
        # Map snake_case Python names to camelCase provider property names.
        return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop | sdk/python/pulumi_gcp/compute/security_scan_config.py |
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class SecurityScanConfig(pulumi.CustomResource):
    """A Cloud Security Scanner ScanConfig resource.

    NOTE(review): this class follows the auto-generated Pulumi provider SDK
    pattern (declared outputs, __init__/get constructors, translate_*
    property-name mappers); it is normally maintained by code generation.
    """
    # Resource output properties, populated by the provider after
    # creation/refresh.
    authentication: pulumi.Output[dict]
    blacklist_patterns: pulumi.Output[list]
    display_name: pulumi.Output[str]
    export_to_security_command_center: pulumi.Output[str]
    max_qps: pulumi.Output[float]
    name: pulumi.Output[str]
    project: pulumi.Output[str]
    """
    The ID of the project in which the resource belongs.
    If it is not provided, the provider project is used.
    """
    schedule: pulumi.Output[dict]
    starting_urls: pulumi.Output[list]
    target_platforms: pulumi.Output[list]
    user_agent: pulumi.Output[str]
    def __init__(__self__, resource_name, opts=None, authentication=None, blacklist_patterns=None, display_name=None, export_to_security_command_center=None, max_qps=None, project=None, schedule=None, starting_urls=None, target_platforms=None, user_agent=None, __props__=None, __name__=None, __opts__=None):
        """
        Create a SecurityScanConfig resource with the given unique name, props, and options.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] project: The ID of the project in which the resource belongs.
               If it is not provided, the provider project is used.

        The **authentication** object supports the following:

          * `customAccount` (`pulumi.Input[dict]`)
            * `loginUrl` (`pulumi.Input[str]`)
            * `password` (`pulumi.Input[str]`)
            * `username` (`pulumi.Input[str]`)
          * `googleAccount` (`pulumi.Input[dict]`)
            * `password` (`pulumi.Input[str]`)
            * `username` (`pulumi.Input[str]`)

        The **schedule** object supports the following:

          * `intervalDurationDays` (`pulumi.Input[float]`)
          * `scheduleTime` (`pulumi.Input[str]`)

        > This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/r/security_scanner_scan_config.html.markdown.
        """
        # Backward-compatibility shims: __name__/__opts__ are the deprecated
        # spellings of resource_name/opts.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = utilities.get_version()
        # No opts.id => registering a new resource: build the input property
        # bag and enforce the required inputs.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()

            __props__['authentication'] = authentication
            __props__['blacklist_patterns'] = blacklist_patterns
            if display_name is None:
                raise TypeError("Missing required property 'display_name'")
            __props__['display_name'] = display_name
            __props__['export_to_security_command_center'] = export_to_security_command_center
            __props__['max_qps'] = max_qps
            __props__['project'] = project
            __props__['schedule'] = schedule
            if starting_urls is None:
                raise TypeError("Missing required property 'starting_urls'")
            __props__['starting_urls'] = starting_urls
            __props__['target_platforms'] = target_platforms
            __props__['user_agent'] = user_agent
            # 'name' is an output-only property computed by the provider.
            __props__['name'] = None
        super(SecurityScanConfig, __self__).__init__(
            'gcp:compute/securityScanConfig:SecurityScanConfig',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name, id, opts=None, authentication=None, blacklist_patterns=None, display_name=None, export_to_security_command_center=None, max_qps=None, name=None, project=None, schedule=None, starting_urls=None, target_platforms=None, user_agent=None):
        """
        Get an existing SecurityScanConfig resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param str id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] project: The ID of the project in which the resource belongs.
               If it is not provided, the provider project is used.

        The **authentication** object supports the following:

          * `customAccount` (`pulumi.Input[dict]`)
            * `loginUrl` (`pulumi.Input[str]`)
            * `password` (`pulumi.Input[str]`)
            * `username` (`pulumi.Input[str]`)
          * `googleAccount` (`pulumi.Input[dict]`)
            * `password` (`pulumi.Input[str]`)
            * `username` (`pulumi.Input[str]`)

        The **schedule** object supports the following:

          * `intervalDurationDays` (`pulumi.Input[float]`)
          * `scheduleTime` (`pulumi.Input[str]`)

        > This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/r/security_scanner_scan_config.html.markdown.
        """
        # Attaching the provider id makes the engine read existing state
        # instead of creating a new resource.
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        __props__ = dict()

        __props__["authentication"] = authentication
        __props__["blacklist_patterns"] = blacklist_patterns
        __props__["display_name"] = display_name
        __props__["export_to_security_command_center"] = export_to_security_command_center
        __props__["max_qps"] = max_qps
        __props__["name"] = name
        __props__["project"] = project
        __props__["schedule"] = schedule
        __props__["starting_urls"] = starting_urls
        __props__["target_platforms"] = target_platforms
        __props__["user_agent"] = user_agent
        return SecurityScanConfig(resource_name, opts=opts, __props__=__props__)
    def translate_output_property(self, prop):
        # Map camelCase provider property names to snake_case Python names.
        return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
    def translate_input_property(self, prop):
        # Map snake_case Python names to camelCase provider property names.
        return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop | 0.740925 | 0.069069 |
from avgn.utils.paths import DATA_DIR
import avgn
from avgn.utils.json import NoIndent, NoIndentEncoder
import json
import numpy as np
import librosa
import xml.etree.ElementTree
from avgn.utils.audio import get_samplerate
import pandas as pd
from tqdm.autonotebook import tqdm
DATASET_ID = "koumura_bengalese_finch"
def Koumura_Okanoya_parser(bird_xml_locs, wav_list):
    """Parse bird song annotations from the Koumura & Okanoya XML format.

    Parameters
    ----------
    bird_xml_locs : list of pathlib.Path
        Locations of annotation XML files; the parent directory name is
        used as the bird ID.
    wav_list : list
        Unused; retained for backward compatibility with existing callers.

    Returns
    -------
    pandas.DataFrame
        One row per <Sequence> element with its note positions, lengths
        and labels (all values kept as the strings found in the XML).
    """
    rows = []
    for bird_loc in tqdm(bird_xml_locs):
        bird_xml = xml.etree.ElementTree.parse(bird_loc).getroot()
        bird = bird_loc.parent.stem
        # BUGFIX: Element.getchildren() was removed in Python 3.9; iterate
        # over the element directly instead.
        for element in tqdm(list(bird_xml), leave=False):
            if element.tag != "Sequence":
                continue
            notePositions = []
            noteLengths = []
            noteLabels = []
            for seq_element in element:
                if seq_element.tag == "Position":
                    position = seq_element.text
                elif seq_element.tag == "Length":
                    length = seq_element.text
                elif seq_element.tag == "WaveFileName":
                    WaveFileName = seq_element.text
                elif seq_element.tag == "NumNote":
                    NumNote = seq_element.text
                elif seq_element.tag == "Note":
                    for note_element in seq_element:
                        if note_element.tag == "Label":
                            noteLabels.append(note_element.text)
                        elif note_element.tag == "Position":
                            notePositions.append(note_element.text)
                        elif note_element.tag == "Length":
                            noteLengths.append(note_element.text)
            # PERF: collect the rows and build the frame once at the end;
            # the old `song_df.loc[len(song_df)] = ...` append was quadratic.
            rows.append([bird, WaveFileName, position, length, NumNote,
                         notePositions, noteLengths, noteLabels])
    song_df = pd.DataFrame(
        rows,
        columns=[
            "bird",
            "WaveFileName",
            "Position",
            "Length",
            "NumNote",
            "NotePositions",
            "NoteLengths",
            "NoteLabels",
        ],
    )
    return song_df
def generate_json(DSLOC, DT_ID, bird, wfn, wfn_df):
    """Build the JSON annotation dictionary for one wav file of one bird.

    Parameters
    ----------
    DSLOC : path-like
        Dataset root; the wav is expected at ``DSLOC/<bird>/Wave/<wfn>``.
    DT_ID : str
        Identifier used in the processed output directory path.
    bird : str
        Bird identifier (directory name under DSLOC).
    wfn : str
        Wave file name.
    wfn_df : pandas.DataFrame
        Rows for this wav from `Koumura_Okanoya_parser`; must provide the
        Position, NotePositions, NoteLengths and NoteLabels columns
        (string-valued sample offsets, converted to seconds below).
    """
    # wav location
    wav_loc = DSLOC / bird / "Wave" / wfn
    # wav info
    sr = get_samplerate(wav_loc.as_posix())
    wav_duration = librosa.get_duration(filename=wav_loc)
    # make json dictionary
    json_dict = {}
    # add species
    json_dict["species"] = "Lonchura striata domestica"
    json_dict["common_name"] = "Bengalese finch"
    json_dict["wav_loc"] = wav_loc.as_posix()
    # rate and length
    json_dict["samplerate_hz"] = sr
    json_dict["length_s"] = wav_duration
    # One row per sequence: note labels plus absolute start/end times in
    # seconds (note offsets are relative to the sequence Position, both in
    # samples, hence the division by the samplerate).
    # wfn_df = bird_df[bird_df.WaveFileName == wfn]
    seq_df = pd.DataFrame(
        (
            [
                [
                    list(np.repeat(sequence_num, len(row.NotePositions))),
                    list(row.NoteLabels),
                    np.array(
                        (np.array(row.NotePositions).astype("int") + int(row.Position))
                        / sr
                    ).astype("float64"),
                    np.array(
                        (
                            np.array(row.NotePositions).astype("int")
                            + np.array(row.NoteLengths).astype("int")
                            + int(row.Position)
                        )
                        / sr
                    ).astype("float64"),
                ]
                for sequence_num, (idx, row) in enumerate(wfn_df.iterrows())
            ]
        ),
        columns=["sequence_num", "labels", "start_times", "end_times"],
    )
    # add syllable information; NoIndent keeps each flattened list on a
    # single line in the serialized JSON.
    json_dict["indvs"] = {
        bird: {
            "notes": {
                "start_times": NoIndent(
                    list(np.concatenate(seq_df.start_times.values))
                ),
                "end_times": NoIndent(list(np.concatenate(seq_df.end_times.values))),
                "labels": NoIndent(list(np.concatenate(seq_df.labels.values))),
                "sequence_num": NoIndent(
                    [int(i) for i in np.concatenate(seq_df.sequence_num.values)]
                ),
            }
        }
    }
    # dump json
    json_txt = json.dumps(json_dict, cls=NoIndentEncoder, indent=2)
    wav_stem = bird + "_" + wfn.split(".")[0]
    json_out = (
        DATA_DIR / "processed" / DATASET_ID / DT_ID / "JSON" / (wav_stem + ".JSON")
    )
    # make sure the output directory exists before the file is written
    avgn.utils.paths.ensure_dir(json_out.as_posix())
print(json_txt, file=open(json_out.as_posix(), "w")) | avgn/custom_parsing/koumura_bengalese_finch.py | from avgn.utils.paths import DATA_DIR
import avgn
from avgn.utils.json import NoIndent, NoIndentEncoder
import json
import numpy as np
import librosa
import xml.etree.ElementTree
from avgn.utils.audio import get_samplerate
import pandas as pd
from tqdm.autonotebook import tqdm
DATASET_ID = "koumura_bengalese_finch"
def Koumura_Okanoya_parser(bird_xml_locs, wav_list):
    """Parse XML annotations in the Koumura & Okanoya dataset format.

    Args:
        bird_xml_locs: iterable of paths to per-bird annotation XML files.
            Each path's parent directory name is used as the bird ID.
        wav_list: unused here; kept for interface compatibility with callers.

    Returns:
        pd.DataFrame with one row per <Sequence> element and columns
        bird, WaveFileName, Position, Length, NumNote, NotePositions,
        NoteLengths, NoteLabels.  Scalar fields hold the raw XML text
        (strings); the Note* columns hold lists of strings.
    """
    columns = [
        "bird",
        "WaveFileName",
        "Position",
        "Length",
        "NumNote",
        "NotePositions",
        "NoteLengths",
        "NoteLabels",
    ]
    # Collect rows in a list and build the DataFrame once at the end:
    # appending via song_df.loc[len(song_df)] is O(n^2) overall.
    rows = []
    for bird_loc in tqdm(bird_xml_locs):
        bird_xml = xml.etree.ElementTree.parse(bird_loc).getroot()
        bird = bird_loc.parent.stem
        # Iterate the Element directly: Element.getchildren() was removed
        # in Python 3.9.
        for element in tqdm(list(bird_xml), leave=False):
            if element.tag != "Sequence":
                continue
            notePositions = []
            noteLengths = []
            noteLabels = []
            # Defaults guard against a sequence missing a scalar child;
            # previously a missing child left the variable unbound (or
            # silently carried over from the previous sequence).
            position = length = WaveFileName = NumNote = None
            for seq_element in element:
                if seq_element.tag == "Position":
                    position = seq_element.text
                elif seq_element.tag == "Length":
                    length = seq_element.text
                elif seq_element.tag == "WaveFileName":
                    WaveFileName = seq_element.text
                elif seq_element.tag == "NumNote":
                    NumNote = seq_element.text
                elif seq_element.tag == "Note":
                    for note_element in seq_element:
                        if note_element.tag == "Label":
                            noteLabels.append(note_element.text)
                        elif note_element.tag == "Position":
                            notePositions.append(note_element.text)
                        elif note_element.tag == "Length":
                            noteLengths.append(note_element.text)
            rows.append(
                [
                    bird,
                    WaveFileName,
                    position,
                    length,
                    NumNote,
                    notePositions,
                    noteLengths,
                    noteLabels,
                ]
            )
    return pd.DataFrame(rows, columns=columns)
def generate_json(DSLOC, DT_ID, bird, wfn, wfn_df):
    """Build and write an AVGN-style JSON descriptor for one wav file.

    Args:
        DSLOC: dataset root path (pathlib-like; supports `/` joining)
        DT_ID: datetime-stamped run ID used in the output path
        bird: bird ID (subdirectory name)
        wfn: wav file name
        wfn_df: DataFrame of sequences for this wav (Koumura_Okanoya_parser rows)

    Writes DATA_DIR/processed/<DATASET_ID>/<DT_ID>/JSON/<bird>_<stem>.JSON.
    """
    # wav location
    wav_loc = DSLOC / bird / "Wave" / wfn
    # wav info
    sr = get_samplerate(wav_loc.as_posix())
    wav_duration = librosa.get_duration(filename=wav_loc)
    # make json dictionary
    json_dict = {}
    # add species
    json_dict["species"] = "Lonchura striata domestica"
    json_dict["common_name"] = "Bengalese finch"
    json_dict["wav_loc"] = wav_loc.as_posix()
    # rate and length
    json_dict["samplerate_hz"] = sr
    json_dict["length_s"] = wav_duration
    # make a dataframe of wav info
    # wfn_df = bird_df[bird_df.WaveFileName == wfn]
    # One row per sequence: note positions/lengths are sample offsets relative
    # to the sequence's Position; dividing by sr converts to seconds.
    seq_df = pd.DataFrame(
        (
            [
                [
                    list(np.repeat(sequence_num, len(row.NotePositions))),
                    list(row.NoteLabels),
                    np.array(
                        (np.array(row.NotePositions).astype("int") + int(row.Position))
                        / sr
                    ).astype("float64"),
                    np.array(
                        (
                            np.array(row.NotePositions).astype("int")
                            + np.array(row.NoteLengths).astype("int")
                            + int(row.Position)
                        )
                        / sr
                    ).astype("float64"),
                ]
                for sequence_num, (idx, row) in enumerate(wfn_df.iterrows())
            ]
        ),
        columns=["sequence_num", "labels", "start_times", "end_times"],
    )
    # add syllable information
    # NoIndent keeps each list on a single line in the dumped JSON.
    json_dict["indvs"] = {
        bird: {
            "notes": {
                "start_times": NoIndent(
                    list(np.concatenate(seq_df.start_times.values))
                ),
                "end_times": NoIndent(list(np.concatenate(seq_df.end_times.values))),
                "labels": NoIndent(list(np.concatenate(seq_df.labels.values))),
                "sequence_num": NoIndent(
                    [int(i) for i in np.concatenate(seq_df.sequence_num.values)]
                ),
            }
        }
    }
    # dump json
    json_txt = json.dumps(json_dict, cls=NoIndentEncoder, indent=2)
    wav_stem = bird + "_" + wfn.split(".")[0]
    json_out = (
        DATA_DIR / "processed" / DATASET_ID / DT_ID / "JSON" / (wav_stem + ".JSON")
    )
    # save json
    avgn.utils.paths.ensure_dir(json_out.as_posix())
print(json_txt, file=open(json_out.as_posix(), "w")) | 0.380644 | 0.218909 |
import scipy as N
def fastnorm(x):
    """Euclidean (L2) norm of ``x`` taken over its flattened elements.

    Relies on a BLAS-backed dot product, which is typically faster than
    numpy.linalg.norm for this purpose.
    """
    flat = x.ravel()
    return N.dot(flat, flat) ** 0.5
def fastsvd(M):
    """Singular Value Decomposition via the smaller Gram matrix.

    For an h-by-w matrix, runs the dense SVD on the min(h, w)-sized Gram
    matrix (M'M or MM') and recovers the remaining factor by projection,
    which is cheaper than a direct SVD when M is very thin or very fat.

    Inputs:
      M -- 2d numpy array
    Outputs:
      U, S, V -- see scipy.linalg.svd (S holds the singular values of M)
    """
    h, w = M.shape
    # -- thin matrix
    if h >= w:
        # subspace of M'M
        U, S, V = N.linalg.svd(N.dot(M.T, M))
        U = N.dot(M, V.T)
        # normalize: each projected column has length sigma_i, so its norm
        # is the singular value; record it and rescale the column.
        # (range instead of xrange: xrange is Python-2 only)
        for i in range(w):
            S[i] = fastnorm(U[:, i])
            U[:, i] = U[:, i] / S[i]
    # -- fat matrix
    else:
        # subspace of MM'
        U, S, V = N.linalg.svd(N.dot(M, M.T))
        V = N.dot(U.T, M)
        # normalize
        for i in range(h):
            S[i] = fastnorm(V[i])
            V[i, :] = V[i] / S[i]
    return U, S, V
def gabor2d(gsw, gsh, gx0, gy0, wfreq, worient, wphase, shape):
    """ Generate a gabor 2d array
    Inputs:
      gsw -- standard deviation of the gaussian envelope (width)
      gsh -- standard deviation of the gaussian envelope (height)
      gx0 -- x indice of center of the gaussian envelope
      gy0 -- y indice of center of the gaussian envelope
      wfreq -- frequency of the 2d wave
      worient -- orientation of the 2d wave
      wphase -- phase of the 2d wave
      shape -- shape tuple (height, width)
    Outputs:
      gabor -- 2d gabor, zero-mean, scaled to unit L2 norm
        (NOTE(review): the original doc said "unit-variance", but the code
        divides by the L2 norm, not by the standard deviation)
    """
    height, width = shape
    # pixel coordinate grids: y indexes rows, x indexes columns
    y, x = N.mgrid[0:height, 0:width]
    # coordinates projected onto the wave direction, scaled by frequency
    X = x * N.cos(worient) * wfreq
    Y = y * N.sin(worient) * wfreq
    # anisotropic gaussian envelope centered at (gx0, gy0)
    env = N.exp( -.5 * ( ((x-gx0)**2./gsw**2.) + ((y-gy0)**2./gsh**2.) ) )
    # complex sinusoid; only its real (cosine) part is kept below
    wave = N.exp( 1j*(2*N.pi*(X+Y) + wphase) )
    gabor = N.real(env * wave)
    # zero-mean, then scale to unit L2 norm
    gabor -= gabor.mean()
    gabor /= fastnorm(gabor)
return gabor | v1s_math.py | import scipy as N
def fastnorm(x):
    """Return the L2 norm of ``x`` computed from its 1-d view.

    Uses a (BLAS-backed) dot product, which should outperform
    numpy.linalg.norm for this use case.
    """
    v = x.ravel()
    squared = N.dot(v, v)
    return squared ** (1 / 2.)
def fastsvd(M):
    """Fast Singular Value Decomposition through the smaller Gram matrix.

    Runs the dense SVD on M'M (thin M) or MM' (fat M) and recovers the
    other factor by projecting M, then rescaling to unit columns/rows.

    Inputs:
      M -- 2d numpy array
    Outputs:
      U, S, V -- see scipy.linalg.svd (S holds the singular values of M)
    """
    h, w = M.shape
    # -- thin matrix
    if h >= w:
        # subspace of M'M
        U, S, V = N.linalg.svd(N.dot(M.T, M))
        U = N.dot(M, V.T)
        # normalize; the column norms are the singular values of M.
        # `range` replaces `xrange`, which no longer exists on Python 3.
        for i in range(w):
            S[i] = fastnorm(U[:, i])
            U[:, i] = U[:, i] / S[i]
    # -- fat matrix
    else:
        # subspace of MM'
        U, S, V = N.linalg.svd(N.dot(M, M.T))
        V = N.dot(U.T, M)
        # normalize; row norms are the singular values of M
        for i in range(h):
            S[i] = fastnorm(V[i])
            V[i, :] = V[i] / S[i]
    return U, S, V
def gabor2d(gsw, gsh, gx0, gy0, wfreq, worient, wphase, shape):
    """ Generate a gabor 2d array
    Inputs:
      gsw -- standard deviation of the gaussian envelope (width)
      gsh -- standard deviation of the gaussian envelope (height)
      gx0 -- x indice of center of the gaussian envelope
      gy0 -- y indice of center of the gaussian envelope
      wfreq -- frequency of the 2d wave
      worient -- orientation of the 2d wave
      wphase -- phase of the 2d wave
      shape -- shape tuple (height, width)
    Outputs:
      gabor -- 2d gabor, zero-mean, scaled to unit L2 norm
        (NOTE(review): described as "unit-variance" originally, but the
        code divides by the L2 norm rather than the standard deviation)
    """
    height, width = shape
    # coordinate grids: y over rows, x over columns
    y, x = N.mgrid[0:height, 0:width]
    # project coordinates onto the wave direction (in cycles)
    X = x * N.cos(worient) * wfreq
    Y = y * N.sin(worient) * wfreq
    # gaussian envelope centered at (gx0, gy0) with per-axis widths
    env = N.exp( -.5 * ( ((x-gx0)**2./gsw**2.) + ((y-gy0)**2./gsh**2.) ) )
    # complex carrier wave; only the real part survives below
    wave = N.exp( 1j*(2*N.pi*(X+Y) + wphase) )
    gabor = N.real(env * wave)
    # remove the DC component, then normalize energy
    gabor -= gabor.mean()
    gabor /= fastnorm(gabor)
return gabor | 0.621656 | 0.537163 |
from collections import defaultdict
from copy import copy
from sqlglot import expressions as exp
from sqlglot.errors import OptimizeError
from sqlglot.optimizer.helper import SelectParts
# Sentinel value that means an outer query selecting ALL columns
SELECT_ALL = object()
def projection_pushdown(expression):
    """
    Rewrite sqlglot AST to remove unused columns projections.
    Example:
        >>> import sqlglot
        >>> sql = "SELECT y.a AS a FROM (SELECT x.a AS a, x.b AS b FROM x) AS y"
        >>> expression = sqlglot.parse_one(sql)
        >>> projection_pushdown(expression).sql()
        'SELECT y.a AS a FROM (SELECT x.a AS a FROM x) AS y'
    Args:
        expression (sqlglot.Expression): expression to optimize
    Returns:
        sqlglot.Expression: optimized expression
    """
    # Work on a copy so the caller's tree is untouched; the outermost query
    # keeps every column (SELECT_ALL).
    optimized = expression.copy()
    _pushdown_statement(optimized, SELECT_ALL)
    return optimized
def _pushdown_statement(expression, parent_selections):
    """
    Search SELECT or UNION for columns that can be removed.
    Args:
        expression (exp.Select or exp.Union): expression to search
        parent_selections: columns being selected by an outer query.
            This can be the special value `SELECT_ALL`, which mean the outer query
            is selecting everything.
    Returns:
        dict: Mapping of selectable names to columns.
            This is used during recursion, so the outer query can:
            1. pullup any selected columns
            2. pushdown selected columns into CTEs
    """
    if isinstance(expression, exp.Select):
        return _pushdown_select(expression, parent_selections)
    if isinstance(expression, exp.Union):
        if expression.args.get("distinct"):
            # We can't remove selections on simple UNION
            # (UNION DISTINCT dedupes whole rows, so every column matters)
            parent_selections = SELECT_ALL
        # Both arms of the UNION must expose the same columns, so the
        # selections found on each side are merged before CTE pushdown.
        selections = _pushdown_select(expression.this, parent_selections)
        selections = _merge_selections(
            selections,
            _pushdown_select(expression.args.get("expression"), parent_selections),
        )
        _pushdown_ctes(expression.ctes, selections)
        return selections
    raise OptimizeError(f"Unexpected statement type: {type(expression)}")
def _pushdown_select(expression, parent_selections):
    """
    Search SELECT for columns that can be removed.
    Args:
        expression (exp.Select): query whose projections may be pruned
        parent_selections: aliases the outer query selects, or SELECT_ALL
    Returns:
        Same as `_pushdown_statement`
    """
    parts = SelectParts.build(expression)
    # Collect a map of all referenced columns
    columns = {}
    for column in parts.columns:
        selectable_name = column.text("table")
        column_name = column.text("this")
        if not selectable_name:
            msg = (
                "Expected all columns to have table prefixes. "
                "Did you run 'qualify_columns' first?\n"
                f"Received: {column_name}"
            )
            raise OptimizeError(msg)
        # Use the Expression identity for the key since Expressions are hashed by value.
        columns[id(column)] = column
    # Collect all the selections
    # (skip pruning under DISTINCT: deduplication depends on every column)
    if not expression.args.get("distinct"):
        columns = _remove_unused_selections(expression, columns, parent_selections)
    for subquery in parts.subqueries:
        # Subqueries (as opposed to "derived_tables") aren't "selectable".
        # So none of the columns in the current scope can reference these.
        _pushdown_statement(subquery, SELECT_ALL)
    # Now that we've removed all the unused columns from the selections, let's
    # build a map of all the columns we're selecting from derived tables.
    derived_table_selections = defaultdict(set)
    for column in columns.values():
        derived_table_selections[column.text("table")].add(column.text("this"))
    for subquery in parts.derived_tables:
        _pushdown_statement(subquery.this, derived_table_selections[subquery.alias])
    _pushdown_ctes(parts.ctes, derived_table_selections)
    # Push the selections back UP so they can be used by CTEs in outer queries
    return derived_table_selections
def _pushdown_ctes(ctes, selections):
    """Push selected columns down into each CTE.

    CTEs are scanned in reverse order because a CTE can reference outputs
    of earlier CTEs; selections discovered inside each CTE are folded into
    the running selections map.
    """
    if not ctes:
        return
    for cte in reversed(ctes):
        needed = selections.get(cte.alias, set())
        selections = _merge_selections(selections, _pushdown_statement(cte.this, needed))
def _remove_unused_selections(expression, columns, parent_selections):
    """Trim `expression`'s projection list down to what the outer query uses.

    Args:
        expression (exp.Select): modified in place via its "expressions" arg
        columns: id(column) -> column map of all columns referenced in scope
        parent_selections: aliases selected by the outer query, or SELECT_ALL
    Returns:
        dict: `columns` with the removed selections' columns popped out
    """
    columns = copy(columns)
    new_selections = []
    for selection in expression.selects:
        if not isinstance(selection, exp.Alias):
            msg = (
                "Expected all selections to have aliases. "
                "Did you run 'qualify_columns' first?\n"
                f"Received: {selection}"
            )
            raise OptimizeError(msg)
        if parent_selections is SELECT_ALL or selection.alias in parent_selections:
            new_selections.append(selection)
        else:
            # Pop the column out of the set of all columns.
            # Later, we'll use this set of columns to pushdown the selected columns from inner queries.
            for column in selection.find_all(exp.Column):
                columns.pop(id(column))
    # If there are no remaining selections, just select a single constant
    if not new_selections:
        new_selections.append(exp.alias_("1", "_"))
    expression.set("expressions", new_selections)
    return columns
def _merge_selections(*selections):
    """Union several {selectable_name: {column_name, ...}} maps into one."""
    result = defaultdict(set)
    for s in selections:
        for name, columns in s.items():
            result[name].update(columns)
return result | sqlglot/optimizer/projection_pushdown.py | from collections import defaultdict
from copy import copy
from sqlglot import expressions as exp
from sqlglot.errors import OptimizeError
from sqlglot.optimizer.helper import SelectParts
# Sentinel value that means an outer query selecting ALL columns
SELECT_ALL = object()
def projection_pushdown(expression):
    """
    Rewrite sqlglot AST to remove unused columns projections.
    Example:
        >>> import sqlglot
        >>> sql = "SELECT y.a AS a FROM (SELECT x.a AS a, x.b AS b FROM x) AS y"
        >>> expression = sqlglot.parse_one(sql)
        >>> projection_pushdown(expression).sql()
        'SELECT y.a AS a FROM (SELECT x.a AS a FROM x) AS y'
    Args:
        expression (sqlglot.Expression): expression to optimize
    Returns:
        sqlglot.Expression: optimized expression
    """
    # The caller's AST stays intact; the top-level query is treated as
    # selecting everything.
    tree = expression.copy()
    _pushdown_statement(tree, SELECT_ALL)
    return tree
def _pushdown_statement(expression, parent_selections):
    """
    Search SELECT or UNION for columns that can be removed.
    Args:
        expression (exp.Select or exp.Union): expression to search
        parent_selections: columns being selected by an outer query.
            This can be the special value `SELECT_ALL`, which mean the outer query
            is selecting everything.
    Returns:
        dict: Mapping of selectable names to columns.
            This is used during recursion, so the outer query can:
            1. pullup any selected columns
            2. pushdown selected columns into CTEs
    """
    if isinstance(expression, exp.Select):
        return _pushdown_select(expression, parent_selections)
    if isinstance(expression, exp.Union):
        if expression.args.get("distinct"):
            # We can't remove selections on simple UNION
            # (deduplication considers the whole row, so keep every column)
            parent_selections = SELECT_ALL
        # Merge what each side of the UNION needs before pushing into CTEs.
        selections = _pushdown_select(expression.this, parent_selections)
        selections = _merge_selections(
            selections,
            _pushdown_select(expression.args.get("expression"), parent_selections),
        )
        _pushdown_ctes(expression.ctes, selections)
        return selections
    raise OptimizeError(f"Unexpected statement type: {type(expression)}")
def _pushdown_select(expression, parent_selections):
    """
    Search SELECT for columns that can be removed.
    Args:
        expression (exp.Select): query whose projections may be pruned
        parent_selections: aliases the outer query selects, or SELECT_ALL
    Returns:
        Same as `_pushdown_statement`
    """
    parts = SelectParts.build(expression)
    # Collect a map of all referenced columns
    columns = {}
    for column in parts.columns:
        selectable_name = column.text("table")
        column_name = column.text("this")
        if not selectable_name:
            msg = (
                "Expected all columns to have table prefixes. "
                "Did you run 'qualify_columns' first?\n"
                f"Received: {column_name}"
            )
            raise OptimizeError(msg)
        # Use the Expression identity for the key since Expressions are hashed by value.
        columns[id(column)] = column
    # Collect all the selections
    # (DISTINCT queries keep every projection; dedup depends on all columns)
    if not expression.args.get("distinct"):
        columns = _remove_unused_selections(expression, columns, parent_selections)
    for subquery in parts.subqueries:
        # Subqueries (as opposed to "derived_tables") aren't "selectable".
        # So none of the columns in the current scope can reference these.
        _pushdown_statement(subquery, SELECT_ALL)
    # Now that we've removed all the unused columns from the selections, let's
    # build a map of all the columns we're selecting from derived tables.
    derived_table_selections = defaultdict(set)
    for column in columns.values():
        derived_table_selections[column.text("table")].add(column.text("this"))
    for subquery in parts.derived_tables:
        _pushdown_statement(subquery.this, derived_table_selections[subquery.alias])
    _pushdown_ctes(parts.ctes, derived_table_selections)
    # Push the selections back UP so they can be used by CTEs in outer queries
    return derived_table_selections
def _pushdown_ctes(ctes, selections):
    """Recurse into each CTE with the set of columns selected from it.

    Iteration is last-to-first since later CTEs may consume columns of
    earlier ones; every CTE's own requirements are merged back into the
    selections map as we go.
    """
    if not ctes:
        return
    for cte in reversed(ctes):
        inner = _pushdown_statement(cte.this, selections.get(cte.alias, set()))
        selections = _merge_selections(selections, inner)
def _remove_unused_selections(expression, columns, parent_selections):
    """Drop projections of `expression` the outer query never references.

    Args:
        expression (exp.Select): its "expressions" arg is rewritten in place
        columns: id(column) -> column map of all columns referenced in scope
        parent_selections: aliases selected by the outer query, or SELECT_ALL
    Returns:
        dict: `columns` minus the columns belonging to removed selections
    """
    columns = copy(columns)
    new_selections = []
    for selection in expression.selects:
        if not isinstance(selection, exp.Alias):
            msg = (
                "Expected all selections to have aliases. "
                "Did you run 'qualify_columns' first?\n"
                f"Received: {selection}"
            )
            raise OptimizeError(msg)
        if parent_selections is SELECT_ALL or selection.alias in parent_selections:
            new_selections.append(selection)
        else:
            # Pop the column out of the set of all columns.
            # Later, we'll use this set of columns to pushdown the selected columns from inner queries.
            for column in selection.find_all(exp.Column):
                columns.pop(id(column))
    # If there are no remaining selections, just select a single constant
    if not new_selections:
        new_selections.append(exp.alias_("1", "_"))
    expression.set("expressions", new_selections)
    return columns
def _merge_selections(*selections):
    """Union multiple {selectable_name: {column_name, ...}} maps."""
    result = defaultdict(set)
    for s in selections:
        for name, columns in s.items():
            result[name].update(columns)
return result | 0.913489 | 0.595992 |
import json
from os.path import join
from functools import wraps
from twisted.internet.threads import deferToThread
from twisted.internet.task import deferLater
from twisted.internet.defer import inlineCallbacks
from twisted.internet import reactor
from slyd.projects import ProjectsManager
from slyd.projecttemplates import templates
from slyd.errors import BadRequest
from .repoman import Repoman
def run_in_thread(func):
    """Decorator that runs the wrapped callable in Twisted's thread pool,
    returning the Deferred produced by deferToThread."""
    @wraps(func)
    def threaded_call(*args, **kwargs):
        return deferToThread(func, *args, **kwargs)
    return threaded_call
def retry_operation(retries=3, catches=(Exception,), seconds=0):
    '''
    Decorator factory retrying a (possibly Deferred-returning) operation.

    :param retries: Number of times to attempt the operation
    :param catches: Which exceptions to catch and trigger a retry
    :param seconds: How long to wait between retries

    Re-raises the last caught exception only when every attempt failed.
    '''
    def wrapper(func):
        def sleep(sec):
            # deferLater with a no-op callable is a non-blocking delay
            return deferLater(reactor, sec, lambda: None)

        @wraps(func)
        @inlineCallbacks
        def wrapped(*args, **kwargs):
            err = None
            for _ in range(retries):
                try:
                    yield func(*args, **kwargs)
                except catches as e:
                    err = e
                    yield sleep(seconds)
                else:
                    # Success: clear any exception kept from earlier attempts.
                    # (Bug fix: previously a stale `err` from a failed first
                    # attempt was re-raised even after a later attempt
                    # succeeded.)
                    err = None
                    break
            if err is not None:
                raise err
        return wrapped
    return wrapper
class GitProjectsManager(ProjectsManager):
    @classmethod
    def setup(cls, storage_backend, location):
        # One-time initialization of the underlying git storage layer.
        Repoman.setup(storage_backend, location)

    def __init__(self, *args, **kwargs):
        ProjectsManager.__init__(self, *args, **kwargs)
        # Command-name -> handler dispatch table (presumably consumed by
        # ProjectsManager's command routing -- verify against the base class).
        self.project_commands = {
            'create': self.create_project,
            'mv': self.rename_project,
            'rm': self.remove_project,
            'edit': self.edit_project,
            'publish': self.publish_project,
            'discard': self.discard_changes,
            'revisions': self.project_revisions,
            'conflicts': self.conflicted_files,
            'changes': self.changed_files,
            'save': self.save_file,
        }

    def _open_repo(self, name):
        # Thin wrapper so subclasses/tests can override how repos are opened.
        return Repoman.open_repo(name)
def _get_branch(self, repo, read_only=False):
if repo.has_branch(self.user):
return self.user
elif not read_only:
repo.create_branch(self.user, repo.get_branch('master'))
return self.user
else:
return 'master'
    def all_projects(self):
        """List the names of all existing project repositories."""
        return Repoman.list_repos()

    def create_project(self, name):
        """Create a new project repo seeded with the scrapy project skeleton."""
        self.validate_project_name(name)
        project_files = {
            'project.json': templates['PROJECT'],
            'scrapy.cfg': templates['SCRAPY'],
            'setup.py': templates['SETUP'] % str(name),
            join('spiders', '__init__.py'): '',
            join('spiders', 'settings.py'): templates['SETTINGS'],
        }
        try:
            Repoman.create_repo(name).save_files(project_files, 'master')
        except NameError:
            # Repoman signals an already-existing repo with NameError,
            # which maps to an HTTP 400 here.
            raise BadRequest("Bad Request",
                             "Project already exists with that name")

    def remove_project(self, name):
        Repoman.delete_repo(name)

    def edit_project(self, name, revision):
        # Do nothing here, but subclasses can use this method as a hook
        # e.g. to import projects from another source.
        return
@run_in_thread
def publish_project(self, name, force):
repoman = self._open_repo(name)
if repoman.publish_branch(self._get_branch(repoman), force):
repoman.kill_branch(self._get_branch(repoman))
return {'status': 'ok'}
else:
return {'status': 'conflict'}
    def discard_changes(self, name):
        """Throw away the user's unpublished branch for this project."""
        repoman = self._open_repo(name)
        repoman.kill_branch(self._get_branch(repoman))

    def project_revisions(self, name):
        """Return a JSON string listing the published revisions."""
        repoman = self._open_repo(name)
        return json.dumps({'revisions': repoman.get_published_revisions()})

    @run_in_thread
    def conflicted_files(self, name):
        """Return (in a worker thread) a JSON string of conflicted files on
        the user's branch (or on master when the user has no branch)."""
        repoman = self._open_repo(name)
        return json.dumps(
            repoman.get_branch_conflicted_files(
                self._get_branch(repoman, read_only=True)))

    @run_in_thread
    def changed_files(self, name):
        """Threaded wrapper around _changed_files."""
        return self._changed_files(name)

    def _changed_files(self, name):
        """Return a JSON string of files changed on the user's branch."""
        repoman = self._open_repo(name)
        return json.dumps(repoman.get_branch_changed_files(
            self._get_branch(repoman, read_only=True)))
def save_file(self, name, file_path, file_contents):
repoman = self._open_repo(name)
repoman.save_file(file_path, json.dumps(
file_contents,
sort_keys=True, indent=4), self._get_branch(repoman)) | slyd/slyd/gitstorage/projects.py | import json
from os.path import join
from functools import wraps
from twisted.internet.threads import deferToThread
from twisted.internet.task import deferLater
from twisted.internet.defer import inlineCallbacks
from twisted.internet import reactor
from slyd.projects import ProjectsManager
from slyd.projecttemplates import templates
from slyd.errors import BadRequest
from .repoman import Repoman
def run_in_thread(func):
    '''A decorator to defer execution to a thread'''
    @wraps(func)
    def deferred(*args, **kwargs):
        # Twisted executes `func` in its thread pool and returns a Deferred.
        return deferToThread(func, *args, **kwargs)
    return deferred
def retry_operation(retries=3, catches=(Exception,), seconds=0):
    '''
    Retry decorator factory for Deferred-returning operations.

    :param retries: Number of times to attempt the operation
    :param catches: Which exceptions to catch and trigger a retry
    :param seconds: How long to wait between retries

    The last caught exception is re-raised only if all attempts fail.
    '''
    def wrapper(func):
        def sleep(sec):
            # asynchronous pause: deferLater fires after `sec` seconds
            return deferLater(reactor, sec, lambda: None)

        @wraps(func)
        @inlineCallbacks
        def wrapped(*args, **kwargs):
            err = None
            for _ in range(retries):
                try:
                    yield func(*args, **kwargs)
                except catches as e:
                    err = e
                    yield sleep(seconds)
                else:
                    # Fix: reset `err` on success so an exception caught on
                    # an earlier attempt is not re-raised after a retry
                    # eventually succeeded.
                    err = None
                    break
            if err is not None:
                raise err
        return wrapped
    return wrapper
class GitProjectsManager(ProjectsManager):
    @classmethod
    def setup(cls, storage_backend, location):
        # One-time initialization of the git storage backend.
        Repoman.setup(storage_backend, location)

    def __init__(self, *args, **kwargs):
        ProjectsManager.__init__(self, *args, **kwargs)
        # Command-name -> handler dispatch table (presumably used by
        # ProjectsManager's command routing -- verify against the base class).
        self.project_commands = {
            'create': self.create_project,
            'mv': self.rename_project,
            'rm': self.remove_project,
            'edit': self.edit_project,
            'publish': self.publish_project,
            'discard': self.discard_changes,
            'revisions': self.project_revisions,
            'conflicts': self.conflicted_files,
            'changes': self.changed_files,
            'save': self.save_file,
        }

    def _open_repo(self, name):
        # Thin wrapper; overridable hook for opening a project repo.
        return Repoman.open_repo(name)

    def _get_branch(self, repo, read_only=False):
        # Each user works on a personal branch, lazily forked from master;
        # read-only callers without a branch fall back to master itself.
        if repo.has_branch(self.user):
            return self.user
        elif not read_only:
            repo.create_branch(self.user, repo.get_branch('master'))
            return self.user
        else:
            return 'master'

    def all_projects(self):
        """List the names of all existing project repositories."""
        return Repoman.list_repos()

    def create_project(self, name):
        """Create a new project repo seeded with the scrapy skeleton files."""
        self.validate_project_name(name)
        project_files = {
            'project.json': templates['PROJECT'],
            'scrapy.cfg': templates['SCRAPY'],
            'setup.py': templates['SETUP'] % str(name),
            join('spiders', '__init__.py'): '',
            join('spiders', 'settings.py'): templates['SETTINGS'],
        }
        try:
            Repoman.create_repo(name).save_files(project_files, 'master')
        except NameError:
            # Repoman raises NameError for an already-existing repo name.
            raise BadRequest("Bad Request",
                             "Project already exists with that name")

    def remove_project(self, name):
        Repoman.delete_repo(name)

    def edit_project(self, name, revision):
        # Do nothing here, but subclasses can use this method as a hook
        # e.g. to import projects from another source.
        return
@run_in_thread
def publish_project(self, name, force):
repoman = self._open_repo(name)
if repoman.publish_branch(self._get_branch(repoman), force):
repoman.kill_branch(self._get_branch(repoman))
return {'status': 'ok'}
else:
return {'status': 'conflict'}
    def discard_changes(self, name):
        """Delete the user's unpublished branch for this project."""
        repoman = self._open_repo(name)
        repoman.kill_branch(self._get_branch(repoman))

    def project_revisions(self, name):
        """Return a JSON string of the project's published revisions."""
        repoman = self._open_repo(name)
        return json.dumps({'revisions': repoman.get_published_revisions()})

    @run_in_thread
    def conflicted_files(self, name):
        """JSON string of conflicted files on the user's branch (or master
        when the user has none); runs in a worker thread."""
        repoman = self._open_repo(name)
        return json.dumps(
            repoman.get_branch_conflicted_files(
                self._get_branch(repoman, read_only=True)))

    @run_in_thread
    def changed_files(self, name):
        """Threaded wrapper around _changed_files."""
        return self._changed_files(name)

    def _changed_files(self, name):
        """JSON string of files changed on the user's branch."""
        repoman = self._open_repo(name)
        return json.dumps(repoman.get_branch_changed_files(
            self._get_branch(repoman, read_only=True)))
def save_file(self, name, file_path, file_contents):
repoman = self._open_repo(name)
repoman.save_file(file_path, json.dumps(
file_contents,
sort_keys=True, indent=4), self._get_branch(repoman)) | 0.534127 | 0.069573 |
from collections import defaultdict, namedtuple
import os
import cv2
import numpy as np
import pandas as pd
import utils
class DataPreprocessing:
    """Preprocessing base class. Since resizing is necessary for both train and test set, it is defined here"""

    def __init__(self, dataset_parameters, base_csv, dataset_dirs):
        # img_shape is coerced to a numpy array so it participates in
        # vector arithmetic (centering / cropping) below
        self.dataset_parameters = dataset_parameters
        self.dataset_parameters.img_shape = np.asarray(self.dataset_parameters.img_shape)
        self.base_dataset_dir = dataset_parameters.base_dataset_dir
        self.dataset_dirs = dataset_dirs
        # CSV file used as a cache/index of already-processed paths
        self.base_csv = base_csv
        # factory for (center, angle, scale, offset) affine-transform records
        self.transformation_parameters = namedtuple("Transformation", ["center", "angle", "scale", "offset"])
        utils.makedir(dataset_parameters.data_preprocessing_output_dir)

    def resize_and_center_data(self):
        """Resize every image so the face is centered at a fixed size, saving
        results (and a CSV index) under data_preprocessing_output_dir.

        The CSV acts as a cache: if it exists, the stored DataFrame is
        returned without reprocessing.
        """
        if utils.is_exists(self.base_csv):
            resized_df = pd.read_csv(self.base_csv)
        else:
            print("Resizing and centering data...")
            resized_img_paths = []
            resized_landmarks_paths = []
            img_paths = self.get_image_paths(self.dataset_dirs)
            for img_path in img_paths:
                img, landmarks = self.get_image_and_landmarks(img_path)
                resizing_parameters = self.get_resizing_transformation_parameters(img, landmarks)
                resized_img, resized_landmarks = self.resize_img_and_landmarks(img, landmarks, resizing_parameters)
                # mirror the input directory layout under the output dir
                img_save_path = img_path.replace(self.base_dataset_dir,
                                                 self.dataset_parameters.data_preprocessing_output_dir)
                utils.save_image(resized_img, img_save_path)
                landmarks_save_path = img_save_path[:-4] + ".pts"
                utils.save_landmarks_as_pts_file(resized_landmarks, landmarks_save_path)
                resized_img_paths.append(img_save_path)
                resized_landmarks_paths.append(landmarks_save_path)
            data_dict = {"image": resized_img_paths, "landmarks": resized_landmarks_paths}
            resized_df = self.save_csv(data_dict, self.base_csv)
        return resized_df

    @staticmethod
    def get_image_paths(dataset_dirs):
        """Collect all non-.pts files (the images) from the given directories."""
        img_paths = []
        for dir_ in dataset_dirs:
            files = os.listdir(dir_)
            for file_ in files:
                if ".pts" not in file_:
                    full_path = os.path.join(dir_, file_)
                    img_paths.append(full_path)
        return img_paths

    @staticmethod
    def get_image_and_landmarks(img_path):
        """Load an image plus the landmarks from its sibling .pts file."""
        img = cv2.imread(img_path)
        landmarks = utils.load_pts_file(img_path[:-4] + ".pts")
        return img, landmarks

    def get_resizing_transformation_parameters(self, img, landmarks):
        """Compute the translation/scale that centers the face and makes its
        bounding box span (1 - 2*margin) of the target's smaller side."""
        center = np.mean(landmarks, axis=0)
        # image center in (x, y) order; [::-1] flips from (rows, cols)
        img_center = np.asarray([x / 2 for x in img.shape[:2]][::-1])
        offset = img_center - center
        # face size = largest extent of the landmark bounding box
        face_size = max(np.max(landmarks, axis=0) - np.min(landmarks, axis=0))
        margin = 0.25  # We want face to be centered
        desired_size = 1 - 2 * margin
        desired_size *= min(self.dataset_parameters.img_shape)
        scale = desired_size / face_size
        angle = 0
        params = self.transformation_parameters(center, angle, scale, offset)
        return params

    def resize_img_and_landmarks(self, img, landmarks, resizing_parameters):
        """Apply the centering transform, then crop a target-shaped window
        around the image center; landmarks are shifted into crop coords."""
        transformed_img, transformed_landmarks = self.transform_img_and_landmarks(img, landmarks, resizing_parameters)
        img_center = np.asarray([x / 2 for x in img.shape[:2]][::-1])
        target_img_shape = self.dataset_parameters.img_shape
        min_xy = (img_center - target_img_shape / 2).astype(int)
        max_xy = (img_center + target_img_shape / 2).astype(int)
        resized_img = transformed_img[min_xy[1]:max_xy[1], min_xy[0]:max_xy[0]]
        transformed_landmarks -= min_xy
        return resized_img, transformed_landmarks

    @staticmethod
    def transform_img_and_landmarks(img, landmarks, transformation_parameters):
        """Apply one affine transform consistently to image and landmarks."""
        center = transformation_parameters.center
        angle = transformation_parameters.angle
        scale = transformation_parameters.scale
        offset = transformation_parameters.offset
        transformed_landmarks = utils.transform_landmarks(landmarks, angle, scale, offset, center)
        transformed_img = utils.transform_affine(img, angle, scale, offset, center)
        return transformed_img, transformed_landmarks

    @staticmethod
    def save_csv(data_dict, path_to_save):
        """Write data_dict to CSV and return it as a DataFrame."""
        df = pd.DataFrame(data_dict)
        df.to_csv(path_to_save, index=None, header=True)
        return df

    def process_images(self):
        # subclasses implement the per-split preprocessing pipeline
        raise NotImplementedError
class TestsetPreprocessing(DataPreprocessing):
    """Test-set pipeline: resize/center only (no mirroring or augmentation)."""

    def __init__(self, dataset_parameters, base_csv, dataset_dirs):
        super(TestsetPreprocessing, self).__init__(dataset_parameters, base_csv, dataset_dirs)

    def process_images(self):
        # the resized DataFrame is persisted to CSV as a side effect;
        # the return value is not needed here
        self.resize_and_center_data()
class TrainsetPreprocessing(DataPreprocessing):
    """
    Train-set pipeline. Beyond the shared resize/center step:
    2) Normalize scaled images to canonical pose (only for training to investigate its impact).
    3) Augment scaled images by randomly scaling, rotating, translating (only for training to investigate its impact).
    """

    def __init__(self, dataset_parameters, base_csv, dataset_dirs):
        super(TrainsetPreprocessing, self).__init__(dataset_parameters, base_csv, dataset_dirs)

    def process_images(self):
        # Resize + (optional) mirroring always run; normalization signals
        # "disabled" by raising RuntimeError, which also skips augmentation.
        resized_df = self.resize_and_center_data()
        resized_df = self.mirror_and_save_data(resized_df)
        try:
            normalized_df = self.normalize_to_canonical_shape(resized_df)
            self.augment_data(normalized_df, resized_df)
        except RuntimeError:
            pass  # We don't want neither normalization nor augmentation. But that's still ok.
    def mirror_and_save_data(self, resized_df):
        """Optionally double the dataset by horizontal mirroring.

        When dataset_parameters.mirror is set, a mirrored copy of every
        resized image is written to disk and the combined (original +
        mirrored) path list is re-saved to base_csv; otherwise resized_df
        is returned unchanged.
        """
        mirrored_img_paths = []
        mirrored_landmarks_paths = []
        if self.dataset_parameters.mirror:
            for img_path in resized_df["image"]:
                resized_img, resized_landmarks = self.get_image_and_landmarks(img_path)
                mirrored_img, mirrored_landmarks = self.mirror_data(resized_img, resized_landmarks)
                mirrored_img_path, mirrored_landmarks_path = self.save_data(mirrored_img, mirrored_landmarks, img_path)
                mirrored_img_paths.append(mirrored_img_path)
                mirrored_landmarks_paths.append(mirrored_landmarks_path)
            resized_mirrored_img_paths = list(resized_df["image"]) + mirrored_img_paths
            resized_mirrored_landmarks_paths = list(resized_df["landmarks"]) + mirrored_landmarks_paths
            data_dict = {"image": resized_mirrored_img_paths, "landmarks": resized_mirrored_landmarks_paths}
            resized_df = self.save_csv(data_dict, self.base_csv)
        return resized_df
@staticmethod
def mirror_data(img, landmarks):
mirrored_img = np.fliplr(img.copy())
mirrored_landmarks = utils.mirror_landmarks(landmarks, mirrored_img.shape)
return mirrored_img, mirrored_landmarks
@staticmethod
def save_data(img, landmarks, path):
img_save_path = path[:-4] + "m" + path[-4:]
utils.save_image(img, img_save_path)
landmarks_save_path = path[:-4] + "m.pts"
utils.save_landmarks_as_pts_file(landmarks, landmarks_save_path)
return img_save_path, landmarks_save_path
    def normalize_to_canonical_shape(self, resized_df):
        """Rotate each resized image so the eye line is horizontal.

        Results are cached in a *_normalized.csv next to base_csv.  Raises
        RuntimeError when n_augmented_images <= 0, which the caller treats
        as "normalization/augmentation disabled".
        """
        normalized_path = self.base_csv.replace(".csv", "_normalized.csv")
        if utils.is_exists(normalized_path):
            normalized_df = pd.read_csv(normalized_path)
        elif self.dataset_parameters.n_augmented_images > 0:
            print("Normalizing images to canonical pose...")
            resized_img_paths = resized_df["image"]
            data_dict = defaultdict(lambda: [])
            img_idx = 1  # suffix "__1" marks the normalized (non-augmented) variant
            for path in resized_img_paths:
                resized_img, resized_landmarks = self.get_image_and_landmarks(path)
                normalization_parameters = self.get_normalization_transformation_parameters(resized_landmarks)
                normalized_img, normalized_landmarks = self.transform_img_and_landmarks(resized_img, resized_landmarks,
                                                                                        normalization_parameters)
                img_extension = path[-4:]
                path_without_extension = path[:-4]
                save_path_template = path_without_extension + "__{}"
                save_data_to = save_path_template.format(str(img_idx) + img_extension)
                normalized_img_path, normalized_landmarks_path = self.save_data(normalized_img, normalized_landmarks,
                                                                                save_data_to)
                data_dict["image"].append(normalized_img_path)
                data_dict["landmarks"].append(normalized_landmarks_path)
            normalized_df = self.save_csv(data_dict, normalized_path)
        else:
            raise RuntimeError
        return normalized_df
def get_normalization_transformation_parameters(self, landmarks):
left_eye_center = np.mean(landmarks[36:42], axis=0)
right_eye_center = np.mean(landmarks[42:48], axis=0)
d_y = right_eye_center[1] - left_eye_center[1]
d_x = right_eye_center[0] - left_eye_center[0]
angle = -np.degrees(np.arctan2(d_y, d_x))
center = np.mean(landmarks, axis=0)
offset = 0
scale = 1
params = self.transformation_parameters(center, angle, scale, offset)
return params
def augment_data(self, normalized_df, resized_df):
    """Augments images in the dataset by randomly scaling, rotating and translating. Random samples are
    taken from normal distribution"""
    # Cached run: the augmented CSV already exists, nothing to do.
    augmented_path = self.base_csv.replace(".csv", "_augmented.csv")
    if utils.is_exists(augmented_path):
        return
    elif self.dataset_parameters.n_augmented_images > 1:
        notice = ("Data augmentation is being performed. This may take a while according to the number of images"
                  " and n_augmented_images parameter...")
        print(notice)
        data_dict = defaultdict(lambda: [])
        normalized_img_paths = normalized_df["image"]
        transformation_params = self.dataset_parameters.transformation_params
        # First two entries: translation std devs -- presumably fractions of the
        # image size, hence the multiplication by img_shape; TODO confirm.
        translation_std = np.asarray(transformation_params[:2])*self.dataset_parameters.img_shape
        scale_std = transformation_params[2]
        rotation_std = transformation_params[3]
        for path in normalized_img_paths:
            normalized_img, normalized_landmarks = self.get_image_and_landmarks(path)
            img_idx = 2  # index 1 is the (unaugmented) normalized image itself
            for _ in range(self.dataset_parameters.n_augmented_images-1):
                augmentation_parameters = self.get_augmentation_transformation_parameters(rotation_std, scale_std,
                                                                                          translation_std)
                augmented_img, augmented_landmarks = self.transform_img_and_landmarks(normalized_img,
                                                                                      normalized_landmarks,
                                                                                      augmentation_parameters)
                img_extension = path[-4:]
                # [:-5] strips the trailing "m" plus the 4-char extension that
                # save_data appended to the normalized file -- assumes exactly
                # that suffix shape; TODO confirm.
                save_path = path[:-5] + str(img_idx) + img_extension
                augmented_img_path, augmented_landmarks_path = self.save_data(augmented_img, augmented_landmarks,
                                                                              save_path)
                data_dict["image"].append(augmented_img_path)
                data_dict["landmarks"].append(augmented_landmarks_path)
                img_idx += 1
        # The augmented CSV lists augmented copies plus the normalized and the
        # resized originals.
        data_dict["image"].extend(list(normalized_df["image"]))
        data_dict["landmarks"].extend(list(normalized_df["landmarks"]))
        data_dict["image"].extend(list(resized_df["image"]))
        data_dict["landmarks"].extend(list(resized_df["landmarks"]))
        self.save_csv(data_dict, augmented_path)
def get_augmentation_transformation_parameters(self, rotation_std, scale_std, translation_std):
    """Draw random rotation/scale/translation parameters centered on identity.

    The final line of this method had dataset-dump residue fused onto the
    ``return``; the clean method is restored here.
    """
    angle = np.random.normal(0, rotation_std)
    offset = (np.random.normal(0, translation_std[0]), np.random.normal(0, translation_std[1]))
    scale = np.random.normal(1, scale_std)
    # Transform about the image center.
    center = tuple(self.dataset_parameters.img_shape / 2)
    params = self.transformation_parameters(center, angle, scale, offset)
    return params
import os
import cv2
import numpy as np
import pandas as pd
import utils
class DataPreprocessing:
    """Preprocessing base class. Since resizing is necessary for both train and test set, it is defined here"""

    def __init__(self, dataset_parameters, base_csv, dataset_dirs):
        # dataset_parameters is assumed to expose img_shape, base_dataset_dir
        # and data_preprocessing_output_dir -- TODO confirm against the config.
        self.dataset_parameters = dataset_parameters
        self.dataset_parameters.img_shape = np.asarray(self.dataset_parameters.img_shape)
        self.base_dataset_dir = dataset_parameters.base_dataset_dir
        self.dataset_dirs = dataset_dirs
        self.base_csv = base_csv
        # Factory for (center, angle, scale, offset) affine-transform records.
        self.transformation_parameters = namedtuple("Transformation", ["center", "angle", "scale", "offset"])
        utils.makedir(dataset_parameters.data_preprocessing_output_dir)

    def resize_and_center_data(self):
        """Resize/center every image (cached via base_csv) and return a
        DataFrame with columns "image" and "landmarks"."""
        if utils.is_exists(self.base_csv):
            # A previous run already produced the CSV; reuse it.
            resized_df = pd.read_csv(self.base_csv)
        else:
            print("Resizing and centering data...")
            resized_img_paths = []
            resized_landmarks_paths = []
            img_paths = self.get_image_paths(self.dataset_dirs)
            for img_path in img_paths:
                img, landmarks = self.get_image_and_landmarks(img_path)
                resizing_parameters = self.get_resizing_transformation_parameters(img, landmarks)
                resized_img, resized_landmarks = self.resize_img_and_landmarks(img, landmarks, resizing_parameters)
                # Mirror the input directory layout under the output directory.
                img_save_path = img_path.replace(self.base_dataset_dir,
                                                 self.dataset_parameters.data_preprocessing_output_dir)
                utils.save_image(resized_img, img_save_path)
                # Assumes a 3-character image extension -- TODO confirm.
                landmarks_save_path = img_save_path[:-4] + ".pts"
                utils.save_landmarks_as_pts_file(resized_landmarks, landmarks_save_path)
                resized_img_paths.append(img_save_path)
                resized_landmarks_paths.append(landmarks_save_path)
            data_dict = {"image": resized_img_paths, "landmarks": resized_landmarks_paths}
            resized_df = self.save_csv(data_dict, self.base_csv)
        return resized_df

    @staticmethod
    def get_image_paths(dataset_dirs):
        """Collect every non-.pts file (treated as an image) from the dirs."""
        img_paths = []
        for dir_ in dataset_dirs:
            files = os.listdir(dir_)
            for file_ in files:
                if ".pts" not in file_:
                    full_path = os.path.join(dir_, file_)
                    img_paths.append(full_path)
        return img_paths

    @staticmethod
    def get_image_and_landmarks(img_path):
        """Load an image and its sibling .pts landmark file."""
        img = cv2.imread(img_path)
        landmarks = utils.load_pts_file(img_path[:-4] + ".pts")
        return img, landmarks

    def get_resizing_transformation_parameters(self, img, landmarks):
        """Affine parameters that center the face and scale it so the face
        spans (1 - 2*margin) of the target image size."""
        center = np.mean(landmarks, axis=0)
        # Image center in (x, y) order: shape is (rows, cols), hence [::-1].
        img_center = np.asarray([x / 2 for x in img.shape[:2]][::-1])
        offset = img_center - center
        face_size = max(np.max(landmarks, axis=0) - np.min(landmarks, axis=0))
        margin = 0.25  # We want face to be centered
        desired_size = 1 - 2 * margin
        desired_size *= min(self.dataset_parameters.img_shape)
        scale = desired_size / face_size
        angle = 0
        params = self.transformation_parameters(center, angle, scale, offset)
        return params

    def resize_img_and_landmarks(self, img, landmarks, resizing_parameters):
        """Apply the affine transform, then crop a target-shaped window around
        the image center; landmarks are shifted into crop coordinates."""
        transformed_img, transformed_landmarks = self.transform_img_and_landmarks(img, landmarks, resizing_parameters)
        img_center = np.asarray([x / 2 for x in img.shape[:2]][::-1])
        target_img_shape = self.dataset_parameters.img_shape
        min_xy = (img_center - target_img_shape / 2).astype(int)
        max_xy = (img_center + target_img_shape / 2).astype(int)
        # Numpy indexing is (row, col) == (y, x), hence the swapped order.
        resized_img = transformed_img[min_xy[1]:max_xy[1], min_xy[0]:max_xy[0]]
        transformed_landmarks -= min_xy
        return resized_img, transformed_landmarks

    @staticmethod
    def transform_img_and_landmarks(img, landmarks, transformation_parameters):
        """Apply the same rotation/scale/offset to image and landmarks."""
        center = transformation_parameters.center
        angle = transformation_parameters.angle
        scale = transformation_parameters.scale
        offset = transformation_parameters.offset
        transformed_landmarks = utils.transform_landmarks(landmarks, angle, scale, offset, center)
        transformed_img = utils.transform_affine(img, angle, scale, offset, center)
        return transformed_img, transformed_landmarks

    @staticmethod
    def save_csv(data_dict, path_to_save):
        """Write data_dict as CSV and return it as a DataFrame."""
        df = pd.DataFrame(data_dict)
        df.to_csv(path_to_save, index=None, header=True)
        return df

    def process_images(self):
        # Subclasses define the concrete preprocessing pipeline.
        raise NotImplementedError
class TestsetPreprocessing(DataPreprocessing):
    """Test-set pipeline: resizing/centering only — no mirroring, no
    normalization, no augmentation."""

    def __init__(self, dataset_parameters, base_csv, dataset_dirs):
        super(TestsetPreprocessing, self).__init__(dataset_parameters, base_csv, dataset_dirs)

    def process_images(self):
        """Run the test-set preprocessing: resize and center every image."""
        self.resize_and_center_data()
class TrainsetPreprocessing(DataPreprocessing):
    """Train-set pipeline.
    1) Resize/center (inherited) and optionally mirror images.
    2) Normalize scaled images to canonical pose (only for training to investigate its impact).
    3) Augment scaled images by randomly scaling, rotating, translating (only for training to investigate its impact).
    """

    def __init__(self, dataset_parameters, base_csv, dataset_dirs):
        super(TrainsetPreprocessing, self).__init__(dataset_parameters, base_csv, dataset_dirs)

    def process_images(self):
        """Run the full training preprocessing chain."""
        resized_df = self.resize_and_center_data()
        resized_df = self.mirror_and_save_data(resized_df)
        try:
            normalized_df = self.normalize_to_canonical_shape(resized_df)
            self.augment_data(normalized_df, resized_df)
        except RuntimeError:
            pass  # We don't want neither normalization nor augmentation. But that's still ok.

    def mirror_and_save_data(self, resized_df):
        """Optionally add horizontally mirrored copies of every resized image
        and rewrite base_csv so it lists originals plus mirrors."""
        mirrored_img_paths = []
        mirrored_landmarks_paths = []
        if self.dataset_parameters.mirror:
            for img_path in resized_df["image"]:
                resized_img, resized_landmarks = self.get_image_and_landmarks(img_path)
                mirrored_img, mirrored_landmarks = self.mirror_data(resized_img, resized_landmarks)
                mirrored_img_path, mirrored_landmarks_path = self.save_data(mirrored_img, mirrored_landmarks, img_path)
                mirrored_img_paths.append(mirrored_img_path)
                mirrored_landmarks_paths.append(mirrored_landmarks_path)
        resized_mirrored_img_paths = list(resized_df["image"]) + mirrored_img_paths
        resized_mirrored_landmarks_paths = list(resized_df["landmarks"]) + mirrored_landmarks_paths
        data_dict = {"image": resized_mirrored_img_paths, "landmarks": resized_mirrored_landmarks_paths}
        resized_df = self.save_csv(data_dict, self.base_csv)
        return resized_df

    @staticmethod
    def mirror_data(img, landmarks):
        """Flip the image left-right and mirror the landmark x-coordinates."""
        mirrored_img = np.fliplr(img.copy())
        mirrored_landmarks = utils.mirror_landmarks(landmarks, mirrored_img.shape)
        return mirrored_img, mirrored_landmarks

    @staticmethod
    def save_data(img, landmarks, path):
        """Save img/landmarks next to *path*, inserting "m" before the
        extension (mirrored-data naming convention)."""
        img_save_path = path[:-4] + "m" + path[-4:]
        utils.save_image(img, img_save_path)
        landmarks_save_path = path[:-4] + "m.pts"
        utils.save_landmarks_as_pts_file(landmarks, landmarks_save_path)
        return img_save_path, landmarks_save_path

    def normalize_to_canonical_shape(self, resized_df):
        """Rotate every image so the eye line is horizontal (canonical pose).

        Cached in ``<base_csv>_normalized.csv``.  Raises RuntimeError when
        normalization is disabled (caller treats that as "skip").
        """
        normalized_path = self.base_csv.replace(".csv", "_normalized.csv")
        if utils.is_exists(normalized_path):
            normalized_df = pd.read_csv(normalized_path)
        elif self.dataset_parameters.n_augmented_images > 0:
            print("Normalizing images to canonical pose...")
            resized_img_paths = resized_df["image"]
            data_dict = defaultdict(list)  # idiomatic form of defaultdict(lambda: [])
            img_idx = 1  # "__1" marks the normalized (non-augmented) copy
            for path in resized_img_paths:
                resized_img, resized_landmarks = self.get_image_and_landmarks(path)
                normalization_parameters = self.get_normalization_transformation_parameters(resized_landmarks)
                normalized_img, normalized_landmarks = self.transform_img_and_landmarks(
                    resized_img, resized_landmarks, normalization_parameters)
                img_extension = path[-4:]
                path_without_extension = path[:-4]
                save_path_template = path_without_extension + "__{}"
                save_data_to = save_path_template.format(str(img_idx) + img_extension)
                normalized_img_path, normalized_landmarks_path = self.save_data(
                    normalized_img, normalized_landmarks, save_data_to)
                data_dict["image"].append(normalized_img_path)
                data_dict["landmarks"].append(normalized_landmarks_path)
            normalized_df = self.save_csv(data_dict, normalized_path)
        else:
            # Bug fix: message added to the formerly bare RuntimeError.
            raise RuntimeError("normalization disabled (n_augmented_images == 0)")
        return normalized_df

    def get_normalization_transformation_parameters(self, landmarks):
        """In-plane rotation (about the landmark centroid) that levels the
        eyes.  68-point convention: 36-41 left eye, 42-47 right eye."""
        left_eye_center = np.mean(landmarks[36:42], axis=0)
        right_eye_center = np.mean(landmarks[42:48], axis=0)
        d_y = right_eye_center[1] - left_eye_center[1]
        d_x = right_eye_center[0] - left_eye_center[0]
        angle = -np.degrees(np.arctan2(d_y, d_x))
        center = np.mean(landmarks, axis=0)
        offset = 0
        scale = 1
        params = self.transformation_parameters(center, angle, scale, offset)
        return params

    def augment_data(self, normalized_df, resized_df):
        """Augments images in the dataset by randomly scaling, rotating and translating. Random samples are
        taken from normal distribution"""
        augmented_path = self.base_csv.replace(".csv", "_augmented.csv")
        if utils.is_exists(augmented_path):
            return
        elif self.dataset_parameters.n_augmented_images > 1:
            notice = ("Data augmentation is being performed. This may take a while according to the number of images"
                      " and n_augmented_images parameter...")
            print(notice)
            data_dict = defaultdict(list)
            normalized_img_paths = normalized_df["image"]
            transformation_params = self.dataset_parameters.transformation_params
            # First two entries: fractional translation stds, scaled to pixels.
            translation_std = np.asarray(transformation_params[:2]) * self.dataset_parameters.img_shape
            scale_std = transformation_params[2]
            rotation_std = transformation_params[3]
            for path in normalized_img_paths:
                normalized_img, normalized_landmarks = self.get_image_and_landmarks(path)
                img_idx = 2  # index 1 is the normalized image itself
                for _ in range(self.dataset_parameters.n_augmented_images - 1):
                    augmentation_parameters = self.get_augmentation_transformation_parameters(
                        rotation_std, scale_std, translation_std)
                    augmented_img, augmented_landmarks = self.transform_img_and_landmarks(
                        normalized_img, normalized_landmarks, augmentation_parameters)
                    img_extension = path[-4:]
                    # [:-5] strips the trailing "m" + 4-char extension that
                    # save_data appended -- assumes that suffix; TODO confirm.
                    save_path = path[:-5] + str(img_idx) + img_extension
                    augmented_img_path, augmented_landmarks_path = self.save_data(
                        augmented_img, augmented_landmarks, save_path)
                    data_dict["image"].append(augmented_img_path)
                    data_dict["landmarks"].append(augmented_landmarks_path)
                    img_idx += 1
            data_dict["image"].extend(list(normalized_df["image"]))
            data_dict["landmarks"].extend(list(normalized_df["landmarks"]))
            data_dict["image"].extend(list(resized_df["image"]))
            data_dict["landmarks"].extend(list(resized_df["landmarks"]))
            self.save_csv(data_dict, augmented_path)

    def get_augmentation_transformation_parameters(self, rotation_std, scale_std, translation_std):
        """Draw rotation/scale/translation from normal distributions centered
        on the identity transform.  (Dataset-dump residue fused onto the final
        return line of this class has been removed.)"""
        angle = np.random.normal(0, rotation_std)
        offset = (np.random.normal(0, translation_std[0]), np.random.normal(0, translation_std[1]))
        scale = np.random.normal(1, scale_std)
        center = tuple(self.dataset_parameters.img_shape / 2)
        params = self.transformation_parameters(center, angle, scale, offset)
        return params
# Import Modules
import numpy as np
import scipy.special
from scipy.optimize import root
import logging
from ep_clustering._utils import fix_docs, logsumexp
from ep_clustering.likelihoods._likelihoods import Likelihood
from ep_clustering.likelihoods._slice_sampler import SliceSampler
from ep_clustering.exp_family._von_mises_fisher import (
VonMisesFisherFamily,
VonMisesFisherProdGammaFamily,
amos_asymptotic_log_iv,
)
from spherecluster import sample_vMF
# Clamp bounds for the vMF concentration (kappa) during parameter updates.
MAX_CONCENTRATION = 10.0**9
MIN_CONCENTRATION = 10**-3

# Module-level logger.
logger = logging.getLogger(name=__name__)
LOGGING_FORMAT = '%(levelname)s: %(asctime)s - %(name)s: %(message)s ...'
# NOTE(review): configuring the root logger at import time is a module-level
# side effect; confirm this is intended for library (not just script) use.
logging.basicConfig(
    level = logging.INFO,
    format = LOGGING_FORMAT,
)
@fix_docs
class FixedVonMisesFisherLikelihood(Likelihood):
    """ Von Mises Fisher Likelihood with fixed concentration

    Args:
        concentration_update (string): method for updating concentration
            "map": (default) use the MAP estimator
            "slice_sampler": slow
        num_slice_steps (int): number of slice sampler steps
        **kwargs:
            concentration (double) - concentration (a.k.a. kappa)
    """
    # Inherit Docstrings
    __doc__ += Likelihood.__doc__

    # Class Variables
    name = "FixedVonMisesFisher"

    def __init__(self, data, concentration_update="map",
                 num_slice_steps=5, **kwargs):
        self.y = data.matrix        # observations, one unit vector per row
        self.num_dim = data.num_dim
        super(FixedVonMisesFisherLikelihood, self).__init__(data, **kwargs)
        self.concentration_update = concentration_update
        self.num_slice_steps = num_slice_steps
        return

    def deepcopy(self):
        """ Return a copy """
        # Note: the underlying data object is shared, not copied.
        other = type(self)(data=self.data,
                           concentration_update=self.concentration_update,
                           num_slice_steps=self.num_slice_steps,
                           theta_prior=self.theta_prior)
        other.parameter = self.parameter.deepcopy()
        other.prior = self.prior.deepcopy()
        return other

    def _get_default_prior(self):
        """Nearly-uniform vMF prior over the mean direction (tiny mean norm)."""
        theta_prior = VonMisesFisherFamily(
            num_dim=self.num_dim,
            mean=np.ones(self.num_dim) / np.sqrt(self.num_dim) * 1e-9)
        return theta_prior

    def _get_default_parameters(self):
        """Returns default parameters dict"""
        default_parameter = {
            "concentration": 1.0,
        }
        return default_parameter

    def _get_default_parameters_prior(self):
        """Returns default parameters prior dict"""
        prior = {
            "alpha_concentration0": 2.0,
            "beta_concentration0": 0.1,
        }
        return prior

    def _sample_from_prior(self):
        # NOTE(review): this draws 1/Gamma(alpha, scale=beta) (an inverse-gamma
        # variate), while the slice-sampler prior below treats concentration as
        # Gamma(alpha, rate=beta) -- confirm which parameterization is intended.
        parameter = {
            "concentration": 1.0 / np.random.gamma(
                shape=self.prior.alpha_concentration0,
                scale=self.prior.beta_concentration0,
                size=1)
        }
        return parameter

    def loglikelihood(self, index, theta):
        """Log vMF density of observation `index` under mean direction theta.

        log C_d(k) = (d/2 - 1) log k - (d/2) log(2*pi) - log I_{d/2-1}(k)
        """
        y_index = self.y[index]
        order = (0.5 * self.num_dim - 1)
        # Bug fix: the normalizer term is -(d/2)*log(2*pi); the original used
        # np.sqrt(2*pi) here.
        loglikelihood = self.parameter.concentration * theta.dot(y_index) + \
            order * np.log(self.parameter.concentration) + \
            -0.5 * self.num_dim * np.log(2 * np.pi) + \
            -amos_asymptotic_log_iv(order, self.parameter.concentration)
        return loglikelihood

    def collapsed(self, index, subset_indices, theta_parameter):
        """Collapsed predictive log-likelihood of observation `index` given the
        observations in `subset_indices` (mean direction integrated out)."""
        loglikelihood = 0.0
        cavity_posterior = theta_parameter
        for s_index in subset_indices:
            s_y = self.y[s_index]
            cavity_posterior = (cavity_posterior + VonMisesFisherFamily(
                num_dim=self.num_dim,
                mean=s_y * self.parameter.concentration,
            ))
        loglikelihood -= cavity_posterior.logpartition()
        y = self.y[index]
        likelihood = VonMisesFisherFamily(
            num_dim=self.num_dim,
            mean=y * self.parameter.concentration,
        )
        loglikelihood -= likelihood.logpartition()
        posterior = cavity_posterior + likelihood
        loglikelihood += posterior.logpartition()
        return loglikelihood

    def moment(self, index, theta_parameter):
        """EP moment step: tilt the cavity by the site for observation `index`."""
        y_index = self.y[index]
        site = VonMisesFisherFamily(
            num_dim=self.num_dim,
            mean=y_index * self.parameter.concentration,
        )
        unnormalized_post_approx = (theta_parameter + site)
        unnormalized_post_approx.log_scaling_coef = \
            unnormalized_post_approx.logpartition() - \
            (theta_parameter.logpartition() + site.logpartition())
        return unnormalized_post_approx

    def sample(self, indices, prior_parameter):
        """Draw theta from the conjugate posterior given the indexed observations."""
        posterior = prior_parameter
        for index in indices:
            y_index = self.y[index]
            posterior = posterior + VonMisesFisherFamily(
                num_dim=self.num_dim,
                mean=y_index * self.parameter.concentration,
            )
        return posterior.sample()

    def update_parameters(self, z, theta, parameter_name=None):
        # Consistency fix: "variance" (legacy copy-paste from a Gaussian
        # likelihood) is kept for backward compatibility; "concentration" is
        # also accepted, matching update_local_parameters below.
        if parameter_name is None:
            self._update_concentration(z, theta)
        elif parameter_name in ("variance", "concentration"):
            self._update_concentration(z, theta)
        else:
            raise ValueError("Unrecognized parameter_name: " + parameter_name)
        return

    def _update_concentration(self, z, theta, k_list=None):
        """Re-estimate the shared concentration from cluster assignments.

        Args:
            z: assignment vector (z[i] == k means observation i is in cluster k)
            theta: cluster mean directions, one per row
            k_list: clusters to include; defaults to all clusters
        """
        if k_list is None:
            k_list = range(np.shape(theta)[0])
        if self.concentration_update == "map":
            # MAP Estimator Update from
            # http://www.jmlr.org/papers/volume6/banerjee05a/banerjee05a.pdf
            kappa, n = 0.0, 0.0
            for k in k_list:
                ind = (z == k)
                n_k = (np.sum(ind) * 1.0)
                if n_k == 0:
                    continue  # empty cluster: nothing to estimate from
                r_bar_k = np.linalg.norm(np.sum(self.y[ind, :], axis=0)) / n_k
                r_bar_k *= (1 - 1e-6)  # keep r_bar strictly below 1
                kappa_k = (r_bar_k * self.num_dim - r_bar_k ** 3) / (1.0 - r_bar_k ** 2)
                if kappa_k > MAX_CONCENTRATION:
                    kappa_k = MAX_CONCENTRATION
                kappa += n_k * kappa_k
                n += n_k
            # Bug fix: guard the empty case *before* dividing; the original
            # computed kappa/n first, which fails (or NaN-poisons) for n == 0.
            if n == 0:
                self.parameter.concentration = MIN_CONCENTRATION
            else:
                self.parameter.concentration = kappa / n
            if (np.isinf(self.parameter.concentration) or
                    np.isnan(self.parameter.concentration)):
                raise ValueError("concentration is invalid")
        elif self.concentration_update == "slice_sampler":
            # Slice Sampler Update
            # Bug fix (local import): the module top-level only imports
            # scipy.special, so scipy.stats was unresolved at this call site.
            import scipy.stats
            logprior = lambda kappa: scipy.stats.gamma.logpdf(
                kappa, a=self.prior.alpha_concentration0,
                scale=1.0 / self.prior.beta_concentration0,
            )
            n = 0.0
            mu_T_x = 0.0
            for k in k_list:
                ind = (z == k)
                n += (np.sum(ind) * 1.0)
                mu_T_x += np.dot(theta[k], np.sum(self.y[ind, :], axis=0))
            order = self.num_dim / 2.0 - 1.0

            def logf(kappa):
                # Unnormalized log posterior of kappa.
                logf = logprior(kappa)
                logf += kappa * mu_T_x
                logf += n * order * np.log(kappa)
                logf -= n * amos_asymptotic_log_iv(order, kappa)
                return logf

            slice_sampler = SliceSampler(
                logf=logf, lower_bound=0.0,
                num_steps=self.num_slice_steps)
            self.parameter.concentration = slice_sampler.sample(
                x_init=self.parameter.concentration,
            )
            if (np.isinf(self.parameter.concentration) or
                    np.isnan(self.parameter.concentration)):
                raise ValueError("concentration is invalid")
        else:
            raise NotImplementedError(
                "Unrecognized `concentration_update`={0}".format(
                    self.concentration_update,
                ))
        return

    def update_local_parameters(self, k, z, theta, parameter_name=None):
        """Update concentration using only cluster k."""
        if parameter_name is None:
            self._update_concentration(z, theta, k_list=[k])
        elif parameter_name in ("variance", "concentration"):
            self._update_concentration(z, theta, k_list=[k])
        else:
            raise ValueError("Unrecognized parameter_name: " + parameter_name)
        return
@fix_docs
class VonMisesFisherLikelihood(Likelihood):
    """ Von Mises Fisher Likelihood

    Args:
        moment_update (string):
            'exact' - use root finding to match sufficient statistics
            'variance' - use algebra to match first two moments (faster)
        decay_factor (double):
            decay factor for posterior moment natural parameters
        breaks (int): number of points used in numerical integration
        **kwargs:
    """
    # Inherit Docstrings
    __doc__ += Likelihood.__doc__

    # Class Variables
    name = "VonMisesFisher"

    def __init__(self, data, moment_update='exact', decay_factor=1.0, breaks=20,
                 **kwargs):
        self.y = data.matrix        # observations, one unit vector per row
        self.num_dim = data.num_dim
        self.moment_update = moment_update
        self.decay_factor = decay_factor
        if not isinstance(breaks, int):
            raise TypeError("breaks must be an int")
        self.breaks = breaks
        super(VonMisesFisherLikelihood, self).__init__(data, **kwargs)
        return

    def deepcopy(self):
        """ Return a copy """
        # Note: the underlying data object is shared, not copied.
        other = type(self)(data=self.data,
                           moment_update=self.moment_update,
                           decay_factor=self.decay_factor,
                           breaks=self.breaks,
                           theta_prior=self.theta_prior)
        other.parameter = self.parameter.deepcopy()
        other.prior = self.prior.deepcopy()
        return other

    def _get_default_prior(self):
        """Nearly-uniform vMF x Gamma prior over (mean direction, concentration)."""
        theta_prior = VonMisesFisherProdGammaFamily(
            num_dim=self.num_dim,
            mean=np.ones(self.num_dim) / np.sqrt(self.num_dim) * 1e-9,
            alpha_minus_one=1.0,
            beta=0.1,
        )
        return theta_prior

    def _get_default_parameters(self):
        """Returns default parameters dict (empty: concentration lives in theta)."""
        default_parameter = {}
        return default_parameter

    def _get_default_parameters_prior(self):
        """Returns default parameters prior dict (empty)."""
        prior = {}
        return prior

    def _sample_from_prior(self):
        # No likelihood-level parameters to sample for this model.
        parameter = {}
        return parameter

    def loglikelihood(self, index, theta):
        """Log vMF density; theta carries both 'mean' and 'concentration'."""
        y_index = self.y[index]
        order = (0.5 * self.num_dim - 1)
        # Bug fix: the normalizer term is -(d/2)*log(2*pi); the original used
        # np.sqrt(2*pi) here.
        loglikelihood = theta['concentration'] * theta['mean'].dot(y_index) + \
            order * np.log(theta['concentration']) + \
            -0.5 * self.num_dim * np.log(2 * np.pi) + \
            -amos_asymptotic_log_iv(order, theta['concentration'])
        return loglikelihood

    def collapsed(self, index, subset_indices, theta_parameter):
        raise NotImplementedError("collapsed likelihood not implemented")

    def ep_loglikelihood(self, index, theta_parameter):
        """Approximate log p(y_index) under the EP posterior by integrating
        over `self.breaks` concentration quantile points."""
        approx_loglikelihood = 0.0
        y_index = self.y[index]
        cavity_posterior = theta_parameter
        # Quadrature nodes/weights over the concentration (kappa).
        kappas = cavity_posterior._get_concentration_quantiles(
            breaks=self.breaks)
        weights = cavity_posterior._get_concentration_quantile_weights(kappas)
        site_logpart = cavity_posterior._get_concentration_logpartitions(kappas)
        cavity_logpart = cavity_posterior._get_concentration_logpartitions(
            kappas * np.linalg.norm(
                cavity_posterior.natural_parameters['mean']
            )
        )
        post_approx_logpart = cavity_posterior._get_concentration_logpartitions(
            kappas * np.linalg.norm(
                y_index + cavity_posterior.natural_parameters['mean']
            )
        )
        approx_loglikelihood = logsumexp(
            post_approx_logpart - site_logpart - cavity_logpart,
            weights)
        return approx_loglikelihood

    def moment(self, index, theta_parameter):
        """EP moment step: numerically match the tilted distribution's
        concentration moments, then return the updated approximation."""
        y_index = self.y[index]
        kappas = theta_parameter._get_concentration_quantiles(
            breaks=self.breaks)
        weights = theta_parameter._get_concentration_quantile_weights(kappas)
        site_logpart = theta_parameter._get_concentration_logpartitions(kappas)
        cavity_logpart = theta_parameter._get_concentration_logpartitions(
            kappas * np.linalg.norm(
                theta_parameter.natural_parameters['mean']
            )
        )
        post_approx_logpart = theta_parameter._get_concentration_logpartitions(
            kappas * np.linalg.norm(
                y_index + theta_parameter.natural_parameters['mean']
            )
        )
        logparts = post_approx_logpart - site_logpart - cavity_logpart
        # Calculate Sufficient Statistic Moments
        logpartition = logsumexp(logparts, weights)
        mean_kappa = np.exp(
            logsumexp(logparts, weights * kappas) -
            logpartition
        )
        mean_kappa_2 = np.exp(
            logsumexp(logparts, weights * kappas ** 2) -
            logpartition
        )
        var_kappa = mean_kappa_2 - mean_kappa ** 2
        if np.isnan(mean_kappa) or mean_kappa < 0:
            raise ValueError("Invalid Mean_Kappa")
        # Convert Moments to Alpha + Beta
        if self.moment_update == 'exact':
            mean_log_kappa = np.exp(
                logsumexp(logparts, weights * np.log(kappas)) -
                logpartition
            )
            beta0 = mean_kappa / var_kappa
            alpha0 = mean_kappa * beta0

            def fun(x):
                # Zero of digamma(x) - log(x) + log(E[kappa]) - E[log kappa].
                return (scipy.special.digamma(x) - np.log(x) +
                        np.log(mean_kappa) - mean_log_kappa)

            alpha = root(fun, alpha0).x[0]
            beta = alpha / mean_kappa
        elif self.moment_update == 'variance':
            beta = mean_kappa / var_kappa
            alpha = mean_kappa * beta
        else:
            raise ValueError("Unrecognized moment_update `{0}`".format(
                self.moment_update))
        # Apply Decay Factor
        # NOTE(review): decay_factor is applied here and then again when the
        # natural parameters are written below -- confirm the double scaling
        # is intentional.
        if self.decay_factor < 1.0:
            alpha_minus_one_diff = (alpha - 1) - \
                theta_parameter.natural_parameters['alpha_minus_one']
            beta_diff = beta - \
                theta_parameter.natural_parameters['beta']
            alpha = (self.decay_factor * alpha_minus_one_diff) + 1 + \
                theta_parameter.natural_parameters['alpha_minus_one']
            beta = (self.decay_factor * beta_diff) + \
                theta_parameter.natural_parameters['beta']
        # Return post approx
        unnormalized_post_approx = theta_parameter.copy()
        unnormalized_post_approx.natural_parameters['mean'] += y_index
        unnormalized_post_approx.natural_parameters['alpha_minus_one'] = \
            (alpha - 1.0) * self.decay_factor
        unnormalized_post_approx.natural_parameters['beta'] = \
            beta * self.decay_factor
        unnormalized_post_approx.log_scaling_coef = logpartition
        return unnormalized_post_approx

    def sample(self, indices, prior_parameter):
        raise NotImplementedError("sample theta not implemented")

    def update_parameters(self, z, theta, parameter_name=None):
        # No likelihood-level parameters; only reject unknown names.
        # (Dataset-dump residue fused onto this class's final line removed.)
        if parameter_name is not None:
            raise ValueError("Unrecognized parameter_name: " + parameter_name)
        return

    def update_local_parameters(self, k, z, theta, parameter_name=None):
        if parameter_name is not None:
            raise ValueError("Unrecognized parameter_name: " + parameter_name)
        return
import numpy as np
import scipy.special
from scipy.optimize import root
import logging
from ep_clustering._utils import fix_docs, logsumexp
from ep_clustering.likelihoods._likelihoods import Likelihood
from ep_clustering.likelihoods._slice_sampler import SliceSampler
from ep_clustering.exp_family._von_mises_fisher import (
VonMisesFisherFamily,
VonMisesFisherProdGammaFamily,
amos_asymptotic_log_iv,
)
from spherecluster import sample_vMF
# Clamp bounds for the vMF concentration (kappa) during parameter updates.
MAX_CONCENTRATION = 10.0**9
MIN_CONCENTRATION = 10**-3

# Module-level logger.
logger = logging.getLogger(name=__name__)
LOGGING_FORMAT = '%(levelname)s: %(asctime)s - %(name)s: %(message)s ...'
# NOTE(review): configuring the root logger at import time is a module-level
# side effect; confirm this is intended for library (not just script) use.
logging.basicConfig(
    level = logging.INFO,
    format = LOGGING_FORMAT,
)
@fix_docs
class FixedVonMisesFisherLikelihood(Likelihood):
    """ Von Mises Fisher Likelihood with fixed concentration

    Args:
        concentration_update (string): method for updating concentration
            "map": (default) use the MAP estimator
            "slice_sampler": slow
        num_slice_steps (int): number of slice sampler steps
        **kwargs:
            concentration (double) - concentration (a.k.a. kappa)
    """
    # Inherit Docstrings
    __doc__ += Likelihood.__doc__

    # Class Variables
    name = "FixedVonMisesFisher"

    def __init__(self, data, concentration_update="map",
                 num_slice_steps=5, **kwargs):
        self.y = data.matrix        # observations, one unit vector per row
        self.num_dim = data.num_dim
        super(FixedVonMisesFisherLikelihood, self).__init__(data, **kwargs)
        self.concentration_update = concentration_update
        self.num_slice_steps = num_slice_steps
        return

    def deepcopy(self):
        """ Return a copy """
        # Note: the underlying data object is shared, not copied.
        other = type(self)(data=self.data,
                           concentration_update=self.concentration_update,
                           num_slice_steps=self.num_slice_steps,
                           theta_prior=self.theta_prior)
        other.parameter = self.parameter.deepcopy()
        other.prior = self.prior.deepcopy()
        return other

    def _get_default_prior(self):
        """Nearly-uniform vMF prior over the mean direction (tiny mean norm)."""
        theta_prior = VonMisesFisherFamily(
            num_dim=self.num_dim,
            mean=np.ones(self.num_dim) / np.sqrt(self.num_dim) * 1e-9)
        return theta_prior

    def _get_default_parameters(self):
        """Returns default parameters dict"""
        default_parameter = {
            "concentration": 1.0,
        }
        return default_parameter

    def _get_default_parameters_prior(self):
        """Returns default parameters prior dict"""
        prior = {
            "alpha_concentration0": 2.0,
            "beta_concentration0": 0.1,
        }
        return prior

    def _sample_from_prior(self):
        # NOTE(review): this draws 1/Gamma(alpha, scale=beta) (an inverse-gamma
        # variate), while the slice-sampler prior below treats concentration as
        # Gamma(alpha, rate=beta) -- confirm which parameterization is intended.
        parameter = {
            "concentration": 1.0 / np.random.gamma(
                shape=self.prior.alpha_concentration0,
                scale=self.prior.beta_concentration0,
                size=1)
        }
        return parameter

    def loglikelihood(self, index, theta):
        """Log vMF density of observation `index` under mean direction theta.

        log C_d(k) = (d/2 - 1) log k - (d/2) log(2*pi) - log I_{d/2-1}(k)
        """
        y_index = self.y[index]
        order = (0.5 * self.num_dim - 1)
        # Bug fix: the normalizer term is -(d/2)*log(2*pi); the original used
        # np.sqrt(2*pi) here.
        loglikelihood = self.parameter.concentration * theta.dot(y_index) + \
            order * np.log(self.parameter.concentration) + \
            -0.5 * self.num_dim * np.log(2 * np.pi) + \
            -amos_asymptotic_log_iv(order, self.parameter.concentration)
        return loglikelihood

    def collapsed(self, index, subset_indices, theta_parameter):
        """Collapsed predictive log-likelihood of observation `index` given the
        observations in `subset_indices` (mean direction integrated out)."""
        loglikelihood = 0.0
        cavity_posterior = theta_parameter
        for s_index in subset_indices:
            s_y = self.y[s_index]
            cavity_posterior = (cavity_posterior + VonMisesFisherFamily(
                num_dim=self.num_dim,
                mean=s_y * self.parameter.concentration,
            ))
        loglikelihood -= cavity_posterior.logpartition()
        y = self.y[index]
        likelihood = VonMisesFisherFamily(
            num_dim=self.num_dim,
            mean=y * self.parameter.concentration,
        )
        loglikelihood -= likelihood.logpartition()
        posterior = cavity_posterior + likelihood
        loglikelihood += posterior.logpartition()
        return loglikelihood

    def moment(self, index, theta_parameter):
        """EP moment step: tilt the cavity by the site for observation `index`."""
        y_index = self.y[index]
        site = VonMisesFisherFamily(
            num_dim=self.num_dim,
            mean=y_index * self.parameter.concentration,
        )
        unnormalized_post_approx = (theta_parameter + site)
        unnormalized_post_approx.log_scaling_coef = \
            unnormalized_post_approx.logpartition() - \
            (theta_parameter.logpartition() + site.logpartition())
        return unnormalized_post_approx

    def sample(self, indices, prior_parameter):
        """Draw theta from the conjugate posterior given the indexed observations."""
        posterior = prior_parameter
        for index in indices:
            y_index = self.y[index]
            posterior = posterior + VonMisesFisherFamily(
                num_dim=self.num_dim,
                mean=y_index * self.parameter.concentration,
            )
        return posterior.sample()

    def update_parameters(self, z, theta, parameter_name=None):
        # Consistency fix: "variance" (legacy copy-paste from a Gaussian
        # likelihood) is kept for backward compatibility; "concentration" is
        # also accepted, matching update_local_parameters below.
        if parameter_name is None:
            self._update_concentration(z, theta)
        elif parameter_name in ("variance", "concentration"):
            self._update_concentration(z, theta)
        else:
            raise ValueError("Unrecognized parameter_name: " + parameter_name)
        return

    def _update_concentration(self, z, theta, k_list=None):
        """Re-estimate the shared concentration from cluster assignments.

        Args:
            z: assignment vector (z[i] == k means observation i is in cluster k)
            theta: cluster mean directions, one per row
            k_list: clusters to include; defaults to all clusters
        """
        if k_list is None:
            k_list = range(np.shape(theta)[0])
        if self.concentration_update == "map":
            # MAP Estimator Update from
            # http://www.jmlr.org/papers/volume6/banerjee05a/banerjee05a.pdf
            kappa, n = 0.0, 0.0
            for k in k_list:
                ind = (z == k)
                n_k = (np.sum(ind) * 1.0)
                if n_k == 0:
                    continue  # empty cluster: nothing to estimate from
                r_bar_k = np.linalg.norm(np.sum(self.y[ind, :], axis=0)) / n_k
                r_bar_k *= (1 - 1e-6)  # keep r_bar strictly below 1
                kappa_k = (r_bar_k * self.num_dim - r_bar_k ** 3) / (1.0 - r_bar_k ** 2)
                if kappa_k > MAX_CONCENTRATION:
                    kappa_k = MAX_CONCENTRATION
                kappa += n_k * kappa_k
                n += n_k
            # Bug fix: guard the empty case *before* dividing; the original
            # computed kappa/n first, which fails (or NaN-poisons) for n == 0.
            if n == 0:
                self.parameter.concentration = MIN_CONCENTRATION
            else:
                self.parameter.concentration = kappa / n
            if (np.isinf(self.parameter.concentration) or
                    np.isnan(self.parameter.concentration)):
                raise ValueError("concentration is invalid")
        elif self.concentration_update == "slice_sampler":
            # Slice Sampler Update
            # Bug fix (local import): the module top-level only imports
            # scipy.special, so scipy.stats was unresolved at this call site.
            import scipy.stats
            logprior = lambda kappa: scipy.stats.gamma.logpdf(
                kappa, a=self.prior.alpha_concentration0,
                scale=1.0 / self.prior.beta_concentration0,
            )
            n = 0.0
            mu_T_x = 0.0
            for k in k_list:
                ind = (z == k)
                n += (np.sum(ind) * 1.0)
                mu_T_x += np.dot(theta[k], np.sum(self.y[ind, :], axis=0))
            order = self.num_dim / 2.0 - 1.0

            def logf(kappa):
                # Unnormalized log posterior of kappa.
                logf = logprior(kappa)
                logf += kappa * mu_T_x
                logf += n * order * np.log(kappa)
                logf -= n * amos_asymptotic_log_iv(order, kappa)
                return logf

            slice_sampler = SliceSampler(
                logf=logf, lower_bound=0.0,
                num_steps=self.num_slice_steps)
            self.parameter.concentration = slice_sampler.sample(
                x_init=self.parameter.concentration,
            )
            if (np.isinf(self.parameter.concentration) or
                    np.isnan(self.parameter.concentration)):
                raise ValueError("concentration is invalid")
        else:
            raise NotImplementedError(
                "Unrecognized `concentration_update`={0}".format(
                    self.concentration_update,
                ))
        return

    def update_local_parameters(self, k, z, theta, parameter_name=None):
        """Update concentration using only cluster k."""
        if parameter_name is None:
            self._update_concentration(z, theta, k_list=[k])
        elif parameter_name in ("variance", "concentration"):
            self._update_concentration(z, theta, k_list=[k])
        else:
            raise ValueError("Unrecognized parameter_name: " + parameter_name)
        return
@fix_docs
class VonMisesFisherLikelihood(Likelihood):
""" Von Mises Fisher Likelihood
Args:
moment_update (string):
'exact' - use root finding to match sufficient statistics
'variance' - use algebra to match first two moments (faster)
decay_factor (double):
decay factor for posterior moment natural parameters
breaks (int): number of points used in numerical integration
**kwargs:
"""
# Inherit Docstrings
__doc__ += Likelihood.__doc__
# Class Variables
name = "VonMisesFisher"
def __init__(self, data, moment_update='exact', decay_factor=1.0, breaks=20,
             **kwargs):
    """Store the data matrix and EP configuration.

    Args:
        data: dataset object exposing `matrix` and `num_dim`
        moment_update (str): 'exact' or 'variance' (see class docstring)
        decay_factor (float): damping applied to moment natural parameters
        breaks (int): quadrature points for concentration integration
    """
    self.y = data.matrix
    self.num_dim = data.num_dim
    self.moment_update = moment_update
    self.decay_factor = decay_factor
    if not isinstance(breaks, int):
        raise TypeError("breaks must be an int")
    self.breaks = breaks
    super(VonMisesFisherLikelihood, self).__init__(data, **kwargs)
    return
def deepcopy(self):
    """ Return a copy """
    # Re-run __init__ with the same configuration, then clone the mutable
    # parameter/prior state; the underlying data object is shared, not copied.
    other = type(self)(data = self.data,
                       moment_update=self.moment_update,
                       decay_factor=self.decay_factor,
                       breaks=self.breaks,
                       theta_prior=self.theta_prior)
    other.parameter = self.parameter.deepcopy()
    other.prior = self.prior.deepcopy()
    return other
def _get_default_prior(self):
theta_prior = VonMisesFisherProdGammaFamily(
num_dim = self.num_dim,
mean=np.ones(self.num_dim)/np.sqrt(self.num_dim) * 1e-9,
alpha_minus_one=1.0,
beta=0.1,
)
return theta_prior
def _get_default_parameters(self):
"""Returns default parameters dict"""
default_parameter = {}
return default_parameter
def _get_default_parameters_prior(self):
"""Returns default parameters prior dict"""
prior = {}
return prior
def _sample_from_prior(self):
parameter = {}
return parameter
def loglikelihood(self, index, theta):
y_index = self.y[index]
order = (0.5 * self.num_dim - 1)
loglikelihood = theta['concentration'] * theta['mean'].dot(y_index) + \
order * np.log(theta['concentration']) + \
-0.5*self.num_dim*np.sqrt(2*np.pi) + \
-amos_asymptotic_log_iv(order, theta['concentration'])
return loglikelihood
def collapsed(self, index, subset_indices, theta_parameter):
raise NotImplementedError("collapsed likelihood not implemented")
def ep_loglikelihood(self, index, theta_parameter):
approx_loglikelihood = 0.0
y_index = self.y[index]
cavity_posterior = theta_parameter
kappas = cavity_posterior._get_concentration_quantiles(
breaks=self.breaks)
weights = cavity_posterior._get_concentration_quantile_weights(kappas)
site_logpart = cavity_posterior._get_concentration_logpartitions(kappas)
cavity_logpart = cavity_posterior._get_concentration_logpartitions(
kappas * np.linalg.norm(
cavity_posterior.natural_parameters['mean']
)
)
post_approx_logpart = cavity_posterior._get_concentration_logpartitions(
kappas * np.linalg.norm(
y_index + cavity_posterior.natural_parameters['mean']
)
)
approx_loglikelihood = logsumexp(
post_approx_logpart - site_logpart - cavity_logpart,
weights)
return approx_loglikelihood
def moment(self, index, theta_parameter):
y_index = self.y[index]
kappas = theta_parameter._get_concentration_quantiles(
breaks=self.breaks)
weights = theta_parameter._get_concentration_quantile_weights(kappas)
site_logpart = theta_parameter._get_concentration_logpartitions(kappas)
cavity_logpart = theta_parameter._get_concentration_logpartitions(
kappas * np.linalg.norm(
theta_parameter.natural_parameters['mean']
)
)
post_approx_logpart = theta_parameter._get_concentration_logpartitions(
kappas * np.linalg.norm(
y_index + theta_parameter.natural_parameters['mean']
)
)
logparts = post_approx_logpart - site_logpart - cavity_logpart
# Calculate Sufficient Statistic Moments
logpartition = logsumexp(logparts, weights)
mean_kappa = np.exp(
logsumexp(logparts, weights * kappas) -
logpartition
)
mean_kappa_2 = np.exp(
logsumexp(logparts, weights * kappas**2) -
logpartition
)
var_kappa = mean_kappa_2 - mean_kappa**2
if np.isnan(mean_kappa) or mean_kappa < 0:
raise ValueError("Invalid Mean_Kappa")
# Convert Moments to Alpha + Beta
if self.moment_update == 'exact':
mean_log_kappa = np.exp(
logsumexp(logparts, weights * np.log(kappas)) -
logpartition
)
beta0 = mean_kappa / var_kappa
alpha0 = mean_kappa * beta0
def fun(x):
return (scipy.special.digamma(x) - np.log(x) +
np.log(mean_kappa) - mean_log_kappa)
alpha = root(fun, alpha0).x[0]
beta = alpha/mean_kappa
elif self.moment_update == 'variance':
beta = mean_kappa / var_kappa
alpha = mean_kappa * beta
else:
raise ValueError("Unrecognized moment_update `{0}`".format(
self.moment_update))
# Apply Decay Factor
if self.decay_factor < 1.0:
alpha_minus_one_diff = (alpha - 1) - \
theta_parameter.natural_parameters['alpha_minus_one']
beta_diff = beta - \
theta_parameter.natural_parameters['beta']
alpha = (self.decay_factor * alpha_minus_one_diff) + 1 + \
theta_parameter.natural_parameters['alpha_minus_one']
beta = (self.decay_factor * beta_diff) + \
theta_parameter.natural_parameters['beta']
# Return post approx
unnormalized_post_approx = theta_parameter.copy()
unnormalized_post_approx.natural_parameters['mean'] += y_index
unnormalized_post_approx.natural_parameters['alpha_minus_one'] = \
(alpha - 1.0) * self.decay_factor
unnormalized_post_approx.natural_parameters['beta'] = \
beta * self.decay_factor
unnormalized_post_approx.log_scaling_coef = logpartition
return unnormalized_post_approx
def sample(self, indices, prior_parameter):
raise NotImplementedError("sample theta not implemented")
def update_parameters(self, z, theta, parameter_name = None):
if parameter_name is not None:
raise ValueError("Unrecognized parameter_name: " + parameter_name)
return
def update_local_parameters(self, k, z, theta, parameter_name = None):
if parameter_name is not None:
raise ValueError("Unrecognized parameter_name: " + parameter_name)
return | 0.770422 | 0.465327 |
from .coloring import EffectSupporter
from .convolution import GaussianBlur
from ...base import SecondPassRenderer, BaseRenderer
from ...uniformed import UniformedRenderer
from ...util import sample_vertex_shader, gen_screen_mesh
from ....gl.shader import ShaderProgram
from ....gl.framebuffer import FrameBuffer, FB_NONE
from ....model.model import RenderCompound, Material
class LightExtractorRenderer(SecondPassRenderer):
_vert_shader = sample_vertex_shader
# language=GLSL
_frag_shader = '''\
#version 430 core
in vec2 tex_coords;
out vec4 out_color;
uniform sampler2D tex_img;
uniform float limit;
void main() {
vec4 curr_texel = texture(tex_img, tex_coords);
if (dot(curr_texel.xyz, vec3(1)) > limit) out_color = curr_texel;
else out_color = vec4(vec3(0), 1);
}
'''
def __init__(self, width, height, color_buffer_type=1):
self.fbo = FrameBuffer(width, height, color_buffer_type, FB_NONE)
self.shader_prog = ShaderProgram(self._vert_shader, self._frag_shader, use=True)
self.meshes = (RenderCompound(gen_screen_mesh(), Material(self.fbo.color_buffers)), )
self.brightness_limit_setter = self.shader_prog.get_uniform_setter('limit', '1f')
self.brightness_limit_setter(1)
draw = BaseRenderer.draw
class LightMergerRenderer(EffectSupporter):
_vert_shader = sample_vertex_shader
# language=GLSL
_frag_shader = '''\
#version 430 core
in vec2 tex_coords;
out vec4 out_color;
uniform sampler2D raw_img;
uniform sampler2D blurred_img;
uniform float bloom_brightness;
/* uniforms */
void main() {
out_color = texture(raw_img, tex_coords) + texture(blurred_img, tex_coords) * bloom_brightness;
/* main */
}
'''
_frag_out_color_name = 'out_color'
def __init__(self, width, height, src_col_buffer, color_buffer_type=1, custom_effects=()):
super(LightMergerRenderer, self).__init__(custom_effects)
self.fbo = FrameBuffer(width, height, color_buffer_type, FB_NONE)
uniformed = UniformedRenderer(self)
self.meshes = (RenderCompound(gen_screen_mesh(),
Material(((src_col_buffer, uniformed.sampler_data['raw_img'].bind_index),
(self.fbo.color_buffers[0],
uniformed.sampler_data['blurred_img'].bind_index)))
), )
del uniformed
brightness = self.shader_prog.get_uniform_setter('bloom_brightness', '1f')
brightness(0.2)
self.effect_value_setters['brightness'] = brightness
draw = BaseRenderer.draw
class Bloom(SecondPassRenderer):
def __init__(self, width, height, color_buffer_type=1, additional_post_effects=(), blur_points=51, num_passes=1,
brightness=0.1, light_limit=1):
super(Bloom, self).__init__()
self.extract_pass = LightExtractorRenderer(width, height, color_buffer_type)
self.blur_pass = GaussianBlur(width, height, blur_points, color_buffer_type)
self.merge_pass = LightMergerRenderer(width, height, self.extract_pass.fbo.color_buffers[0], color_buffer_type,
additional_post_effects)
self.merge_pass.shader_prog.use()
self.merge_pass.set_effect_value('brightness', brightness)
self.extract_pass.shader_prog.use()
self.extract_pass.brightness_limit_setter(light_limit)
self.meshes = self.extract_pass.meshes
self.fbo = self.extract_pass.fbo
self.num_additional_passes = num_passes - 1
def draw(self, out_fbo, data):
self.extract_pass.draw(self.blur_pass.fbo, data)
for _ in range(self.num_additional_passes):
self.blur_pass.draw(self.blur_pass.fbo, self.blur_pass.meshes)
self.blur_pass.draw(self.merge_pass.fbo, self.blur_pass.meshes)
self.merge_pass.draw(out_fbo, self.merge_pass.meshes) | engine/renderer/presets/fancy/bloom.py | from .coloring import EffectSupporter
from .convolution import GaussianBlur
from ...base import SecondPassRenderer, BaseRenderer
from ...uniformed import UniformedRenderer
from ...util import sample_vertex_shader, gen_screen_mesh
from ....gl.shader import ShaderProgram
from ....gl.framebuffer import FrameBuffer, FB_NONE
from ....model.model import RenderCompound, Material
class LightExtractorRenderer(SecondPassRenderer):
_vert_shader = sample_vertex_shader
# language=GLSL
_frag_shader = '''\
#version 430 core
in vec2 tex_coords;
out vec4 out_color;
uniform sampler2D tex_img;
uniform float limit;
void main() {
vec4 curr_texel = texture(tex_img, tex_coords);
if (dot(curr_texel.xyz, vec3(1)) > limit) out_color = curr_texel;
else out_color = vec4(vec3(0), 1);
}
'''
def __init__(self, width, height, color_buffer_type=1):
self.fbo = FrameBuffer(width, height, color_buffer_type, FB_NONE)
self.shader_prog = ShaderProgram(self._vert_shader, self._frag_shader, use=True)
self.meshes = (RenderCompound(gen_screen_mesh(), Material(self.fbo.color_buffers)), )
self.brightness_limit_setter = self.shader_prog.get_uniform_setter('limit', '1f')
self.brightness_limit_setter(1)
draw = BaseRenderer.draw
class LightMergerRenderer(EffectSupporter):
_vert_shader = sample_vertex_shader
# language=GLSL
_frag_shader = '''\
#version 430 core
in vec2 tex_coords;
out vec4 out_color;
uniform sampler2D raw_img;
uniform sampler2D blurred_img;
uniform float bloom_brightness;
/* uniforms */
void main() {
out_color = texture(raw_img, tex_coords) + texture(blurred_img, tex_coords) * bloom_brightness;
/* main */
}
'''
_frag_out_color_name = 'out_color'
def __init__(self, width, height, src_col_buffer, color_buffer_type=1, custom_effects=()):
super(LightMergerRenderer, self).__init__(custom_effects)
self.fbo = FrameBuffer(width, height, color_buffer_type, FB_NONE)
uniformed = UniformedRenderer(self)
self.meshes = (RenderCompound(gen_screen_mesh(),
Material(((src_col_buffer, uniformed.sampler_data['raw_img'].bind_index),
(self.fbo.color_buffers[0],
uniformed.sampler_data['blurred_img'].bind_index)))
), )
del uniformed
brightness = self.shader_prog.get_uniform_setter('bloom_brightness', '1f')
brightness(0.2)
self.effect_value_setters['brightness'] = brightness
draw = BaseRenderer.draw
class Bloom(SecondPassRenderer):
def __init__(self, width, height, color_buffer_type=1, additional_post_effects=(), blur_points=51, num_passes=1,
brightness=0.1, light_limit=1):
super(Bloom, self).__init__()
self.extract_pass = LightExtractorRenderer(width, height, color_buffer_type)
self.blur_pass = GaussianBlur(width, height, blur_points, color_buffer_type)
self.merge_pass = LightMergerRenderer(width, height, self.extract_pass.fbo.color_buffers[0], color_buffer_type,
additional_post_effects)
self.merge_pass.shader_prog.use()
self.merge_pass.set_effect_value('brightness', brightness)
self.extract_pass.shader_prog.use()
self.extract_pass.brightness_limit_setter(light_limit)
self.meshes = self.extract_pass.meshes
self.fbo = self.extract_pass.fbo
self.num_additional_passes = num_passes - 1
def draw(self, out_fbo, data):
self.extract_pass.draw(self.blur_pass.fbo, data)
for _ in range(self.num_additional_passes):
self.blur_pass.draw(self.blur_pass.fbo, self.blur_pass.meshes)
self.blur_pass.draw(self.merge_pass.fbo, self.blur_pass.meshes)
self.merge_pass.draw(out_fbo, self.merge_pass.meshes) | 0.59302 | 0.138958 |
from util.quadtree import Point
from projections.projection import GeospatialProjection
from scipy.cluster.hierarchy import linkage, leaves_list
from scipy.spatial.distance import euclidean
import numpy as np
class HierarchicalClusteringProjection(GeospatialProjection):
def add_data(self, data, method='single', metric='euclidean'):
samples = np.ndarray(shape=(len(data), 2), dtype=float)
for i,d in enumerate(data):
samples[i,0] = self.x_fn(d)
samples[i,1] = self.y_fn(d)
if len(data) == 1:
# distance matrix empty
self.data = [ Point(samples[0,0], samples[0,1], data[0]) ]
return
Z = linkage(samples, method, metric)
order = leaves_list(Z)
self.data = [ Point(samples[i,0], samples[i,1], data[i]) for i in order ]
def _order(self):
return self.data
def metadata(self):
return dict()
class HierarchicalClusteringFlightdataProjection(GeospatialProjection):
def add_data(self, data, flightdata=None, method='single'):
samples = np.ndarray(shape=(len(data), 2), dtype=float)
for i,d in enumerate(data):
samples[i,0] = self.x_fn(d)
samples[i,1] = self.y_fn(d)
if len(data) == 1:
# distance matrix empty
self.data = [ Point(samples[0,0], samples[0,1], data[0]) ]
return
distances = np.zeros(shape=((len(data) * (len(data) - 1))//2,), dtype=float)
idx = 0
for i, a in enumerate(data):
for j, b in enumerate(data[i+1:]):
idx_a = flightdata['indices'].get(a.id, None)
idx_b = flightdata['indices'].get(b.id, None)
if idx_a is not None and idx_b is not None:
flow = flightdata['matrix'][idx_a * flightdata['size'] + idx_b]
if flow is not None and flow != 0:
distances[idx] = 1 / flow
else:
distances[idx] = euclidean(samples[i], samples[j])
else:
distances[idx] = euclidean(samples[i], samples[j])
idx += 1
Z = linkage(distances, method)
order = leaves_list(Z)
self.data = [ Point(samples[i,0], samples[i,1], data[i]) for i in order ]
def _order(self):
return self.data
def metadata(self):
return dict() | preprocessing/projections/hierarchicalclustering.py | from util.quadtree import Point
from projections.projection import GeospatialProjection
from scipy.cluster.hierarchy import linkage, leaves_list
from scipy.spatial.distance import euclidean
import numpy as np
class HierarchicalClusteringProjection(GeospatialProjection):
def add_data(self, data, method='single', metric='euclidean'):
samples = np.ndarray(shape=(len(data), 2), dtype=float)
for i,d in enumerate(data):
samples[i,0] = self.x_fn(d)
samples[i,1] = self.y_fn(d)
if len(data) == 1:
# distance matrix empty
self.data = [ Point(samples[0,0], samples[0,1], data[0]) ]
return
Z = linkage(samples, method, metric)
order = leaves_list(Z)
self.data = [ Point(samples[i,0], samples[i,1], data[i]) for i in order ]
def _order(self):
return self.data
def metadata(self):
return dict()
class HierarchicalClusteringFlightdataProjection(GeospatialProjection):
def add_data(self, data, flightdata=None, method='single'):
samples = np.ndarray(shape=(len(data), 2), dtype=float)
for i,d in enumerate(data):
samples[i,0] = self.x_fn(d)
samples[i,1] = self.y_fn(d)
if len(data) == 1:
# distance matrix empty
self.data = [ Point(samples[0,0], samples[0,1], data[0]) ]
return
distances = np.zeros(shape=((len(data) * (len(data) - 1))//2,), dtype=float)
idx = 0
for i, a in enumerate(data):
for j, b in enumerate(data[i+1:]):
idx_a = flightdata['indices'].get(a.id, None)
idx_b = flightdata['indices'].get(b.id, None)
if idx_a is not None and idx_b is not None:
flow = flightdata['matrix'][idx_a * flightdata['size'] + idx_b]
if flow is not None and flow != 0:
distances[idx] = 1 / flow
else:
distances[idx] = euclidean(samples[i], samples[j])
else:
distances[idx] = euclidean(samples[i], samples[j])
idx += 1
Z = linkage(distances, method)
order = leaves_list(Z)
self.data = [ Point(samples[i,0], samples[i,1], data[i]) for i in order ]
def _order(self):
return self.data
def metadata(self):
return dict() | 0.47098 | 0.600305 |
import ROOT
import rootUtils as ut
import shipunit as u
fn = 'ship.Pythia8-TGeant4.root'
# fn = 'ship.Genie-TGeant4.root'
f = ROOT.TFile(fn)
sTree = f.FindObjectAny('cbmsim')
nEvents = sTree.GetEntries()
sFol = f.FindObjectAny('cbmroot')
MCTracks = ROOT.TClonesArray("FairMCTrack")
TrackingHits = ROOT.TClonesArray("vetoPoint")
h={}
def exMCTracks():
ut.bookHist(h,'pz','pz',100,0.,100.)
ut.bookHist(h,'oz','oz',100,-10000.,10000.)
ut.bookHist(h,'ex','ex to det',100,-2.5,2.5,100,-2.5,2.5)
ut.bookHist(h,'N','N tracks',300,0.5,299.5)
#
sTree.SetBranchAddress("MCTrack", MCTracks)
detPos = (3.5*u.m+70*u.m+40*u.m-100*u.m)
for n in range(nEvents):
rc = sTree.GetEvent(n)
nMCTracks = MCTracks.GetEntriesFast()
rc = h['N'].Fill( nMCTracks )
for i in range(nMCTracks):
atrack = MCTracks.At(i)
pdgCode = atrack.GetPdgCode()
mom = ROOT.TLorentzVector()
atrack.Get4Momentum(mom)
if abs(pdgCode)==13 or abs(pdgCode)==211:
rc = h['pz'].Fill( mom.Pz() )
rc = h['oz'].Fill( atrack.GetStartZ() )
lam = ( detPos-atrack.GetStartZ() )/mom.Pz()
xdet = (atrack.GetStartX()+lam*mom.Px() )/u.m
ydet = (atrack.GetStartY()+lam*mom.Py() )/u.m
rc = h['ex'].Fill(xdet,ydet )
h['N'].Draw('box')
def exMCHits(dump=False):
ut.bookHist(h,'tz','tracking hits z',100,-100.,100.)
ut.bookHist(h,'tztx','tracking hits x vs z',1000,-40.,40.,100,-2.5,2.5)
ut.bookHist(h,'txty','tracking hits y vs x',100,-2.5,2.5,100,-2.5,2.5)
sTree.SetBranchAddress("vetoPoint", TrackingHits)
for n in range(nEvents):
rc = sTree.GetEvent(n)
nHits = TrackingHits.GetEntriesFast()
for i in range(nHits):
ahit = TrackingHits.At(i)
rc = h['tz'].Fill( ahit.GetZ()/u.m )
rc = h['txty'].Fill( ahit.GetX()/u.m,ahit.GetY()/u.m )
rc = h['tztx'].Fill( ahit.GetZ()/u.m,ahit.GetX()/u.m )
h['tztx'].Draw('box')
if dump:
for n in range( min(10,nEvents) ):
rc = sTree.GetEvent(n)
nHits = TrackingHits.GetEntriesFast()
for i in range(nHits):
ahit = TrackingHits.At(i)
print ahit.GetZ()/u.m, ahit.GetDetectorID(),ahit.GetLength(),ahit.GetEnergyLoss() | python/shipEvent_ex.py | import ROOT
import rootUtils as ut
import shipunit as u
fn = 'ship.Pythia8-TGeant4.root'
# fn = 'ship.Genie-TGeant4.root'
f = ROOT.TFile(fn)
sTree = f.FindObjectAny('cbmsim')
nEvents = sTree.GetEntries()
sFol = f.FindObjectAny('cbmroot')
MCTracks = ROOT.TClonesArray("FairMCTrack")
TrackingHits = ROOT.TClonesArray("vetoPoint")
h={}
def exMCTracks():
ut.bookHist(h,'pz','pz',100,0.,100.)
ut.bookHist(h,'oz','oz',100,-10000.,10000.)
ut.bookHist(h,'ex','ex to det',100,-2.5,2.5,100,-2.5,2.5)
ut.bookHist(h,'N','N tracks',300,0.5,299.5)
#
sTree.SetBranchAddress("MCTrack", MCTracks)
detPos = (3.5*u.m+70*u.m+40*u.m-100*u.m)
for n in range(nEvents):
rc = sTree.GetEvent(n)
nMCTracks = MCTracks.GetEntriesFast()
rc = h['N'].Fill( nMCTracks )
for i in range(nMCTracks):
atrack = MCTracks.At(i)
pdgCode = atrack.GetPdgCode()
mom = ROOT.TLorentzVector()
atrack.Get4Momentum(mom)
if abs(pdgCode)==13 or abs(pdgCode)==211:
rc = h['pz'].Fill( mom.Pz() )
rc = h['oz'].Fill( atrack.GetStartZ() )
lam = ( detPos-atrack.GetStartZ() )/mom.Pz()
xdet = (atrack.GetStartX()+lam*mom.Px() )/u.m
ydet = (atrack.GetStartY()+lam*mom.Py() )/u.m
rc = h['ex'].Fill(xdet,ydet )
h['N'].Draw('box')
def exMCHits(dump=False):
ut.bookHist(h,'tz','tracking hits z',100,-100.,100.)
ut.bookHist(h,'tztx','tracking hits x vs z',1000,-40.,40.,100,-2.5,2.5)
ut.bookHist(h,'txty','tracking hits y vs x',100,-2.5,2.5,100,-2.5,2.5)
sTree.SetBranchAddress("vetoPoint", TrackingHits)
for n in range(nEvents):
rc = sTree.GetEvent(n)
nHits = TrackingHits.GetEntriesFast()
for i in range(nHits):
ahit = TrackingHits.At(i)
rc = h['tz'].Fill( ahit.GetZ()/u.m )
rc = h['txty'].Fill( ahit.GetX()/u.m,ahit.GetY()/u.m )
rc = h['tztx'].Fill( ahit.GetZ()/u.m,ahit.GetX()/u.m )
h['tztx'].Draw('box')
if dump:
for n in range( min(10,nEvents) ):
rc = sTree.GetEvent(n)
nHits = TrackingHits.GetEntriesFast()
for i in range(nHits):
ahit = TrackingHits.At(i)
print ahit.GetZ()/u.m, ahit.GetDetectorID(),ahit.GetLength(),ahit.GetEnergyLoss() | 0.106226 | 0.186576 |
from configparser import ConfigParser
import re
from psycopg2 import connect
from datetime import datetime
__author__ = 'litleleprikon'
SPLIT_RE = re.compile(r" |(?<! |[',\\.:!()@/<>])(?=[',\\.:!()@/<>])|(?<=[',\\.:!()@/<>])(?![',\\.:!()@/<>])",
re.IGNORECASE)
REMOVE_TAGS_RE = re.compile(r'<[A-Za-z\/][^>]*>')
stop_words = None
with open('stop_words.txt', 'r') as sw:
stop_words = set(map(lambda x: x.replace('\n', ''), sw.readlines()))
def get_config():
config = ConfigParser()
config.read('../config.ini')
return config['DATABASE']
config = get_config()
con = connect(database=config['Database'], user=config['User'], password=config['Password'],
host=config['Host'])
# con.autocommit = True
cursor = con.cursor()
cursor2 = con.cursor()
def count_words():
page = 0
while True:
start = datetime.now()
cursor.execute('SELECT id, abstract from project.publication LIMIT 100 OFFSET %s', [page*100])
page += 1
if cursor.rowcount == 0:
break
for abstract in cursor:
d_id = abstract[0]
abstract = REMOVE_TAGS_RE.sub('', abstract[1]).lower()
words = [x for x in SPLIT_RE.split(abstract) if x not in stop_words]
word_counts = dict()
for word in words:
if word in word_counts:
word_counts[word] += 1
else:
word_counts[word] = 1
cursor2.execute('select word, id from project.keyword where word = ANY(%s)', [list(word_counts.keys())])
words_ids = {x[0]: x[1] for x in cursor2}
missed_words = [x for x in words if x not in words_ids]
values = ', '.join(["('{}')".format(x.replace("'", "''")) for x in missed_words])
if missed_words:
query = 'insert into project.keyword (word) VALUES {} RETURNING word, id'.format(values)
cursor2.execute(query)
for x in cursor2:
words_ids[x[0]] = x[1]
for_insert = [{
'word_id': words_ids[word],
'count': word_counts[word],
'publication_id': d_id
} for word in words_ids]
cursor2.executemany('''
INSERT INTO project.word_in_text (word_id, publication_id, count)
VALUES (%(word_id)s, %(publication_id)s, %(count)s)
''', for_insert)
con.commit()
print(datetime.now() - start)
def main():
count_words()
if __name__ == '__main__':
main() | parser/tf_idf.py | from configparser import ConfigParser
import re
from psycopg2 import connect
from datetime import datetime
__author__ = 'litleleprikon'
SPLIT_RE = re.compile(r" |(?<! |[',\\.:!()@/<>])(?=[',\\.:!()@/<>])|(?<=[',\\.:!()@/<>])(?![',\\.:!()@/<>])",
re.IGNORECASE)
REMOVE_TAGS_RE = re.compile(r'<[A-Za-z\/][^>]*>')
stop_words = None
with open('stop_words.txt', 'r') as sw:
stop_words = set(map(lambda x: x.replace('\n', ''), sw.readlines()))
def get_config():
config = ConfigParser()
config.read('../config.ini')
return config['DATABASE']
config = get_config()
con = connect(database=config['Database'], user=config['User'], password=config['Password'],
host=config['Host'])
# con.autocommit = True
cursor = con.cursor()
cursor2 = con.cursor()
def count_words():
page = 0
while True:
start = datetime.now()
cursor.execute('SELECT id, abstract from project.publication LIMIT 100 OFFSET %s', [page*100])
page += 1
if cursor.rowcount == 0:
break
for abstract in cursor:
d_id = abstract[0]
abstract = REMOVE_TAGS_RE.sub('', abstract[1]).lower()
words = [x for x in SPLIT_RE.split(abstract) if x not in stop_words]
word_counts = dict()
for word in words:
if word in word_counts:
word_counts[word] += 1
else:
word_counts[word] = 1
cursor2.execute('select word, id from project.keyword where word = ANY(%s)', [list(word_counts.keys())])
words_ids = {x[0]: x[1] for x in cursor2}
missed_words = [x for x in words if x not in words_ids]
values = ', '.join(["('{}')".format(x.replace("'", "''")) for x in missed_words])
if missed_words:
query = 'insert into project.keyword (word) VALUES {} RETURNING word, id'.format(values)
cursor2.execute(query)
for x in cursor2:
words_ids[x[0]] = x[1]
for_insert = [{
'word_id': words_ids[word],
'count': word_counts[word],
'publication_id': d_id
} for word in words_ids]
cursor2.executemany('''
INSERT INTO project.word_in_text (word_id, publication_id, count)
VALUES (%(word_id)s, %(publication_id)s, %(count)s)
''', for_insert)
con.commit()
print(datetime.now() - start)
def main():
count_words()
if __name__ == '__main__':
main() | 0.277277 | 0.083143 |
class CoupledPair(object):
"""
Custom Pair class. CoupledPair has special methods that allow checking for
clashing with another pair, similarity to another pair, retrieving a value
in the pair with its counterpart value and modifying it, giving it more
utility and versatility. These methods are used extensively in the
implementation of CoupledValues. To learn more, print out the docstring for
each method with:
>>> print(help(CoupledPair.<method name>))
Example
-------
>>> my_pair = CoupledPair("a", "b") # This is okay
>>> try:
... another_pair = CoupledPair("c", "c") # Values cannot be the same
... except ValueError as e:
... print(e)
pair values cannot be the same
Parameters
----------
first: object
First value in a pair. The order does not matter as both objects can be
used as the key for its paired value
second: object
Second value in a pair. The order does not matter as both objects can be
used as the key for its paired value
Raises
------
ValueError
If first and second are the same
Returns
-------
pair: CoupledPair
"""
### ~~~~~~~~~~~~~~~~~~~~~~~~~~ CREATE rud ~~~~~~~~~~~~~~~~~~~~~~~~~~ ###
def __init__(self, first, second):
if first == second:
raise ValueError(f"pair values cannot be the same")
self.first = first
self.second = second
### ~~~~~~~~~~~~~~~~~~~~~~~~~~ c READ ud ~~~~~~~~~~~~~~~~~~~~~~~~~~~ ###
def __contains__(self, value):
return self.has(value)
def __eq__(self, other_pair):
return self.is_similar_to(other_pair)
def __repr__(self):
return self.to_str()
def __str__(self):
return self.to_str()
def clashes_with(self, other_pair):
"""
Checks if for clashing. If two pairs clash, it means one pair has a
value that the other pair has. This method is particularly useful for
CoupledValues as it can be used to ensure that none of the pairs in the
set have two similar values, preventing accessing pairs by key to be
erroneous.
Example
-------
>>> pair1 = CoupledPair("a", "b")
>>> pair2 = CoupledPair("b", "c")
>>> pair1.clashes_with(pair2)
True
Parameters
----------
other_pair: CoupledPair
The other pair you want to check for clashing
Raises
------
TypeError
If other_pair is not a CoupledPair object
Returns
-------
clashes: bool
Whether the two pairs clash
"""
if not isinstance(other_pair, CoupledPair):
raise TypeError("other_pair must be an instance of CoupledPair")
conditions = [
self.has(other_pair.first),
self.has(other_pair.second)
]
return any(conditions)
def copy(self):
"""
Copy the current values including references into a new CoupledPair
instance.
Returns
-------
new_pair: CoupledPair
The copied pair
"""
return CoupledPair(self.first, self.second)
def counterpart(self, key):
"""
Returns the value of one of the objects in the pair based on the value
of the other object.
Example
-------
>>> my_pair = CoupledPair("some text", 419)
>>> my_pair.counterpart(419)
'some text'
Parameters
----------
key: object
The value of the other object
Raises
------
KeyError
If key is not in the pair
Returns
-------
counterpart: object
"""
if key == self.first:
return self.second
elif key == self.second:
return self.first
else:
raise KeyError(f"{key} does not exist here")
def has(self, value):
"""
Whether a pair has a value.
Example
-------
>>> my_pair = CoupledPair("something", "more things")
>>> my_pair.has("something")
True
Parameters
----------
value: object
Returns
-------
bool
"""
conditions = [
value == self.first,
value == self.second
]
return any(conditions)
def is_similar_to(self, other_pair):
"""
Whether 2 pairs have the same values.
Example
-------
>>> pair1 = CoupledPair("a", "b")
>>> pair2 = CoupledPair("a", "b")
>>> pair3 = CoupledPair("b", "c")
>>> pair1.is_similar_to(pair2)
True
>>> pair1.is_similar_to(pair3)
False
Parameters
----------
other_pair: CoupledPair
Raises
------
TypeError
If other_pair is not an instance of CoupledPair
Returns
-------
bool
"""
if not isinstance(other_pair, CoupledPair):
raise TypeError("other_pair must be an instance of CoupledPair")
conditions = [
self.first == other_pair.first and self.second == other_pair.second,
self.first == other_pair.second and self.second == other_pair.first
]
return any(conditions)
def setup_str(self):
"""
Sets up pair values to become a string. If one of the values in the pair
is a string type, then quotation marks are added around it. This method
is run before converting a CoupledPair to string.
Returns
-------
(first, second): tuple of values
Values that have been set up to be converted to string
"""
return repr(self.first), repr(self.second)
def to_str(self):
"""
Convert CoupledPair to full string.
Returns
-------
str
"""
s_str = self.setup_str()
return f"CoupledPair({s_str[0]}, {s_str[1]})"
def to_mini_str(self):
"""
Convert CoupledPair to short string, used by CoupledValues in its own
to_str method.
Returns
-------
str
"""
s_str = self.setup_str()
return f"({s_str[0]}, {s_str[1]})"
### ~~~~~~~~~~~~~~~~~~~~~~~~~ cr UPDATE d ~~~~~~~~~~~~~~~~~~~~~~~~~~ ###
def modify(self, key, value):
"""
Modify one of the values in the pair by accessing it with the value of
the other object in the pair.
Example
-------
>>> my_pair = CoupledPair("my key", "oh no this is wrong")
>>> my_pair.modify("my key", "new value")
>>> print(my_pair)
CoupledPair("my key", "new value")
Parameters
----------
key: object
The value of the key in the pair
value: object
The new value you want to modify the value of the pair with
Raises
------
KeyError
If key is not in the pair
ValueError
If value is the same as key
Returns
-------
None
"""
if key == value:
raise ValueError("key cannot be the same as value")
if key == self.first:
self.second = value
elif key == self.second:
self.first = value
else:
raise KeyError(f"{key} does not exist")
### ~~~~~~~~~~~~~~~~~~~~~~~~~~ GENERAL FUNCTIONS ~~~~~~~~~~~~~~~~~~~~~~~~~~~ ###
def make_pairs(values):
"""
Makes a list pairs from an iterable. However, different iterables have
different behaviours when making a list of pairs.
If you are trying to make a list of pairs from a CoupledPair,
the CoupledPair object is wrapped in a list and returned back to you.
If you are trying to make a list of pairs from a list or set,
make_pairs loops through the array and forms CoupledPair objects
recursively.
If you are trying to make a list of pairs from a tuple,
the CoupledPair initializer is run and the new CoupledPair object is
returned in a list.
If you are trying to make a list of pairs from a dict,
the items in the dictionary are looped through and CoupledPair instances are
created. Using a dictionary to create a list of CoupledPair objects is by far
the safest method.
Parameters
----------
value: CoupledPair, list, set, tuple or dict
Returns
-------
list of CoupledPair
"""
if isinstance(values, CoupledPair):
return [values]
elif isinstance(values, list) or isinstance(values, set):
result = []
for value in values:
result.extend(make_pairs(value))
return result
elif isinstance(values, tuple):
return [CoupledPair(values[0], values[1])]
elif isinstance(values, dict):
result = []
for key, value in values.items():
result.append(CoupledPair(key, value))
return result
else:
raise TypeError(
"make_pairs only accepts CoupledPair, list, set, tuple or dict"
) | coupledpairs/coupledpairs.py |
class CoupledPair(object):
"""
Custom Pair class. CoupledPair has special methods that allow checking for
clashing with another pair, similarity to another pair, retrieving a value
in the pair with its counterpart value and modifying it, giving it more
utility and versatility. These methods are used extensively in the
implementation of CoupledValues. To learn more, print out the docstring for
each method with:
>>> print(help(CoupledPair.<method name>))
Example
-------
>>> my_pair = CoupledPair("a", "b") # This is okay
>>> try:
... another_pair = CoupledPair("c", "c") # Values cannot be the same
... except ValueError as e:
... print(e)
pair values cannot be the same
Parameters
----------
first: object
First value in a pair. The order does not matter as both objects can be
used as the key for its paired value
second: object
Second value in a pair. The order does not matter as both objects can be
used as the key for its paired value
Raises
------
ValueError
If first and second are the same
Returns
-------
pair: CoupledPair
"""
### ~~~~~~~~~~~~~~~~~~~~~~~~~~ CREATE rud ~~~~~~~~~~~~~~~~~~~~~~~~~~ ###
def __init__(self, first, second):
if first == second:
raise ValueError(f"pair values cannot be the same")
self.first = first
self.second = second
### ~~~~~~~~~~~~~~~~~~~~~~~~~~ c READ ud ~~~~~~~~~~~~~~~~~~~~~~~~~~~ ###
def __contains__(self, value):
return self.has(value)
def __eq__(self, other_pair):
return self.is_similar_to(other_pair)
def __repr__(self):
return self.to_str()
def __str__(self):
return self.to_str()
def clashes_with(self, other_pair):
"""
Checks if for clashing. If two pairs clash, it means one pair has a
value that the other pair has. This method is particularly useful for
CoupledValues as it can be used to ensure that none of the pairs in the
set have two similar values, preventing accessing pairs by key to be
erroneous.
Example
-------
>>> pair1 = CoupledPair("a", "b")
>>> pair2 = CoupledPair("b", "c")
>>> pair1.clashes_with(pair2)
True
Parameters
----------
other_pair: CoupledPair
The other pair you want to check for clashing
Raises
------
TypeError
If other_pair is not a CoupledPair object
Returns
-------
clashes: bool
Whether the two pairs clash
"""
if not isinstance(other_pair, CoupledPair):
raise TypeError("other_pair must be an instance of CoupledPair")
conditions = [
self.has(other_pair.first),
self.has(other_pair.second)
]
return any(conditions)
def copy(self):
"""
Copy the current values including references into a new CoupledPair
instance.
Returns
-------
new_pair: CoupledPair
The copied pair
"""
return CoupledPair(self.first, self.second)
def counterpart(self, key):
"""
Returns the value of one of the objects in the pair based on the value
of the other object.
Example
-------
>>> my_pair = CoupledPair("some text", 419)
>>> my_pair.counterpart(419)
'some text'
Parameters
----------
key: object
The value of the other object
Raises
------
KeyError
If key is not in the pair
Returns
-------
counterpart: object
"""
if key == self.first:
return self.second
elif key == self.second:
return self.first
else:
raise KeyError(f"{key} does not exist here")
def has(self, value):
"""
Whether a pair has a value.
Example
-------
>>> my_pair = CoupledPair("something", "more things")
>>> my_pair.has("something")
True
Parameters
----------
value: object
Returns
-------
bool
"""
conditions = [
value == self.first,
value == self.second
]
return any(conditions)
def is_similar_to(self, other_pair):
"""
Whether 2 pairs have the same values.
Example
-------
>>> pair1 = CoupledPair("a", "b")
>>> pair2 = CoupledPair("a", "b")
>>> pair3 = CoupledPair("b", "c")
>>> pair1.is_similar_to(pair2)
True
>>> pair1.is_similar_to(pair3)
False
Parameters
----------
other_pair: CoupledPair
Raises
------
TypeError
If other_pair is not an instance of CoupledPair
Returns
-------
bool
"""
if not isinstance(other_pair, CoupledPair):
raise TypeError("other_pair must be an instance of CoupledPair")
conditions = [
self.first == other_pair.first and self.second == other_pair.second,
self.first == other_pair.second and self.second == other_pair.first
]
return any(conditions)
def setup_str(self):
"""
Sets up pair values to become a string. If one of the values in the pair
is a string type, then quotation marks are added around it. This method
is run before converting a CoupledPair to string.
Returns
-------
(first, second): tuple of values
Values that have been set up to be converted to string
"""
return repr(self.first), repr(self.second)
def to_str(self):
"""
Convert CoupledPair to full string.
Returns
-------
str
"""
s_str = self.setup_str()
return f"CoupledPair({s_str[0]}, {s_str[1]})"
def to_mini_str(self):
"""
Convert CoupledPair to short string, used by CoupledValues in its own
to_str method.
Returns
-------
str
"""
s_str = self.setup_str()
return f"({s_str[0]}, {s_str[1]})"
### ~~~~~~~~~~~~~~~~~~~~~~~~~ cr UPDATE d ~~~~~~~~~~~~~~~~~~~~~~~~~~ ###
def modify(self, key, value):
"""
Modify one of the values in the pair by accessing it with the value of
the other object in the pair.
Example
-------
>>> my_pair = CoupledPair("my key", "oh no this is wrong")
>>> my_pair.modify("my key", "new value")
>>> print(my_pair)
CoupledPair("my key", "new value")
Parameters
----------
key: object
The value of the key in the pair
value: object
The new value you want to modify the value of the pair with
Raises
------
KeyError
If key is not in the pair
ValueError
If value is the same as key
Returns
-------
None
"""
if key == value:
raise ValueError("key cannot be the same as value")
if key == self.first:
self.second = value
elif key == self.second:
self.first = value
else:
raise KeyError(f"{key} does not exist")
### ~~~~~~~~~~~~~~~~~~~~~~~~~~ GENERAL FUNCTIONS ~~~~~~~~~~~~~~~~~~~~~~~~~~~ ###
def make_pairs(values):
    """
    Build a list of CoupledPair objects from *values*.
    Behaviour depends on the input type:
    * CoupledPair -- wrapped in a single-element list and returned.
    * list or set -- each element is converted recursively and the resulting
      pairs are concatenated.
    * tuple -- its first two items become one CoupledPair.
    * dict -- every (key, value) item becomes one CoupledPair. Using a
      dictionary is by far the safest way to build a list of pairs.
    Parameters
    ----------
    values: CoupledPair, list, set, tuple or dict
    Raises
    ------
    TypeError
        If values is none of the accepted types
    Returns
    -------
    list of CoupledPair
    """
    if isinstance(values, CoupledPair):
        return [values]
    if isinstance(values, (list, set)):
        pairs = []
        for item in values:
            pairs += make_pairs(item)
        return pairs
    if isinstance(values, tuple):
        return [CoupledPair(values[0], values[1])]
    if isinstance(values, dict):
        return [CoupledPair(key, value) for key, value in values.items()]
    raise TypeError(
        "make_pairs only accepts CoupledPair, list, set, tuple or dict"
    )
from pycatia.knowledge_interfaces.enum_param import EnumParam
class BoolParam(EnumParam):
    """
    Wrapper around the CATIA ``BoolParam`` COM object.
    .. note::
        :class: toggle
    CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445)
    Hierarchy: System.IUnknown -> System.IDispatch -> System.CATBaseUnknown
    -> System.CATBaseDispatch -> System.AnyObject
    -> KnowledgeInterfaces.Parameter -> KnowledgeInterfaces.EnumParam
    -> BoolParam
    Represents the boolean parameter. Example of creation (VBA)::
        Dim CATDocs As Documents
        Set CATDocs = CATIA.Documents
        Dim part1 As Document
        Set part1 = CATDocs.Add("CATPart")
        Dim availability As BooleanParam
        Set availability = part1.Parameters.CreateBoolean("availability", True)
    """

    def __init__(self, com_object):
        # Delegate COM bookkeeping to EnumParam, then keep a typed handle.
        super().__init__(com_object)
        self.bool_param = com_object

    @property
    def value(self) -> bool:
        """
        Current value of the boolean parameter.
        CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445):
        ``o Property Value() As boolean``
        VBA example -- set the availability parameter to True if it is False::
            If (availability.Value = False) Then
                availability.Value = True
            End If
        :return: bool
        :rtype: bool
        """
        return self.bool_param.Value

    @value.setter
    def value(self, value: bool):
        """
        :param bool value: new value for the parameter
        """
        self.bool_param.Value = value

    def __repr__(self):
        return 'BoolParam(name="{}")'.format(self.name)
class BoolParam(EnumParam):
    """
    Wrapper around the CATIA ``BoolParam`` COM object.
    .. note::
        :class: toggle
    CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445)
    Hierarchy: System.IUnknown -> System.IDispatch -> System.CATBaseUnknown
    -> System.CATBaseDispatch -> System.AnyObject
    -> KnowledgeInterfaces.Parameter -> KnowledgeInterfaces.EnumParam
    -> BoolParam
    Represents the boolean parameter. Example of creation (VBA)::
        Dim CATDocs As Documents
        Set CATDocs = CATIA.Documents
        Dim part1 As Document
        Set part1 = CATDocs.Add("CATPart")
        Dim availability As BooleanParam
        Set availability = part1.Parameters.CreateBoolean("availability", True)
    """

    def __init__(self, com_object):
        # Delegate COM bookkeeping to EnumParam, then keep a typed handle.
        super().__init__(com_object)
        self.bool_param = com_object

    @property
    def value(self) -> bool:
        """
        Current value of the boolean parameter.
        CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445):
        ``o Property Value() As boolean``
        VBA example -- set the availability parameter to True if it is False::
            If (availability.Value = False) Then
                availability.Value = True
            End If
        :return: bool
        :rtype: bool
        """
        return self.bool_param.Value

    @value.setter
    def value(self, value: bool):
        """
        :param bool value: new value for the parameter
        """
        self.bool_param.Value = value

    def __repr__(self):
        return 'BoolParam(name="{}")'.format(self.name)
from __future__ import print_function
from __future__ import division
import os, sys
sys.path.insert(0, r'../')
import time
import argparse
from optimise import TRAIN, TUNE, hyperoptTUNE, skoptTUNE
def _str2bool(value):
    """Parse a boolean command-line flag.

    ``type=bool`` is an argparse trap: ``bool("False")`` is True because any
    non-empty string is truthy. Accept the usual textual spellings instead.
    """
    if isinstance(value, bool):
        return value
    return value.lower() in ('yes', 'true', 't', '1')


parser = argparse.ArgumentParser()
# Pick a data set and a LSTM model
parser.add_argument('--data', type=str, default='lidong', help='Choose a data set; lidong or election')
parser.add_argument('--load_data', action='store_true', help='Load previously saved data')
parser.add_argument('--model', type=str, default='LSTM', help='Choose a model; LSTM, TDLSTM or TCLSTM')
parser.add_argument('--tune', action='store_true', help='Whether or not to optimise hyperparameters')
parser.add_argument('--tuning_method', type=str, default='skopt', help='Which optimization method to use: grid, rand, hyperopt or skopt')
parser.add_argument('--num_calls', type=int, default=10, help='Number of settings sampled for hyper-parameter tuning')
# Training parameters
parser.add_argument('--random_state', type=int, default=42, help='Random state initialization for reproducibility')
parser.add_argument('--batch_size', type=int, default=51, help='Mini-batch size')
parser.add_argument('--seq_len', type=int, default=42, help='Sequence length')
parser.add_argument('--num_hidden', type=int, default=382, help='Number of units in the hidden layer')
parser.add_argument('--num_classes', type=int, default=3, help='Number of classes/labels')
parser.add_argument('--dropout_input', type=float, default=0.4, help='Input keep probability for dropout')
parser.add_argument('--dropout_output', type=float, default=0.4, help='Output keep probability for dropout')
parser.add_argument('--clip_norm', type=float, default=0.5, help='Gradient clipping ratio')
parser.add_argument('--learning_rate', type=float, default=0.001, help='Learning rate for the optimizer')
parser.add_argument('--max_epoch', type=int, default=1000, help='Total number of epochs for training')
parser.add_argument('--early_stopping_rounds', type=int, default=20, help='Number of epochs allowed for setting early stopping criterion')
# Typo fixed in help text: was "Classifiaction".
parser.add_argument('--scoring_metrics', type=str, default='accuracy', help='Classification metrics used for early stopping')
# Session parameters
parser.add_argument('--restore', action='store_true', help='Restore previously trained model')
parser.add_argument('--checkpoint_file', type=str, default='../checkpoints/lstm', help='Checkpoint file path')
# type=_str2bool (not type=bool): with type=bool, "--flag False" parsed as True.
parser.add_argument('--allow_soft_placement', type=_str2bool, default=True, help='Allow soft device replacement')
parser.add_argument('--log_device_placement', type=_str2bool, default=False, help='Log placement of ops on devices')
args = parser.parse_args()
if __name__ == '__main__':
    t0 = time.time()
    if not args.tune:
        # Plain training run with the hyper-parameters given above.
        TRAIN(args, args.model)
    else:
        # Hyper-parameter optimisation with the requested search strategy.
        if args.tuning_method == 'skopt':
            skoptTUNE(args, args.model, args.num_calls)
        elif args.tuning_method == 'hyperopt':
            hyperoptTUNE(args, args.model, args.num_calls)
        elif args.tuning_method == 'rand':
            TUNE(args, args.model, 'rand', args.num_calls)
        else:
            TUNE(args, args.model, 'grid')
    # TEST(args.model)
    print()
    print("Total time taken: %f mins" % ((time.time() - t0) / 60))
from __future__ import division
import os, sys
sys.path.insert(0, r'../')
import time
import argparse
from optimise import TRAIN, TUNE, hyperoptTUNE, skoptTUNE
def _str2bool(value):
    """Parse a boolean command-line flag.

    ``type=bool`` is an argparse trap: ``bool("False")`` is True because any
    non-empty string is truthy. Accept the usual textual spellings instead.
    """
    if isinstance(value, bool):
        return value
    return value.lower() in ('yes', 'true', 't', '1')


parser = argparse.ArgumentParser()
# Pick a data set and a LSTM model
parser.add_argument('--data', type=str, default='lidong', help='Choose a data set; lidong or election')
parser.add_argument('--load_data', action='store_true', help='Load previously saved data')
parser.add_argument('--model', type=str, default='LSTM', help='Choose a model; LSTM, TDLSTM or TCLSTM')
parser.add_argument('--tune', action='store_true', help='Whether or not to optimise hyperparameters')
parser.add_argument('--tuning_method', type=str, default='skopt', help='Which optimization method to use: grid, rand, hyperopt or skopt')
parser.add_argument('--num_calls', type=int, default=10, help='Number of settings sampled for hyper-parameter tuning')
# Training parameters
parser.add_argument('--random_state', type=int, default=42, help='Random state initialization for reproducibility')
parser.add_argument('--batch_size', type=int, default=51, help='Mini-batch size')
parser.add_argument('--seq_len', type=int, default=42, help='Sequence length')
parser.add_argument('--num_hidden', type=int, default=382, help='Number of units in the hidden layer')
parser.add_argument('--num_classes', type=int, default=3, help='Number of classes/labels')
parser.add_argument('--dropout_input', type=float, default=0.4, help='Input keep probability for dropout')
parser.add_argument('--dropout_output', type=float, default=0.4, help='Output keep probability for dropout')
parser.add_argument('--clip_norm', type=float, default=0.5, help='Gradient clipping ratio')
parser.add_argument('--learning_rate', type=float, default=0.001, help='Learning rate for the optimizer')
parser.add_argument('--max_epoch', type=int, default=1000, help='Total number of epochs for training')
parser.add_argument('--early_stopping_rounds', type=int, default=20, help='Number of epochs allowed for setting early stopping criterion')
# Typo fixed in help text: was "Classifiaction".
parser.add_argument('--scoring_metrics', type=str, default='accuracy', help='Classification metrics used for early stopping')
# Session parameters
parser.add_argument('--restore', action='store_true', help='Restore previously trained model')
parser.add_argument('--checkpoint_file', type=str, default='../checkpoints/lstm', help='Checkpoint file path')
# type=_str2bool (not type=bool): with type=bool, "--flag False" parsed as True.
parser.add_argument('--allow_soft_placement', type=_str2bool, default=True, help='Allow soft device replacement')
parser.add_argument('--log_device_placement', type=_str2bool, default=False, help='Log placement of ops on devices')
args = parser.parse_args()
if __name__ == '__main__':
    t0 = time.time()
    if not args.tune:
        # Plain training run with the hyper-parameters given above.
        TRAIN(args, args.model)
    else:
        # Hyper-parameter optimisation with the requested search strategy.
        if args.tuning_method == 'skopt':
            skoptTUNE(args, args.model, args.num_calls)
        elif args.tuning_method == 'hyperopt':
            hyperoptTUNE(args, args.model, args.num_calls)
        elif args.tuning_method == 'rand':
            TUNE(args, args.model, 'rand', args.num_calls)
        else:
            TUNE(args, args.model, 'grid')
    # TEST(args.model)
    print()
    print("Total time taken: %f mins" % ((time.time() - t0) / 60))
import subprocess
import datetime
from ruffus import *
import pandas as pd
import re
import urllib.request
@originate("data/BLUETH_20150819.BT")
def bt19(output_file):
    """Fetch the 2015-08-19 Bluetooth capture from AARNet CloudStor."""
    # Public OwnCloud share published by VicRoads (open data).
    url = ("https://cloudstor.aarnet.edu.au/plus/index.php/s/SlTMKzq9OKOaWQr/"
           "download?path=%2Fvicroads_opendata&files=BLUETH_20150819.BT")
    print(f"Downloading {output_file} from {url}")
    urllib.request.urlretrieve(url, output_file)
@originate("data/BLUETH_20150826.BT")
def bt26(output_file):
    """Fetch the 2015-08-26 Bluetooth capture from AARNet CloudStor."""
    url = ("https://cloudstor.aarnet.edu.au/plus/index.php/s/SlTMKzq9OKOaWQr/"
           "download?path=%2Fvicroads_opendata&files=BLUETH_20150826.BT")
    print(f"Downloading {output_file} from {url}")
    urllib.request.urlretrieve(url, output_file)
# X.BT -> X.filtered.BT
@transform([bt19, bt26],
           suffix(".BT"),
           ".filtered.BT")
def filter_bt(input_file, output_file):
    """Keep only the detector records for sites 2425 and 2409."""
    # grep is a lot faster than filtering the file in Python.
    with open(output_file, 'w') as sink:
        subprocess.call(['grep', '-E', '^(2425|2409),', input_file], stdout=sink)
def segments(df):
    """
    Convert an ordered table of visited sites into segments between adjacent
    nodes.
    df -- DataFrame with columns Site, Unix Time, Anonymized Bluetooth ID,
    assumed pre-sorted by time for a single Bluetooth ID.
    Returns a list of (bluetooth_id, site_a, site_b, time_a, time_b) tuples;
    consecutive sightings at the same site produce no segment.
    """
    result = []
    previous = None
    for _, current in df.iterrows():
        if previous is not None and current["Site"] != previous["Site"]:
            result.append((previous["Anonymized Bluetooth ID"],
                           previous["Site"],
                           current["Site"],
                           previous["Unix Time"],
                           current["Unix Time"]))
        previous = current
    return result
def parse_date(unix_time):
    """
    Convert a Unix timestamp to a naive datetime matching VicRoads' clock.
    Unix servers *should* have their system clock set to UTC, so theoretically
    we would need to convert from UTC to AEST (localtime). However, VicRoads
    seems to have set their operating system clock to AEST, so these "UTC"
    timestamps are really local already. The easiest way to deal with this is
    to treat all datetimes as naive (ignore timezone).
    TLDR; VicRoads didn't handle timezones correctly. We need to copy their
    error for consistency.
    """
    # datetime.utcfromtimestamp is deprecated since Python 3.12; build the
    # same naive value from an aware UTC datetime and strip the tzinfo.
    d_utc = datetime.datetime.fromtimestamp(
        unix_time, tz=datetime.timezone.utc).replace(tzinfo=None)
    d_local = d_utc  # Naive datetime. It's already shifted to AEST (but shouldn't be)
    return d_local
# X.filtered.BT -> X.traveltime
@transform(filter_bt,
           suffix(".filtered.BT"),
           ".traveltime")
def import_bt(input_file, output_file):
    """
    Compute the median 15-minute travel time from site 2409 to 2425 over one
    day of filtered Bluetooth sightings and write it to output_file as CSV.
    """
    # Load into a Pandas data table.
    f = pd.read_csv(input_file, header=None, names=['Site', 'Unix Time', 'Anonymized Bluetooth ID'])
    f_sorted = f.sort_values(by=['Anonymized Bluetooth ID', 'Unix Time'])
    f_groups = f_sorted.groupby(['Anonymized Bluetooth ID'])
    results = []
    for _bt_id, data in f_groups:
        results.extend(segments(data))
    all_segments = pd.DataFrame(results,
                                columns=('Anonymized Bluetooth ID', 'Site A', 'Site B', 'Time A', 'Time B'))
    # Trips that start at site 2409.
    inbound = all_segments[all_segments["Site A"] == 2409].copy()
    inbound["Travel Time"] = inbound["Time B"] - inbound["Time A"]
    # Filter extreme travel times (over 30 minutes).
    inbound = inbound[inbound["Travel Time"] <= 1800]
    ts = pd.Series(list(inbound["Travel Time"]),
                   index=list([parse_date(t) for t in inbound["Time A"]]))
    # Fixed: resample(..., how='median') was removed in pandas 0.25;
    # use the method form instead.
    ts_resampled = ts.resample('15min').median()
    # Extract the collection date from the filename
    # (dots escaped: was ".filtered.BT" with an unescaped dot).
    p = re.compile(r"data/BLUETH_(?P<date>\d{8})\.filtered\.BT")
    m = p.match(input_file)
    date_str = m.group('date')
    start_datetime = datetime.datetime.strptime(date_str, '%Y%m%d')
    # Index over the entire day, even if some times are missing. The last
    # 15 minutes are usually not present.
    rng = pd.date_range(start_datetime, periods=24 * 4, freq='15min')
    ts_resampled = pd.Series(ts_resampled, index=rng)
    # Forward-fill missing values (fillna(method='pad') is deprecated).
    ts_resampled = ts_resampled.ffill()
    # Travel time from site 2409 (Chapel St) to 2425 (Warrigal Rd) along
    # Princes Highway (Outbound/Westbound).
    ts_resampled.to_csv(output_file)
import datetime
from ruffus import *
import pandas as pd
import re
import urllib.request
@originate("data/BLUETH_20150819.BT")
def bt19(output_file):
    """Fetch the 2015-08-19 Bluetooth capture from AARNet CloudStor."""
    # Public OwnCloud share published by VicRoads (open data).
    url = ("https://cloudstor.aarnet.edu.au/plus/index.php/s/SlTMKzq9OKOaWQr/"
           "download?path=%2Fvicroads_opendata&files=BLUETH_20150819.BT")
    print(f"Downloading {output_file} from {url}")
    urllib.request.urlretrieve(url, output_file)
@originate("data/BLUETH_20150826.BT")
def bt26(output_file):
    """Fetch the 2015-08-26 Bluetooth capture from AARNet CloudStor."""
    url = ("https://cloudstor.aarnet.edu.au/plus/index.php/s/SlTMKzq9OKOaWQr/"
           "download?path=%2Fvicroads_opendata&files=BLUETH_20150826.BT")
    print(f"Downloading {output_file} from {url}")
    urllib.request.urlretrieve(url, output_file)
# X.BT -> X.filtered.BT
@transform([bt19, bt26],
           suffix(".BT"),
           ".filtered.BT")
def filter_bt(input_file, output_file):
    """Keep only the detector records for sites 2425 and 2409."""
    # grep is a lot faster than filtering the file in Python.
    with open(output_file, 'w') as sink:
        subprocess.call(['grep', '-E', '^(2425|2409),', input_file], stdout=sink)
def segments(df):
    """
    Convert an ordered table of visited sites into segments between adjacent
    nodes.
    df -- DataFrame with columns Site, Unix Time, Anonymized Bluetooth ID,
    assumed pre-sorted by time for a single Bluetooth ID.
    Returns a list of (bluetooth_id, site_a, site_b, time_a, time_b) tuples;
    consecutive sightings at the same site produce no segment.
    """
    result = []
    previous = None
    for _, current in df.iterrows():
        if previous is not None and current["Site"] != previous["Site"]:
            result.append((previous["Anonymized Bluetooth ID"],
                           previous["Site"],
                           current["Site"],
                           previous["Unix Time"],
                           current["Unix Time"]))
        previous = current
    return result
def parse_date(unix_time):
    """
    Convert a Unix timestamp to a naive datetime matching VicRoads' clock.
    Unix servers *should* have their system clock set to UTC, so theoretically
    we would need to convert from UTC to AEST (localtime). However, VicRoads
    seems to have set their operating system clock to AEST, so these "UTC"
    timestamps are really local already. The easiest way to deal with this is
    to treat all datetimes as naive (ignore timezone).
    TLDR; VicRoads didn't handle timezones correctly. We need to copy their
    error for consistency.
    """
    # datetime.utcfromtimestamp is deprecated since Python 3.12; build the
    # same naive value from an aware UTC datetime and strip the tzinfo.
    d_utc = datetime.datetime.fromtimestamp(
        unix_time, tz=datetime.timezone.utc).replace(tzinfo=None)
    d_local = d_utc  # Naive datetime. It's already shifted to AEST (but shouldn't be)
    return d_local
# X.filtered.BT -> X.traveltime
@transform(filter_bt,
           suffix(".filtered.BT"),
           ".traveltime")
def import_bt(input_file, output_file):
    """
    Compute the median 15-minute travel time from site 2409 to 2425 over one
    day of filtered Bluetooth sightings and write it to output_file as CSV.
    """
    # Load into a Pandas data table.
    f = pd.read_csv(input_file, header=None, names=['Site', 'Unix Time', 'Anonymized Bluetooth ID'])
    f_sorted = f.sort_values(by=['Anonymized Bluetooth ID', 'Unix Time'])
    f_groups = f_sorted.groupby(['Anonymized Bluetooth ID'])
    results = []
    for _bt_id, data in f_groups:
        results.extend(segments(data))
    all_segments = pd.DataFrame(results,
                                columns=('Anonymized Bluetooth ID', 'Site A', 'Site B', 'Time A', 'Time B'))
    # Trips that start at site 2409.
    inbound = all_segments[all_segments["Site A"] == 2409].copy()
    inbound["Travel Time"] = inbound["Time B"] - inbound["Time A"]
    # Filter extreme travel times (over 30 minutes).
    inbound = inbound[inbound["Travel Time"] <= 1800]
    ts = pd.Series(list(inbound["Travel Time"]),
                   index=list([parse_date(t) for t in inbound["Time A"]]))
    # Fixed: resample(..., how='median') was removed in pandas 0.25;
    # use the method form instead.
    ts_resampled = ts.resample('15min').median()
    # Extract the collection date from the filename
    # (dots escaped: was ".filtered.BT" with an unescaped dot).
    p = re.compile(r"data/BLUETH_(?P<date>\d{8})\.filtered\.BT")
    m = p.match(input_file)
    date_str = m.group('date')
    start_datetime = datetime.datetime.strptime(date_str, '%Y%m%d')
    # Index over the entire day, even if some times are missing. The last
    # 15 minutes are usually not present.
    rng = pd.date_range(start_datetime, periods=24 * 4, freq='15min')
    ts_resampled = pd.Series(ts_resampled, index=rng)
    # Forward-fill missing values (fillna(method='pad') is deprecated).
    ts_resampled = ts_resampled.ffill()
    # Travel time from site 2409 (Chapel St) to 2425 (Warrigal Rd) along
    # Princes Highway (Outbound/Westbound).
    ts_resampled.to_csv(output_file)
# Part 1 - Data Preprocessing

# Libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

# Training set: use the 'Open' price column only
train_df = pd.read_csv('Google_Stock_Price_Train.csv')
train_prices = train_df.iloc[:, 1:2].values

# Feature scaling to [0, 1]
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler(feature_range=(0, 1))
train_scaled = scaler.fit_transform(train_prices)

# Supervised samples: 60 past timesteps -> 1 output
X_train = np.array([train_scaled[i - 60:i, 0] for i in range(60, 1258)])
y_train = np.array([train_scaled[i, 0] for i in range(60, 1258)])

# Reshape to (samples, timesteps, features) as LSTM layers expect
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))

# Part 2 - Building the RNN
from keras.models import Sequential
from keras.layers import Dense, LSTM, Dropout

regressor = Sequential()
# Four stacked LSTM layers of 50 units, each followed by 20% dropout
regressor.add(LSTM(units=50, return_sequences=True, input_shape=(X_train.shape[1], 1)))
regressor.add(Dropout(0.2))
regressor.add(LSTM(units=50, return_sequences=True))
regressor.add(Dropout(0.2))
regressor.add(LSTM(units=50, return_sequences=True))
regressor.add(Dropout(0.2))
regressor.add(LSTM(units=50))
regressor.add(Dropout(0.2))
# Single-unit regression output
regressor.add(Dense(units=1))

regressor.compile(optimizer='adam', loss='mean_squared_error')
regressor.fit(X_train, y_train, epochs=100, batch_size=32)

# Part 3 - Making the predictions and visualising the results

# Real stock price of 2017
test_df = pd.read_csv('Google_Stock_Price_Test.csv')
real_stock_price = test_df.iloc[:, 1:2].values

# Predictions need the last 60 training days prepended to the test window
full_series = pd.concat((train_df['Open'], test_df['Open']), axis=0)
inputs = full_series[len(full_series) - len(test_df) - 60:].values
inputs = inputs.reshape(-1, 1)
inputs = scaler.transform(inputs)
X_test = np.array([inputs[i - 60:i, 0] for i in range(60, 80)])
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
predicted_stock_price = scaler.inverse_transform(regressor.predict(X_test))

# Visualising the results
plt.plot(real_stock_price, color='red', label='Real Google Stock Price')
plt.plot(predicted_stock_price, color='blue', label='Predicted Google Stock Price')
plt.title('Google Stock Price Prediction')
plt.xlabel('Time')
plt.ylabel('Google Stock Price')
plt.legend()
plt.show()
# Part 1 - Data Preprocessing

# Libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

# Training set: use the 'Open' price column only
train_df = pd.read_csv('Google_Stock_Price_Train.csv')
train_prices = train_df.iloc[:, 1:2].values

# Feature scaling to [0, 1]
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler(feature_range=(0, 1))
train_scaled = scaler.fit_transform(train_prices)

# Supervised samples: 60 past timesteps -> 1 output
X_train = np.array([train_scaled[i - 60:i, 0] for i in range(60, 1258)])
y_train = np.array([train_scaled[i, 0] for i in range(60, 1258)])

# Reshape to (samples, timesteps, features) as LSTM layers expect
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))

# Part 2 - Building the RNN
from keras.models import Sequential
from keras.layers import Dense, LSTM, Dropout

regressor = Sequential()
# Four stacked LSTM layers of 50 units, each followed by 20% dropout
regressor.add(LSTM(units=50, return_sequences=True, input_shape=(X_train.shape[1], 1)))
regressor.add(Dropout(0.2))
regressor.add(LSTM(units=50, return_sequences=True))
regressor.add(Dropout(0.2))
regressor.add(LSTM(units=50, return_sequences=True))
regressor.add(Dropout(0.2))
regressor.add(LSTM(units=50))
regressor.add(Dropout(0.2))
# Single-unit regression output
regressor.add(Dense(units=1))

regressor.compile(optimizer='adam', loss='mean_squared_error')
regressor.fit(X_train, y_train, epochs=100, batch_size=32)

# Part 3 - Making the predictions and visualising the results

# Real stock price of 2017
test_df = pd.read_csv('Google_Stock_Price_Test.csv')
real_stock_price = test_df.iloc[:, 1:2].values

# Predictions need the last 60 training days prepended to the test window
full_series = pd.concat((train_df['Open'], test_df['Open']), axis=0)
inputs = full_series[len(full_series) - len(test_df) - 60:].values
inputs = inputs.reshape(-1, 1)
inputs = scaler.transform(inputs)
X_test = np.array([inputs[i - 60:i, 0] for i in range(60, 80)])
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
predicted_stock_price = scaler.inverse_transform(regressor.predict(X_test))

# Visualising the results
plt.plot(real_stock_price, color='red', label='Real Google Stock Price')
plt.plot(predicted_stock_price, color='blue', label='Predicted Google Stock Price')
plt.title('Google Stock Price Prediction')
plt.xlabel('Time')
plt.ylabel('Google Stock Price')
plt.legend()
plt.show()
from datetime import datetime
from flask_helpers.ErrorHandler import ErrorHandler
from google.cloud import datastore
from Persistence.AbstractPersister import AbstractPersister
class Persister(AbstractPersister):
    """GCP Datastore-backed persister for saved games.

    Construction performs a throw-away validation write so that
    misconfigured credentials or projects fail fast, rather than on the
    first real save.
    """

    def __init__(self):
        super().__init__()
        self.handler.module = "GCPDatastorePersist"
        self.handler.log(message="Preparing datastore client")
        self.datastore_client = datastore.Client()
        self.kind = "save"
        probe_name = "validation-save-ignored"
        current_date = f"{datetime.now()}"
        # Get a datastore key for the validation entity.
        try:
            self.handler.log(message=f"Creating datastore key: {probe_name}")
            probe_key = self.datastore_client.key(self.kind, probe_name)
        except Exception as e:
            print(f"Exception while getting datastore client - {e}")
            self.handler.log(message=f"In GCPDatastorePersist __init__ an exception occurred: {e!r}")
            raise
        # Create the validation entity.
        try:
            probe_entity = datastore.Entity(key=probe_key)
            probe_entity['game'] = f"validation: {current_date}"
        except Exception as e:
            print(f"Exception while getting datastore Entity - {e}")
            self.handler.log(message=f"In GCPDatastorePersist __init__ an exception occurred: {e!r}")
            raise
        # Write it to the DB.
        try:
            self.datastore_client.put(probe_entity)
        except Exception as e:
            print(f"Exception while putting data - {e}")
            self.handler.log(message=f"In GCPDatastorePersist __init__ an exception occurred: {e!r}")
            raise
        self.handler.log(message="Datastore client fetched")

    def save(self, key=None, jsonstr=None):
        """Persist jsonstr under key; returns an error payload on failure."""
        super().save(key=key, jsonstr=jsonstr)
        self.handler.log(message=f"Creating datastore key: {key}")
        try:
            ds_key = self.datastore_client.key(self.kind, key)
        except Exception as e:
            print(f"Exception - {e}")
            return self.handler.error(status=500, message=f"Exception {e!r}")
        if not ds_key:
            raise ValueError("The key was returned as None!")
        self.handler.log(message=f"Fetching entity: {ds_key}")
        try:
            entity = datastore.Entity(key=ds_key)
        except Exception as e:
            print(f"Exception - {e}")
            return self.handler.error(status=500, message=f"Exception {e!r}")
        entity["game"] = jsonstr
        self.handler.log(message="Writing game to GCP Datastore")
        try:
            self.datastore_client.put(entity)
        except Exception as e:
            print(f"Exception - {e}")
            return self.handler.error(status=500, message=f"Exception {e!r}")

    def load(self, key=None):
        """Fetch the game JSON stored under key; error payload on failure."""
        super().load(key=key)
        self.handler.log(message=f"Calling datastore query on key: {key}")
        self.handler.log(message=f"Creating datastore key: {key}")
        try:
            ds_key = self.datastore_client.key(self.kind, key)
        except Exception as e:
            print(f"Exception - {e}")
            return self.handler.error(status=500, message=f"Exception {e!r}")
        try:
            record = self.datastore_client.get(ds_key)
            if not record:
                raise ValueError("Key not found")
        except Exception as e:
            print(f"Exception - {e}")
            return self.handler.error(status=500, message=f"Exception {e!r}")
        self.handler.log(message=f"Query returned: {record}")
        self.handler.log(message=f"Query returned: {record['game']}")
        return record["game"]
from flask_helpers.ErrorHandler import ErrorHandler
from google.cloud import datastore
from Persistence.AbstractPersister import AbstractPersister
class Persister(AbstractPersister):
def __init__(self):
super(Persister, self).__init__()
self.handler.module="GCPDatastorePersist"
self.handler.log(message="Preparing datastore client")
self.datastore_client = datastore.Client()
self.kind = "save"
key = "validation-save-ignored"
current_date = "{}".format(datetime.now())
# Get a datastore key
try:
self.handler.log(message="Creating datastore key: {}".format(key))
_key = self.datastore_client.key(self.kind, key)
except Exception as e:
print("Exception while getting datastore client - {}".format(str(e)))
self.handler.log(message="In GCPDatastorePersist __init__ an exception occurred: {}".format(repr(e)))
raise
# Create an entity
try:
_save = datastore.Entity(key=_key)
_save['game'] = "validation: {}".format(current_date)
except Exception as e:
print("Exception while getting datastore Entity - {}".format(str(e)))
self.handler.log(message="In GCPDatastorePersist __init__ an exception occurred: {}".format(repr(e)))
raise
# Update the DB
try:
self.datastore_client.put(_save)
except Exception as e:
print("Exception while putting data - {}".format(str(e)))
self.handler.log(message="In GCPDatastorePersist __init__ an exception occurred: {}".format(repr(e)))
raise
self.handler.log(message="Datastore client fetched")
def save(self, key=None, jsonstr=None):
super(Persister, self).save(key=key, jsonstr=jsonstr)
self.handler.log(message="Creating datastore key: {}".format(key))
try:
_key = self.datastore_client.key(self.kind, key)
except Exception as e:
print("Exception - {}".format(str(e)))
return self.handler.error(status=500, message="Exception {}".format(repr(e)))
if not _key:
raise ValueError("The key was returned as None!")
self.handler.log(message="Fetching entity: {}".format(_key))
try:
_save = datastore.Entity(key=_key)
except Exception as e:
print("Exception - {}".format(str(e)))
return self.handler.error(status=500, message="Exception {}".format(repr(e)))
_save["game"] = jsonstr
self.handler.log(message="Writing game to GCP Datastore")
try:
self.datastore_client.put(_save)
except Exception as e:
print("Exception - {}".format(str(e)))
return self.handler.error(status=500, message="Exception {}".format(repr(e)))
def load(self, key=None):
super(Persister, self).load(key=key)
self.handler.log(message="Calling datastore query on key: {}".format(key))
self.handler.log(message="Creating datastore key: {}".format(key))
try:
_key = self.datastore_client.key(self.kind, key)
except Exception as e:
print("Exception - {}".format(str(e)))
return self.handler.error(status=500, message="Exception {}".format(repr(e)))
try:
save = self.datastore_client.get(_key)
if not save:
raise ValueError("Key not found")
except Exception as e:
print("Exception - {}".format(str(e)))
return self.handler.error(status=500, message="Exception {}".format(repr(e)))
self.handler.log(message="Query returned: {}".format(save))
self.handler.log(message="Query returned: {}".format(save["game"]))
return save["game"] | 0.440469 | 0.152442 |
from ukfm import SO2, UKF, EKF
from ukfm import LOCALIZATION as MODEL
import ukfm
import numpy as np
import matplotlib
ukfm.utils.set_matplotlib_config()
################################################################################
# We compare the filters on a large number of Monte-Carlo runs.
# Monte-Carlo runs
N_mc = 100
################################################################################
# Simulation Setting
# ==============================================================================
# We set the simulation as in :cite:`barrauInvariant2017`, section IV. The robot
# drives along a 10 m diameter circle for 40 seconds with high rate odometer
# measurements (100 Hz) and low rate GPS measurements (1 Hz). The vehicle gets
# moderate angular velocity uncertainty and highly precise linear velocity. The
# initial values of the heading error is very strong, **45° standard
# deviation**, while the initial position is known.
# sequence time (s)
T = 40
# odometry frequency (Hz)
odo_freq = 100
# create the model
model = MODEL(T, odo_freq)
# odometry noise standard deviation
odo_std = np.array([0.01, # speed (v/m)
0.01, # speed (v/m)
1 / 180 * np.pi]) # angular speed (rad/s)
# GPS frequency (Hz)
gps_freq = 1
# GPS noise standard deviation (m)
gps_std = 1
# radius of the circle trajectory (m)
radius = 5
# initial heading error standard deviation
theta0_std = 45/180*np.pi
################################################################################
# Filter Design
# ==============================================================================
# The UKFs are compared to an Extended Kalman FIlter (EKF) and an Invariant EKF
# (IEKF). The EKF has the same uncertainty representation as the UKF with the
# retraction on :math:`SO(2) \times \mathbb{R}^2`, whereas the IEKF has the same
# uncertainty representation as the UKF with the left retraction on
# :math:`SE(2)`.
# propagation noise covariance matrix
Q = np.diag(odo_std**2)
# measurement noise covariance matrix
R = gps_std**2*np.eye(2)
# initial covariance matrix
P0 = np.zeros((3, 3))
# we take into account initial heading error
P0[0, 0] = theta0_std ** 2
# sigma point parameter
alpha = np.array([1e-3, 1e-3, 1e-3])
################################################################################
# We set error variables before launching Monte-Carlo simulations. As we have
# five similar methods, the code is redundant.
ukf_err = np.zeros((N_mc, model.N, 3))
left_ukf_err = np.zeros_like(ukf_err)
right_ukf_err = np.zeros_like(ukf_err)
iekf_err = np.zeros_like(ukf_err)
ekf_err = np.zeros_like(ukf_err)
################################################################################
# We record Normalized Estimation Error Squared (NEES) for consistency
# evaluation (see Results).
ukf_nees = np.zeros((N_mc, model.N, 2))
left_ukf_nees = np.zeros_like(ukf_nees)
right_ukf_nees = np.zeros_like(ukf_nees)
iekf_nees = np.zeros_like(ukf_nees)
ekf_nees = np.zeros_like(ukf_nees)
################################################################################
# Monte-Carlo Runs
# ==============================================================================
# We run the Monte-Carlo through a for loop.
#
# .. note::
#
# We sample for each Monte-Carlo run an initial heading error from the true
# distribution (:math:`\mathbf{P}_0`). This requires many Monte-Carlo
# samples.
for n_mc in range(N_mc):
print("Monte-Carlo iteration(s): " + str(n_mc + 1) + "/" + str(N_mc))
# simulation true trajectory
states, omegas = model.simu_f(odo_std, radius)
# simulate measurement
ys, one_hot_ys = model.simu_h(states, gps_freq, gps_std)
# initialize filter with inaccurate state
state0 = model.STATE(
Rot=states[0].Rot.dot(SO2.exp(theta0_std * np.random.randn(1))),
p=states[0].p)
# define the filters
ukf = UKF(state0=state0, P0=P0, f=model.f, h=model.h, Q=Q, R=R,
phi=model.phi,
phi_inv=model.phi_inv,
alpha=alpha)
left_ukf = UKF(state0=state0, P0=P0, f=model.f, h=model.h, Q=Q, R=R,
phi=model.left_phi,
phi_inv=model.left_phi_inv,
alpha=alpha)
right_ukf = UKF(state0=state0, P0=P0, f=model.f, h=model.h, Q=Q, R=R,
phi=model.right_phi,
phi_inv=model.right_phi_inv,
alpha=alpha)
iekf = EKF(model=model, state0=state0, P0=P0, Q=Q, R=R,
FG_ana=model.iekf_FG_ana,
H_ana=model.iekf_H_ana,
phi=model.left_phi)
ekf = EKF(model=model, state0=state0, P0=P0, Q=Q, R=R,
FG_ana=model.ekf_FG_ana,
H_ana=model.ekf_H_ana,
phi=model.phi)
# variables for recording estimates of the Monte-Carlo run
ukf_states = [state0]
left_states = [state0]
right_states = [state0]
iekf_states = [state0]
ekf_states = [state0]
ukf_Ps = np.zeros((model.N, 3, 3))
left_ukf_Ps = np.zeros_like(ukf_Ps)
right_ukf_Ps = np.zeros_like(ukf_Ps)
ekf_Ps = np.zeros_like(ukf_Ps)
iekf_Ps = np.zeros_like(ukf_Ps)
ukf_Ps[0] = P0
left_ukf_Ps[0] = P0
right_ukf_Ps[0] = P0
ekf_Ps[0] = P0
iekf_Ps[0] = P0
# measurement iteration number
k = 1
# filtering loop
for n in range(1, model.N):
ukf.propagation(omegas[n-1], model.dt)
left_ukf.propagation(omegas[n-1], model.dt)
right_ukf.propagation(omegas[n-1], model.dt)
iekf.propagation(omegas[n-1], model.dt)
ekf.propagation(omegas[n-1], model.dt)
# update only if a measurement is received
if one_hot_ys[n] == 1:
ukf.update(ys[k])
left_ukf.update(ys[k])
right_ukf.update(ys[k])
iekf.update(ys[k])
ekf.update(ys[k])
k = k + 1
ukf_states.append(ukf.state)
left_states.append(left_ukf.state)
right_states.append(right_ukf.state)
iekf_states.append(iekf.state)
ekf_states.append(ekf.state)
ukf_Ps[n] = ukf.P
left_ukf_Ps[n] = left_ukf.P
right_ukf_Ps[n] = right_ukf.P
iekf_Ps[n] = iekf.P
ekf_Ps[n] = ekf.P
# get state trajectory
Rots, ps = model.get_states(states, model.N)
ukf_Rots, ukf_ps = model.get_states(ukf_states, model.N)
left_ukf_Rots, left_ukf_ps = model.get_states(left_states, model.N)
right_ukf_Rots, right_ukf_ps = model.get_states(right_states, model.N)
iekf_Rots, iekf_ps = model.get_states(iekf_states, model.N)
ekf_Rots, ekf_ps = model.get_states(ekf_states, model.N)
# record errors
ukf_err[n_mc] = model.errors(Rots, ukf_Rots, ps, ukf_ps)
left_ukf_err[n_mc] = model.errors(Rots, left_ukf_Rots, ps, left_ukf_ps)
right_ukf_err[n_mc] = model.errors(Rots, right_ukf_Rots, ps, right_ukf_ps)
iekf_err[n_mc] = model.errors(Rots, iekf_Rots, ps, iekf_ps)
ekf_err[n_mc] = model.errors(Rots, ekf_Rots, ps, ekf_ps)
# record NEES
ukf_nees[n_mc] = model.nees(ukf_err[n_mc], ukf_Ps, ukf_Rots, ukf_ps, 'STD')
left_ukf_nees[n_mc] = model.nees(left_ukf_err[n_mc], left_ukf_Ps,
left_ukf_Rots, left_ukf_ps, 'LEFT')
right_ukf_nees[n_mc] = model.nees(right_ukf_err[n_mc], right_ukf_Ps,
right_ukf_Rots, right_ukf_ps, 'RIGHT')
iekf_nees[n_mc] = model.nees(iekf_err[n_mc], iekf_Ps, iekf_Rots, iekf_ps,
'LEFT')
ekf_nees[n_mc] = model.nees(ekf_err[n_mc], ekf_Ps, ekf_Rots, ekf_ps, 'STD')
################################################################################
# Results
# ==============================================================================
# We first visualize the robot trajectory (for the last run) and the errors
# w.r.t. orientation and position (averaged over Monte-Carlo). As simulations
# have random process, the trajectory plot just gives us an indication but not a
# proof of performances.
ukf_e, left_ukf_e, right_ukf_e, iekf_e, ekf_e = model.benchmark_plot(
ukf_err, left_ukf_err, right_ukf_err, iekf_err, ekf_err, ps, ukf_ps,
left_ukf_ps, right_ukf_ps, ekf_ps, iekf_ps)
################################################################################
# Two groups of filters emerge: group 1) consists of EKF and :math:`SO(2) \times
# \mathbb{R}^2` UKF; and group 2) have IEKF, left :math:`SE(2)` UKF and right
# :math:`SE(2)` UKF (the curves of these filters are superposed). The second
# group is visibly highly better regarding position estimation.
#
# More statictical is to compute the results averaged over all the Monte-Carlo.
# Let us compute the Root Mean Squared Error (RMSE) for each method both for the
# orientation and the position.
model.benchmark_print(ukf_e, left_ukf_e, right_ukf_e, iekf_e, ekf_e)
################################################################################
# They confirm the results on the plot.
#
# A consistency metric is the Normalized Estimation Error Squared (NEES).
# Classical criteria used to evaluate the performance of an estimation method,
# like the RMSE, do not inform about consistency as they do not take into
# account the uncertainty returned by the filter. This point is addressed by the
# NEES, which computes the average squared value of the error, normalized by the
# covariance matrix of the filter. The case NEES>1 reveals an inconsistency
# issue: the actual uncertainty is higher than the computed uncertainty.
model.nees_print(ukf_nees, left_ukf_nees, right_ukf_nees, iekf_nees, ekf_nees)
################################################################################
# As the filters are initialized with perfect position and zero covariance
# w.r.t. position, we compute NEES only after 20 s for avoiding numerical issues
# (during the first secondes of the trajectory the covariance matrix
# :math:`\mathbf{P}_n` is very low so inverting it leads to insignificantly high
# numbers). Results are clear, the :math:`SE(2)` UKF are the more consistent.
################################################################################
# **Which filter is the best ?** In this setting, the **left UKF**, the
# **right UKF** and the IEKF filters obtain similar accurate results, that
# clearly outperform :math:`SO(2) \times \mathbb{R}^2` UKF, and EKF, whereas the
# two UKFs are the more consistent.
#
# .. note::
#
# We have set all the filters with the same "true" noise covariance
# parameters. However, both EKF and UKF based algorithms may better deal ,
# with non-linearity by e.g. inflated propagation noise covariance.
#
################################################################################
# Conclusion
# ==============================================================================
# This script compares different algorithms for 2D robot localization. Two
# groups of filters emerge: the :math:`SO(2) \times \mathbb{R}^2` UKF and the
# EKF represent the first group; and the left :math:`SE(2)` UKF, the right
# :math:`SE(2)` UKF and the IEKF constitute the second group. For the considered
# set of parameters, it is evident that embedded the state in :math:`SE(2)` is
# advantageous for state estimation.
#
# You can now:
#
# * compare the filters in different scenarios. Indeed, UKF and their (I)EKF
# counterparts may obtain different results when noise is e.g. inflated or
# with different initial conditions or different trajectory.
#
# * test the filters in a slightly different model (e.g. with orientation
# measurement), which is straightforward for the UKFs. | docsource/source/auto_benchmark/localization.py | from ukfm import SO2, UKF, EKF
from ukfm import LOCALIZATION as MODEL
import ukfm
import numpy as np
import matplotlib
ukfm.utils.set_matplotlib_config()
################################################################################
# We compare the filters on a large number of Monte-Carlo runs.
# Monte-Carlo runs
N_mc = 100
################################################################################
# Simulation Setting
# ==============================================================================
# We set the simulation as in :cite:`barrauInvariant2017`, section IV. The robot
# drives along a 10 m diameter circle for 40 seconds with high rate odometer
# measurements (100 Hz) and low rate GPS measurements (1 Hz). The vehicle gets
# moderate angular velocity uncertainty and highly precise linear velocity. The
# initial values of the heading error is very strong, **45° standard
# deviation**, while the initial position is known.
# sequence time (s)
T = 40
# odometry frequency (Hz)
odo_freq = 100
# create the model
model = MODEL(T, odo_freq)
# odometry noise standard deviation
odo_std = np.array([0.01, # speed (v/m)
0.01, # speed (v/m)
1 / 180 * np.pi]) # angular speed (rad/s)
# GPS frequency (Hz)
gps_freq = 1
# GPS noise standard deviation (m)
gps_std = 1
# radius of the circle trajectory (m)
radius = 5
# initial heading error standard deviation
theta0_std = 45/180*np.pi
################################################################################
# Filter Design
# ==============================================================================
# The UKFs are compared to an Extended Kalman FIlter (EKF) and an Invariant EKF
# (IEKF). The EKF has the same uncertainty representation as the UKF with the
# retraction on :math:`SO(2) \times \mathbb{R}^2`, whereas the IEKF has the same
# uncertainty representation as the UKF with the left retraction on
# :math:`SE(2)`.
# propagation noise covariance matrix
Q = np.diag(odo_std**2)
# measurement noise covariance matrix
R = gps_std**2*np.eye(2)
# initial covariance matrix
P0 = np.zeros((3, 3))
# we take into account initial heading error
P0[0, 0] = theta0_std ** 2
# sigma point parameter
alpha = np.array([1e-3, 1e-3, 1e-3])
################################################################################
# We set error variables before launching Monte-Carlo simulations. As we have
# five similar methods, the code is redundant.
ukf_err = np.zeros((N_mc, model.N, 3))
left_ukf_err = np.zeros_like(ukf_err)
right_ukf_err = np.zeros_like(ukf_err)
iekf_err = np.zeros_like(ukf_err)
ekf_err = np.zeros_like(ukf_err)
################################################################################
# We record Normalized Estimation Error Squared (NEES) for consistency
# evaluation (see Results).
ukf_nees = np.zeros((N_mc, model.N, 2))
left_ukf_nees = np.zeros_like(ukf_nees)
right_ukf_nees = np.zeros_like(ukf_nees)
iekf_nees = np.zeros_like(ukf_nees)
ekf_nees = np.zeros_like(ukf_nees)
################################################################################
# Monte-Carlo Runs
# ==============================================================================
# We run the Monte-Carlo through a for loop.
#
# .. note::
#
# We sample for each Monte-Carlo run an initial heading error from the true
# distribution (:math:`\mathbf{P}_0`). This requires many Monte-Carlo
# samples.
for n_mc in range(N_mc):
print("Monte-Carlo iteration(s): " + str(n_mc + 1) + "/" + str(N_mc))
# simulation true trajectory
states, omegas = model.simu_f(odo_std, radius)
# simulate measurement
ys, one_hot_ys = model.simu_h(states, gps_freq, gps_std)
# initialize filter with inaccurate state
state0 = model.STATE(
Rot=states[0].Rot.dot(SO2.exp(theta0_std * np.random.randn(1))),
p=states[0].p)
# define the filters
ukf = UKF(state0=state0, P0=P0, f=model.f, h=model.h, Q=Q, R=R,
phi=model.phi,
phi_inv=model.phi_inv,
alpha=alpha)
left_ukf = UKF(state0=state0, P0=P0, f=model.f, h=model.h, Q=Q, R=R,
phi=model.left_phi,
phi_inv=model.left_phi_inv,
alpha=alpha)
right_ukf = UKF(state0=state0, P0=P0, f=model.f, h=model.h, Q=Q, R=R,
phi=model.right_phi,
phi_inv=model.right_phi_inv,
alpha=alpha)
iekf = EKF(model=model, state0=state0, P0=P0, Q=Q, R=R,
FG_ana=model.iekf_FG_ana,
H_ana=model.iekf_H_ana,
phi=model.left_phi)
ekf = EKF(model=model, state0=state0, P0=P0, Q=Q, R=R,
FG_ana=model.ekf_FG_ana,
H_ana=model.ekf_H_ana,
phi=model.phi)
# variables for recording estimates of the Monte-Carlo run
ukf_states = [state0]
left_states = [state0]
right_states = [state0]
iekf_states = [state0]
ekf_states = [state0]
ukf_Ps = np.zeros((model.N, 3, 3))
left_ukf_Ps = np.zeros_like(ukf_Ps)
right_ukf_Ps = np.zeros_like(ukf_Ps)
ekf_Ps = np.zeros_like(ukf_Ps)
iekf_Ps = np.zeros_like(ukf_Ps)
ukf_Ps[0] = P0
left_ukf_Ps[0] = P0
right_ukf_Ps[0] = P0
ekf_Ps[0] = P0
iekf_Ps[0] = P0
# measurement iteration number
k = 1
# filtering loop
for n in range(1, model.N):
ukf.propagation(omegas[n-1], model.dt)
left_ukf.propagation(omegas[n-1], model.dt)
right_ukf.propagation(omegas[n-1], model.dt)
iekf.propagation(omegas[n-1], model.dt)
ekf.propagation(omegas[n-1], model.dt)
# update only if a measurement is received
if one_hot_ys[n] == 1:
ukf.update(ys[k])
left_ukf.update(ys[k])
right_ukf.update(ys[k])
iekf.update(ys[k])
ekf.update(ys[k])
k = k + 1
ukf_states.append(ukf.state)
left_states.append(left_ukf.state)
right_states.append(right_ukf.state)
iekf_states.append(iekf.state)
ekf_states.append(ekf.state)
ukf_Ps[n] = ukf.P
left_ukf_Ps[n] = left_ukf.P
right_ukf_Ps[n] = right_ukf.P
iekf_Ps[n] = iekf.P
ekf_Ps[n] = ekf.P
# get state trajectory
Rots, ps = model.get_states(states, model.N)
ukf_Rots, ukf_ps = model.get_states(ukf_states, model.N)
left_ukf_Rots, left_ukf_ps = model.get_states(left_states, model.N)
right_ukf_Rots, right_ukf_ps = model.get_states(right_states, model.N)
iekf_Rots, iekf_ps = model.get_states(iekf_states, model.N)
ekf_Rots, ekf_ps = model.get_states(ekf_states, model.N)
# record errors
ukf_err[n_mc] = model.errors(Rots, ukf_Rots, ps, ukf_ps)
left_ukf_err[n_mc] = model.errors(Rots, left_ukf_Rots, ps, left_ukf_ps)
right_ukf_err[n_mc] = model.errors(Rots, right_ukf_Rots, ps, right_ukf_ps)
iekf_err[n_mc] = model.errors(Rots, iekf_Rots, ps, iekf_ps)
ekf_err[n_mc] = model.errors(Rots, ekf_Rots, ps, ekf_ps)
# record NEES
ukf_nees[n_mc] = model.nees(ukf_err[n_mc], ukf_Ps, ukf_Rots, ukf_ps, 'STD')
left_ukf_nees[n_mc] = model.nees(left_ukf_err[n_mc], left_ukf_Ps,
left_ukf_Rots, left_ukf_ps, 'LEFT')
right_ukf_nees[n_mc] = model.nees(right_ukf_err[n_mc], right_ukf_Ps,
right_ukf_Rots, right_ukf_ps, 'RIGHT')
iekf_nees[n_mc] = model.nees(iekf_err[n_mc], iekf_Ps, iekf_Rots, iekf_ps,
'LEFT')
ekf_nees[n_mc] = model.nees(ekf_err[n_mc], ekf_Ps, ekf_Rots, ekf_ps, 'STD')
################################################################################
# Results
# ==============================================================================
# We first visualize the robot trajectory (for the last run) and the errors
# w.r.t. orientation and position (averaged over Monte-Carlo). As simulations
# have random process, the trajectory plot just gives us an indication but not a
# proof of performances.
ukf_e, left_ukf_e, right_ukf_e, iekf_e, ekf_e = model.benchmark_plot(
ukf_err, left_ukf_err, right_ukf_err, iekf_err, ekf_err, ps, ukf_ps,
left_ukf_ps, right_ukf_ps, ekf_ps, iekf_ps)
################################################################################
# Two groups of filters emerge: group 1) consists of EKF and :math:`SO(2) \times
# \mathbb{R}^2` UKF; and group 2) have IEKF, left :math:`SE(2)` UKF and right
# :math:`SE(2)` UKF (the curves of these filters are superposed). The second
# group is visibly highly better regarding position estimation.
#
# More statictical is to compute the results averaged over all the Monte-Carlo.
# Let us compute the Root Mean Squared Error (RMSE) for each method both for the
# orientation and the position.
model.benchmark_print(ukf_e, left_ukf_e, right_ukf_e, iekf_e, ekf_e)
################################################################################
# They confirm the results on the plot.
#
# A consistency metric is the Normalized Estimation Error Squared (NEES).
# Classical criteria used to evaluate the performance of an estimation method,
# like the RMSE, do not inform about consistency as they do not take into
# account the uncertainty returned by the filter. This point is addressed by the
# NEES, which computes the average squared value of the error, normalized by the
# covariance matrix of the filter. The case NEES>1 reveals an inconsistency
# issue: the actual uncertainty is higher than the computed uncertainty.
model.nees_print(ukf_nees, left_ukf_nees, right_ukf_nees, iekf_nees, ekf_nees)
################################################################################
# As the filters are initialized with perfect position and zero covariance
# w.r.t. position, we compute NEES only after 20 s for avoiding numerical issues
# (during the first secondes of the trajectory the covariance matrix
# :math:`\mathbf{P}_n` is very low so inverting it leads to insignificantly high
# numbers). Results are clear, the :math:`SE(2)` UKF are the more consistent.
################################################################################
# **Which filter is the best ?** In this setting, the **left UKF**, the
# **right UKF** and the IEKF filters obtain similar accurate results, that
# clearly outperform :math:`SO(2) \times \mathbb{R}^2` UKF, and EKF, whereas the
# two UKFs are the more consistent.
#
# .. note::
#
# We have set all the filters with the same "true" noise covariance
# parameters. However, both EKF and UKF based algorithms may better deal ,
# with non-linearity by e.g. inflated propagation noise covariance.
#
################################################################################
# Conclusion
# ==============================================================================
# This script compares different algorithms for 2D robot localization. Two
# groups of filters emerge: the :math:`SO(2) \times \mathbb{R}^2` UKF and the
# EKF represent the first group; and the left :math:`SE(2)` UKF, the right
# :math:`SE(2)` UKF and the IEKF constitute the second group. For the considered
# set of parameters, it is evident that embedded the state in :math:`SE(2)` is
# advantageous for state estimation.
#
# You can now:
#
# * compare the filters in different scenarios. Indeed, UKF and their (I)EKF
# counterparts may obtain different results when noise is e.g. inflated or
# with different initial conditions or different trajectory.
#
# * test the filters in a slightly different model (e.g. with orientation
# measurement), which is straightforward for the UKFs. | 0.773772 | 0.482063 |
import numpy as np
import pandas as pd
"""# **Looking at the raw dataset**"""
data=pd.read_csv("healthcare-dataset-stroke-data.csv")
data.head()
data.shape
data.describe()
data.dtypes
data.columns
data.size
data.info()
"""# **Explorartory Data Analysis & Feature Engineering**"""
!pip install dataprep
!pip install pandas-profiling
from dataprep.eda import plot
from dataprep.eda import plot_correlation
from dataprep.eda import plot_missing
data=pd.read_csv("healthcare-dataset-stroke-data.csv")
data
data.describe()
#drop id
data.drop(columns=['id'],inplace=True)
#checking missing values
data.isna()
#getting the count of null values in a column
data.isna().sum()
#checking if we have missing data
plot_missing(data)
data=data.fillna(np.mean(data['bmi']))
data.info()
plot(data)
plot(data,'stroke')
plot(data,'smoking_status')
plot(data,'bmi')
plot(data,'heart_disease')
plot_correlation(data)
#converting Marrital Status, Residence and Gender into 0s and 1s
data['gender']=data['gender'].apply(lambda x : 1 if x=='Female' else 0)
data["Residence_type"]=data["Residence_type"].apply(lambda x: 1 if x=="Urban" else 0)
data["ever_married"]=data["ever_married"].apply(lambda x: 1 if x=="Yes" else 0)
#removing the observations that have smoking_status type unknown
data=data[data['smoking_status']!='Unknown']
data.head(12)
data
#using OneHotEncoding for smoking_status, work_type
data_dummies=data[['smoking_status','work_type']]
data_dummies=pd.get_dummies(data_dummies)
data.drop(columns=['smoking_status','work_type'],inplace=True)
data_dummies
data
y=data['stroke']
data.drop(columns=['stroke'],inplace=True)
x=data.merge(data_dummies,left_index=True, right_index=True,how='left')
"""# **Spliting Model into Training & Testing Model**"""
from sklearn.model_selection import train_test_split
X_train,X_test,Y_train,Y_test=train_test_split(x,y,test_size=0.20,random_state=0)
print(X_train.shape)
print(X_test.shape)
print(Y_train.shape)
print(Y_test.shape)
from sklearn.preprocessing import StandardScaler
sc=StandardScaler()
X_train=sc.fit_transform(X_train)
X_test=sc.transform(X_test)
X_train
Y_train
Y_test
"""# **(i) KNN**"""
from sklearn.neighbors import KNeighborsClassifier
knn=KNeighborsClassifier()
knn.fit(X_train,Y_train)
y_pred_knn=knn.predict(X_test)
y_pred_knn
"""# **(ii) SVM**"""
from sklearn.svm import SVC
svm=SVC()
svm.fit(X_train,Y_train)
y_pred_svm=svm.predict(X_test)
y_pred_svm
"""# **(iii) Decision Tree**"""
from sklearn.tree import DecisionTreeClassifier
dtree = DecisionTreeClassifier(criterion='gini',max_depth=None)
dtree.fit(X_train,Y_train)
y_pred_dtree=dtree.predict(X_test)
y_pred_dtree
"""# **(iv)Random Forest**"""
from sklearn.ensemble import RandomForestClassifier
rfc=RandomForestClassifier(n_estimators=500)
rfc.fit(X_train,Y_train)
y_pred_rfc=rfc.predict(X_test)
y_pred_rfc
"""# **(v)XGBoost**"""
from xgboost import XGBClassifier
from sklearn.metrics import accuracy_score
model = XGBClassifier()
#fit the model with the training data
model.fit(X_train,Y_train)
y_pred_model=model.predict(X_test)
y_pred_model
"""# **(vi) Naive Bayes**"""
from sklearn.naive_bayes import GaussianNB
gnb=GaussianNB()
y_pred = gnb.fit(X_train, Y_train).predict(X_test)
y_pred
"""# **Training Accuracy of all the Algorithms**"""
print('K Nearest Neighbor Training Accuracy:',knn.score(X_train,Y_train)*100)
print('SVM Training Accuracy:',svm.score(X_train,Y_train)*100)
print('Decision Tree Training Accuracy:',dtree.score(X_train,Y_train)*100)
print('Random Forest Training Accuracy:',rfc.score(X_train,Y_train)*100)
print('XGBoost Training Accuracy:',model.score(X_train,Y_train)*100)
print('Naive Bayes Training Accuracy:',gnb.score(X_train,Y_train)*100)
"""# **Test Accuracy of all the algorithms**"""
print('K Nearest Neighbor Training Accuracy:',knn.score(X_test,Y_test)*100)
print('SVM Training Accuracy:',svm.score(X_test,Y_test)*100)
print('Decision Tree Training Accuracy:',dtree.score(X_test,Y_test)*100)
print('Random Forest Training Accuracy:',rfc.score(X_test,Y_test)*100)
print('XGBoost Training Accuracy:',model.score(X_test,Y_test)*100)
print('Naive Bayes Accuracy:',gnb.score(X_test,Y_test)*100)
"""# **Accuracy Score of all the Algorithms**"""
from sklearn.metrics import accuracy_score
accuracy_test = accuracy_score(Y_test,y_pred_knn)
print('KNN accuracy_score on test dataset:',(accuracy_test)*100)
accuracy_test = accuracy_score(Y_test,y_pred_svm)
print('SVM accuracy_score on test dataset:',(accuracy_test)*100)
accuracy_test = accuracy_score(Y_test,y_pred_dtree)
print('Decision Tree accuracy_score on test dataset:',(accuracy_test)*100)
accuracy_test = accuracy_score(Y_test,y_pred_rfc)
print('Random Forest accuracy_score on test dataset:',(accuracy_test)*100)
accuracy_test = accuracy_score(Y_test,y_pred_model)
print('XGBoost accuracy_score on test dataset:',(accuracy_test)*100)
accuracy_test = accuracy_score(Y_test,y_pred)
print('Naive Bayes accuracy_score on test dataset:',(accuracy_test)*100)
"""## **Errors**"""
import sklearn.metrics as metrics
mae_knn=metrics.mean_absolute_error(Y_test,y_pred_knn)
mse_knn=metrics.mean_squared_error(Y_test,y_pred_knn)
rmse_knn=np.sqrt(mse_knn)
print('Mean Absolute for KNN is:',mae_knn)
print('Mean Squared Error for KNN is',mse_knn)
print('Root Mean Squared Error for KNN is:',rmse_knn)
mae_svm=metrics.mean_absolute_error(Y_test,y_pred_svm)
mse_svm=metrics.mean_squared_error(Y_test,y_pred_svm)
rmse_svm=np.sqrt(mse_svm)
print('Mean Absolute Error for SVM is:',mae_svm)
print('Mean Squared Error for SVM is:',mse_svm)
print('Root Mean Squared Error for SVM is:',rmse_svm)
mae_dtree=metrics.mean_absolute_error(Y_test,y_pred_dtree)
mse_dtree=metrics.mean_squared_error(Y_test,y_pred_dtree)
rmse_dtree=np.sqrt(mse_dtree)
print('Mean Absolute Error for Decision Tree is:',mae_dtree)
print('Mean Squared Error for Decision Tree is:',mse_dtree)
print('Root Mean Squared Error for Decision Tree is',rmse_dtree)
mae_rfc=metrics.mean_absolute_error(Y_test,y_pred_rfc)
mse_rfc=metrics.mean_squared_error(Y_test,y_pred_rfc)
rmse_rfc=np.sqrt(mse_rfc)
print('Mean Absolute Error for Random Forest is:',mae_rfc)
print('Mean Squared Error for Random Forest is:',mse_rfc)
print('Root Mean Squared Error for Random Forest is:',rmse_rfc)
mae_model=metrics.mean_absolute_error(Y_test,y_pred_model)
mse_model=metrics.mean_squared_error(Y_test,y_pred_model)
rmse_model=np.sqrt(mse_model)
print('Mean Absolute Error for XGBoost is:',mae_model)
print('Mean Squared Error for XGBoost is:',mse_model)
print('Root Mean Squared Error for XGBoost is:',rmse_model)
mae_gnb=metrics.mean_absolute_error(Y_test,y_pred)
mse_gnb=metrics.mean_squared_error(Y_test,y_pred)
rmse_gnb=np.sqrt(mse_gnb)
print('Mean Absolute Error for Naive Bayes is:',mae_gnb)
print('Mean Squared Error for Naive Bayes is:',mse_gnb)
print('Root Mean Squared Error for Naive Bayes is:',rmse_gnb) | advanced_bioinformatics_project.py | import numpy as np
import pandas as pd
"""# **Looking at the raw dataset**"""
# First look at the raw stroke dataset. Bare expressions below only display
# output when run inside a notebook/REPL (this file is a notebook export).
data=pd.read_csv("healthcare-dataset-stroke-data.csv")
data.head()
data.shape
data.describe()
data.dtypes
data.columns
data.size
data.info()
"""# **Explorartory Data Analysis & Feature Engineering**"""
# NOTE: "!pip install" lines are IPython magics -- this file only runs
# as-is inside Jupyter/Colab, not as a plain Python script.
!pip install dataprep
!pip install pandas-profiling
from dataprep.eda import plot
from dataprep.eda import plot_correlation
from dataprep.eda import plot_missing
# Reload the CSV from scratch for the EDA / feature-engineering pass.
data=pd.read_csv("healthcare-dataset-stroke-data.csv")
data
data.describe()
# drop the id column (row identifier, no predictive signal)
data.drop(columns=['id'],inplace=True)
#checking missing values
data.isna()
#getting the count of null values in a column
data.isna().sum()
#checking if we have missing data
plot_missing(data)
# Impute NaNs with the mean of the bmi column.
# NOTE(review): fillna here fills *every* column's NaNs with the bmi mean,
# not just bmi -- confirm bmi is the only column with missing values.
data=data.fillna(np.mean(data['bmi']))
data.info()
plot(data)
plot(data,'stroke')
plot(data,'smoking_status')
plot(data,'bmi')
plot(data,'heart_disease')
plot_correlation(data)
#converting Marrital Status, Residence and Gender into 0s and 1s
data['gender']=data['gender'].apply(lambda x : 1 if x=='Female' else 0)
data["Residence_type"]=data["Residence_type"].apply(lambda x: 1 if x=="Urban" else 0)
data["ever_married"]=data["ever_married"].apply(lambda x: 1 if x=="Yes" else 0)
#removing the observations that have smoking_status type unknown
data=data[data['smoking_status']!='Unknown']
data.head(12)
data
#using OneHotEncoding for smoking_status, work_type
data_dummies=data[['smoking_status','work_type']]
data_dummies=pd.get_dummies(data_dummies)
data.drop(columns=['smoking_status','work_type'],inplace=True)
data_dummies
data
# Split into target (y) and features (x); merge the one-hot columns back in
# by row index.
y=data['stroke']
data.drop(columns=['stroke'],inplace=True)
x=data.merge(data_dummies,left_index=True, right_index=True,how='left')
"""# **Spliting Model into Training & Testing Model**"""
from sklearn.model_selection import train_test_split
X_train,X_test,Y_train,Y_test=train_test_split(x,y,test_size=0.20,random_state=0)
print(X_train.shape)
print(X_test.shape)
print(Y_train.shape)
print(Y_test.shape)
from sklearn.preprocessing import StandardScaler
sc=StandardScaler()
X_train=sc.fit_transform(X_train)
X_test=sc.transform(X_test)
X_train
Y_train
Y_test
"""# **(i) KNN**"""
from sklearn.neighbors import KNeighborsClassifier
knn=KNeighborsClassifier()
knn.fit(X_train,Y_train)
y_pred_knn=knn.predict(X_test)
y_pred_knn
"""# **(ii) SVM**"""
from sklearn.svm import SVC
svm=SVC()
svm.fit(X_train,Y_train)
y_pred_svm=svm.predict(X_test)
y_pred_svm
"""# **(iii) Decision Tree**"""
from sklearn.tree import DecisionTreeClassifier
dtree = DecisionTreeClassifier(criterion='gini',max_depth=None)
dtree.fit(X_train,Y_train)
y_pred_dtree=dtree.predict(X_test)
y_pred_dtree
"""# **(iv)Random Forest**"""
from sklearn.ensemble import RandomForestClassifier
rfc=RandomForestClassifier(n_estimators=500)
rfc.fit(X_train,Y_train)
y_pred_rfc=rfc.predict(X_test)
y_pred_rfc
"""# **(v)XGBoost**"""
from xgboost import XGBClassifier
from sklearn.metrics import accuracy_score
model = XGBClassifier()
#fit the model with the training data
model.fit(X_train,Y_train)
y_pred_model=model.predict(X_test)
y_pred_model
"""# **(vi) Naive Bayes**"""
from sklearn.naive_bayes import GaussianNB
gnb=GaussianNB()
y_pred = gnb.fit(X_train, Y_train).predict(X_test)
y_pred
"""# **Training Accuracy of all the Algorithms**"""
print('K Nearest Neighbor Training Accuracy:',knn.score(X_train,Y_train)*100)
print('SVM Training Accuracy:',svm.score(X_train,Y_train)*100)
print('Decision Tree Training Accuracy:',dtree.score(X_train,Y_train)*100)
print('Random Forest Training Accuracy:',rfc.score(X_train,Y_train)*100)
print('XGBoost Training Accuracy:',model.score(X_train,Y_train)*100)
print('Naive Bayes Training Accuracy:',gnb.score(X_train,Y_train)*100)
"""# **Test Accuracy of all the algorithms**"""
print('K Nearest Neighbor Training Accuracy:',knn.score(X_test,Y_test)*100)
print('SVM Training Accuracy:',svm.score(X_test,Y_test)*100)
print('Decision Tree Training Accuracy:',dtree.score(X_test,Y_test)*100)
print('Random Forest Training Accuracy:',rfc.score(X_test,Y_test)*100)
print('XGBoost Training Accuracy:',model.score(X_test,Y_test)*100)
print('Naive Bayes Accuracy:',gnb.score(X_test,Y_test)*100)
"""# **Accuracy Score of all the Algorithms**"""
from sklearn.metrics import accuracy_score
accuracy_test = accuracy_score(Y_test,y_pred_knn)
print('KNN accuracy_score on test dataset:',(accuracy_test)*100)
accuracy_test = accuracy_score(Y_test,y_pred_svm)
print('SVM accuracy_score on test dataset:',(accuracy_test)*100)
accuracy_test = accuracy_score(Y_test,y_pred_dtree)
print('Decision Tree accuracy_score on test dataset:',(accuracy_test)*100)
accuracy_test = accuracy_score(Y_test,y_pred_rfc)
print('Random Forest accuracy_score on test dataset:',(accuracy_test)*100)
accuracy_test = accuracy_score(Y_test,y_pred_model)
print('XGBoost accuracy_score on test dataset:',(accuracy_test)*100)
accuracy_test = accuracy_score(Y_test,y_pred)
print('Naive Bayes accuracy_score on test dataset:',(accuracy_test)*100)
"""## **Errors**"""
import sklearn.metrics as metrics
mae_knn=metrics.mean_absolute_error(Y_test,y_pred_knn)
mse_knn=metrics.mean_squared_error(Y_test,y_pred_knn)
rmse_knn=np.sqrt(mse_knn)
print('Mean Absolute for KNN is:',mae_knn)
print('Mean Squared Error for KNN is',mse_knn)
print('Root Mean Squared Error for KNN is:',rmse_knn)
mae_svm=metrics.mean_absolute_error(Y_test,y_pred_svm)
mse_svm=metrics.mean_squared_error(Y_test,y_pred_svm)
rmse_svm=np.sqrt(mse_svm)
print('Mean Absolute Error for SVM is:',mae_svm)
print('Mean Squared Error for SVM is:',mse_svm)
print('Root Mean Squared Error for SVM is:',rmse_svm)
mae_dtree=metrics.mean_absolute_error(Y_test,y_pred_dtree)
mse_dtree=metrics.mean_squared_error(Y_test,y_pred_dtree)
rmse_dtree=np.sqrt(mse_dtree)
print('Mean Absolute Error for Decision Tree is:',mae_dtree)
print('Mean Squared Error for Decision Tree is:',mse_dtree)
print('Root Mean Squared Error for Decision Tree is',rmse_dtree)
mae_rfc=metrics.mean_absolute_error(Y_test,y_pred_rfc)
mse_rfc=metrics.mean_squared_error(Y_test,y_pred_rfc)
rmse_rfc=np.sqrt(mse_rfc)
print('Mean Absolute Error for Random Forest is:',mae_rfc)
print('Mean Squared Error for Random Forest is:',mse_rfc)
print('Root Mean Squared Error for Random Forest is:',rmse_rfc)
mae_model=metrics.mean_absolute_error(Y_test,y_pred_model)
mse_model=metrics.mean_squared_error(Y_test,y_pred_model)
rmse_model=np.sqrt(mse_model)
print('Mean Absolute Error for XGBoost is:',mae_model)
print('Mean Squared Error for XGBoost is:',mse_model)
print('Root Mean Squared Error for XGBoost is:',rmse_model)
mae_gnb=metrics.mean_absolute_error(Y_test,y_pred)
mse_gnb=metrics.mean_squared_error(Y_test,y_pred)
rmse_gnb=np.sqrt(mse_gnb)
print('Mean Absolute Error for Naive Bayes is:',mae_gnb)
print('Mean Squared Error for Naive Bayes is:',mse_gnb)
print('Root Mean Squared Error for Naive Bayes is:',rmse_gnb) | 0.585338 | 0.362236 |
import itertools
from pathlib import Path
import pytest
from fpdf import FPDF
from fpdf.errors import FPDFException
from fpdf.fonts import fpdf_charwidths
from test.conftest import assert_pdf_equal
HERE = Path(__file__).resolve().parent
def test_no_set_font():
    """Calling text() before set_font() must raise an FPDFException."""
    doc = FPDF()
    doc.add_page()
    with pytest.raises(FPDFException) as exc_info:
        doc.text(10, 10, "Hello World!")
    assert (
        str(exc_info.value)
        == "No font set, you need to call set_font() beforehand"
    )
def test_set_unknown_font():
    """set_font() with an unregistered family must raise an FPDFException."""
    doc = FPDF()
    doc.add_page()
    expected = (
        "Undefined font: dummy - Use built-in fonts or FPDF.add_font() beforehand"
    )
    with pytest.raises(FPDFException) as exc_info:
        doc.set_font("Dummy")
    assert str(exc_info.value) == expected
def test_set_unknown_style():
    """set_font() with letters outside B/I/U must raise a ValueError."""
    doc = FPDF()
    doc.add_page()
    expected = "Unknown style provided (only B/I/U letters are allowed): BDLO"
    with pytest.raises(ValueError) as exc_info:
        doc.set_font("Times", style="bold")
    assert str(exc_info.value) == expected
def test_set_builtin_font(tmp_path):
    """Render every built-in core font (with all applicable styles) and
    compare the result against a reference PDF."""
    pdf = FPDF()
    pdf.add_page()
    # Keep only the base families; the B/I/BI faces are driven via `style`.
    builtin_fonts = sorted(
        f for f in pdf.core_fonts if not f.endswith(("B", "I", "BI"))
    )
    for i, font_name in enumerate(builtin_fonts):
        # symbol and zapfdingbats have a single face, so no B/I variants.
        styles = (
            ("",) if font_name in ("symbol", "zapfdingbats") else ("", "B", "I", "BI")
        )
        for j, style in enumerate(styles):
            # Exercise both capitalisations of the family name.
            pdf.set_font(font_name.capitalize(), style, 36)
            pdf.set_font(font_name.lower(), style, 36)
            pdf.text(0, 10 + 40 * i + 10 * j, "Hello World!")
    assert_pdf_equal(pdf, HERE / "fonts_set_builtin_font.pdf", tmp_path)
def test_issue_66(tmp_path):
    """Regression test for issue #66: re-selecting an already used font
    must not drop previously written text."""
    pdf = FPDF()
    pdf.add_page()
    pdf.set_font("Times", "B", 14)
    pdf.cell(txt="ABC")
    pdf.set_font("Times", size=10)
    pdf.cell(txt="DEF")
    # Setting the font to an already used one used to remove the text!
    pdf.set_font("Times", "B", 14)
    assert_pdf_equal(pdf, HERE / "fonts_issue_66.pdf", tmp_path)
def test_set_font_aliases_as_font():
    """Font aliases must be substituted by their core-font alternatives."""
    doc = FPDF()
    doc.add_page()
    substitutions = [
        ("ARIAL", "helvetica"),
        ("Arial", "helvetica"),
        ("arial", "helvetica"),
        ("couriernew", "courier"),
        ("timesnewroman", "times"),
    ]
    for alias, core_font in substitutions:
        # The substitution must be announced via a UserWarning...
        with pytest.warns(
            UserWarning,
            match=f"Substituting font {alias.lower()} by core font {core_font}",
        ):
            doc.set_font(alias)
        # ...and the current family must be the substituted core font.
        assert doc.font_family == core_font
    # Each core font is registered exactly once, in first-use order.
    assert list(doc.fonts) == ["helvetica", "courier", "times"]
def test_set_font_core_font_attributes():
    """Test if the attributes of added core fonts are correct"""
    pdf = FPDF()
    pdf.add_page()
    pdf.set_font("courier")
    pdf.set_font("times")
    # Test for the font attributes
    assert pdf.fonts["courier"] == {
        "i": 1,  # registration order: courier was selected first
        "type": "core",
        "name": "Courier",
        "up": -100,  # presumably underline position -- confirm in fpdf.fonts
        "ut": 50,  # presumably underline thickness -- confirm in fpdf.fonts
        "cw": fpdf_charwidths["courier"],
        "fontkey": "courier",
    }
    assert pdf.fonts["times"] == {
        "i": 2,  # registered second
        "type": "core",
        "name": "Times-Roman",
        "up": -100,
        "ut": 50,
        "cw": fpdf_charwidths["times"],
        "fontkey": "times",
    }
def test_set_font_styles():
    """Test the different font styles "B", "I" and "U" and combinations."""
    pdf = FPDF()
    pdf.add_page()
    # Generate all possible combinations of "B", "I" and "U" -> "B", "BI", "BUI" ...
    # including "" (no style)
    styles = [
        "".join(style) for i in range(4) for style in itertools.permutations("BUI", i)
    ]
    for style in styles:
        pdf.set_font("Times", style=style)
        # Test if underline is set correctly
        assert pdf.underline == int("U" in style)
        # Test if style is set correctly: "U" is tracked separately (stripped
        # from font_style) and "IB" is normalised to "BI".
        style = style.replace("U", "")
        if style == "IB":
            style = "BI"
        assert pdf.font_style == style
def test_set_font_zapfdingbats_symbol_with_style():
    """Test the fonts zapfdingbats and symbol with a style. This should emit a
    warning, as these fonts don't have a style."""
    pdf = FPDF()
    pdf.add_page()
    # Generate all possible combinations of "B", "I" and "U" -> "B", "BI", "BUI" ...
    # excluding "" (no style)
    styles = [
        "".join(style)
        for i in range(1, 4)
        for style in itertools.permutations("BUI", i)
    ]
    for family in ("zapfdingbats", "symbol"):
        for style in styles:
            # Only bold/italic trigger the warning; a bare "U" does not.
            # NOTE(review): original indentation was lost in this dump --
            # confirm the asserts below belong inside this `if`.
            if "B" in style or "I" in style:
                with pytest.warns(
                    UserWarning,
                    match=f"Built-in font {family} only has a single 'style' and "
                    f"can't be bold or italic",
                ):
                    pdf.set_font(family, style=style)
                # Test if style is set correctly (== no style)
                assert pdf.font_style == ""
                # Test if underline is set correctly
assert pdf.underline == int("U" in style) | SMSProject/venv/Lib/site-packages/test/fonts/test_set_font.py | import itertools
from pathlib import Path
import pytest
from fpdf import FPDF
from fpdf.errors import FPDFException
from fpdf.fonts import fpdf_charwidths
from test.conftest import assert_pdf_equal
HERE = Path(__file__).resolve().parent
def test_no_set_font():
    """Using text() before set_font() must raise an FPDFException."""
    pdf = FPDF()
    pdf.add_page()
    with pytest.raises(FPDFException) as error:
        pdf.text(10, 10, "Hello World!")
    expected_msg = "No font set, you need to call set_font() beforehand"
    assert str(error.value) == expected_msg


def test_set_unknown_font():
    """set_font() with an unknown family must raise an FPDFException."""
    pdf = FPDF()
    pdf.add_page()
    with pytest.raises(FPDFException) as e:
        pdf.set_font("Dummy")
    assert (
        str(e.value)
        == "Undefined font: dummy - Use built-in fonts or FPDF.add_font() beforehand"
    )


def test_set_unknown_style():
    """set_font() with letters outside B/I/U must raise a ValueError."""
    pdf = FPDF()
    pdf.add_page()
    with pytest.raises(ValueError) as e:
        pdf.set_font("Times", style="bold")
    assert (
        str(e.value) == "Unknown style provided (only B/I/U letters are allowed): BDLO"
    )
def test_set_builtin_font(tmp_path):
    """Render all core fonts (every applicable style) against a reference PDF."""
    pdf = FPDF()
    pdf.add_page()
    # Only the base families; B/I/BI faces are driven via the style arg.
    builtin_fonts = sorted(
        f for f in pdf.core_fonts if not f.endswith(("B", "I", "BI"))
    )
    for i, font_name in enumerate(builtin_fonts):
        # symbol/zapfdingbats have a single face, so no B/I variants.
        styles = (
            ("",) if font_name in ("symbol", "zapfdingbats") else ("", "B", "I", "BI")
        )
        for j, style in enumerate(styles):
            # Exercise both capitalisations of the family name.
            pdf.set_font(font_name.capitalize(), style, 36)
            pdf.set_font(font_name.lower(), style, 36)
            pdf.text(0, 10 + 40 * i + 10 * j, "Hello World!")
    assert_pdf_equal(pdf, HERE / "fonts_set_builtin_font.pdf", tmp_path)


def test_issue_66(tmp_path):
    """Regression test: re-selecting a used font must keep existing text."""
    pdf = FPDF()
    pdf.add_page()
    pdf.set_font("Times", "B", 14)
    pdf.cell(txt="ABC")
    pdf.set_font("Times", size=10)
    pdf.cell(txt="DEF")
    # Setting the font to an already used one used to remove the text!
    pdf.set_font("Times", "B", 14)
    assert_pdf_equal(pdf, HERE / "fonts_issue_66.pdf", tmp_path)
def test_set_font_aliases_as_font():
    """Test if font aliases are being converted to their alternatives."""
    pdf = FPDF()
    pdf.add_page()
    # Alias at index k maps to the core font at the same index.
    aliases = ["ARIAL", "Arial", "arial", "couriernew", "timesnewroman"]
    alternatives = ["helvetica", "helvetica", "helvetica", "courier", "times"]
    for alias, alternative in zip(aliases, alternatives):
        # Test if warning get's emitted
        with pytest.warns(
            UserWarning,
            match=f"Substituting font {alias.lower()} by core font {alternative}",
        ):
            pdf.set_font(alias)
        # Test if font family is set correctly
        assert pdf.font_family == alternative
    # Test if the fonts were added in this order and without duplicats:
    # helvetica, courier, times
    assert [*pdf.fonts] == ["helvetica", "courier", "times"]
def test_set_font_core_font_attributes():
    """Test if the attributes of added core fonts are correct"""
    pdf = FPDF()
    pdf.add_page()
    pdf.set_font("courier")
    pdf.set_font("times")
    # Test for the font attributes
    assert pdf.fonts["courier"] == {
        "i": 1,  # registration order: courier selected first
        "type": "core",
        "name": "Courier",
        "up": -100,  # presumably underline position -- confirm in fpdf.fonts
        "ut": 50,  # presumably underline thickness -- confirm in fpdf.fonts
        "cw": fpdf_charwidths["courier"],
        "fontkey": "courier",
    }
    assert pdf.fonts["times"] == {
        "i": 2,  # registered second
        "type": "core",
        "name": "Times-Roman",
        "up": -100,
        "ut": 50,
        "cw": fpdf_charwidths["times"],
        "fontkey": "times",
    }
def test_set_font_styles():
    """Test the different font styles "B", "I" and "U" and combinations."""
    pdf = FPDF()
    pdf.add_page()
    # Generate all possible combinations of "B", "I" and "U" -> "B", "BI", "BUI" ...
    # including "" (no style)
    styles = [
        "".join(style) for i in range(4) for style in itertools.permutations("BUI", i)
    ]
    for style in styles:
        pdf.set_font("Times", style=style)
        # Test if underline is set correctly
        assert pdf.underline == int("U" in style)
        # Test if style is set correctly: "U" is stripped from font_style
        # and "IB" is normalised to "BI".
        style = style.replace("U", "")
        if style == "IB":
            style = "BI"
        assert pdf.font_style == style
def test_set_font_zapfdingbats_symbol_with_style():
    """Test the fonts zapfdingbats and symbol with a style. This should emit a
    warning, as these fonts don't have a style."""
    pdf = FPDF()
    pdf.add_page()
    # Generate all possible combinations of "B", "I" and "U" -> "B", "BI", "BUI" ...
    # excluding "" (no style)
    styles = [
        "".join(style)
        for i in range(1, 4)
        for style in itertools.permutations("BUI", i)
    ]
    for family in ("zapfdingbats", "symbol"):
        for style in styles:
            # Only bold/italic trigger the warning; a bare "U" does not.
            # NOTE(review): original indentation was lost in this dump --
            # confirm the asserts below belong inside this `if`.
            if "B" in style or "I" in style:
                with pytest.warns(
                    UserWarning,
                    match=f"Built-in font {family} only has a single 'style' and "
                    f"can't be bold or italic",
                ):
                    pdf.set_font(family, style=style)
                # Test if style is set correctly (== no style)
                assert pdf.font_style == ""
                # Test if underline is set correctly
assert pdf.underline == int("U" in style) | 0.605566 | 0.390011 |
import os
import smpl.util as util
import smpl.log_module as logger
from smpl.package import LibraryPackage
from smpl.config_file import ConfigObject, PackageParms
import smpl.exec as exec
# Version tag -> download metadata for the supported ncurses releases.
supported_versions = {
    "6.2": {
        "url": "https://ftp.gnu.org/pub/gnu/ncurses/ncurses-6.2.tar.gz",
        "targz": "ncurses-6.2.tar.gz",
        "repo_name": "ncurses-6.2"
    }
}


class NCurses(LibraryPackage):
    """Download / build / install recipe for GNU ncurses.

    Raises ValueError from __init__ when the configured version is not one
    of `supported_versions`.
    """

    def __init__(self, name, parms: PackageParms, cfg_obj: ConfigObject):
        super().__init__(name, cfg_obj)
        if parms.version not in supported_versions:
            v = ", ".join(supported_versions.keys())
            raise ValueError(
                "config file specifies ncurses version {} can only install version {}".format(parms.version, v))
        vers = parms.version
        self.name = name
        self.parms = parms
        self.release = vers
        self.package_url = supported_versions[vers]['url']
        self.targz = supported_versions[vers]['targz']
        self.repo_name = supported_versions[vers]['repo_name']
        # Tarball download target inside the clone directory.
        # (Fixed: the original assigned package_targz_file_path twice.)
        self.package_targz_file_path = os.path.join(self.cfg_obj.clone_dir, self.targz)
        self.wget_output_path = os.path.join(self.cfg_obj.clone_dir, self.targz)
        self.clone_dir_path = os.path.join(self.cfg_obj.clone_dir, self.repo_name)

    def get_package(self):
        """Download the release tarball and unpack it into the clone dir."""
        self.get_and_unpack_tar(self.package_url, self.targz, self.repo_name)

    def stage_package(self):
        """Configure and build ncurses inside the unpacked source tree."""
        logger.writeln("NCurses stage_package begin")
        util.mkdir_p(self.stage_include_dir_path)
        # make sure stage/include/<package> exists and is empty
        util.mkdir_p(self.package_stage_include_dir_path)
        util.rm_directory_contents(self.package_stage_include_dir_path)
        util.mkdir_p(self.stage_lib_dir_path)
        # Remove any previously staged ncurses libraries.
        # (Fixed: the libform cleanup was run twice in the original.)
        # NOTE(review): exec.run receives an argv list, so the "*" wildcards
        # only expand if exec.run itself goes through a shell -- confirm.
        exec.run(["rm", "-rf", "{}/libncurses*".format(self.stage_lib_dir_path)])
        exec.run(["rm", "-rf", "{}/libform*".format(self.stage_lib_dir_path)])
        exec.run(["rm", "-rf", "{}/libmenu*".format(self.stage_lib_dir_path)])
        exec.run([
            "./configure",
            "--prefix={}".format(self.cfg_obj.vendor_dir),
            "--enable-sigwinch",
            "--with-normal",
            "--with-pthread",
            "--with-debug"
        ], self.clone_dir_path)
        exec.run(
            ['make'], self.clone_dir_path
        )
        logger.writeln("NCurses stage_package end")

    def install_package(self):
        """Run `make install` to copy headers/libraries into the vendor prefix."""
        exec.run([
            "make",
            "install"
        ], self.clone_dir_path
        )
# self.libs_from_stage_to_vendor("libmenu*.*") | smpl/ncurses.py | import os
import smpl.util as util
import smpl.log_module as logger
from smpl.package import LibraryPackage
from smpl.config_file import ConfigObject, PackageParms
import smpl.exec as exec
# Version tag -> download metadata for the supported ncurses releases.
supported_versions = {
    "6.2": {
        "url": "https://ftp.gnu.org/pub/gnu/ncurses/ncurses-6.2.tar.gz",
        "targz": "ncurses-6.2.tar.gz",
        "repo_name": "ncurses-6.2"
    }
}


class NCurses(LibraryPackage):
    """Download / build / install recipe for GNU ncurses."""

    def __init__(self, name, parms: PackageParms, cfg_obj: ConfigObject):
        super().__init__(name, cfg_obj)
        # Reject versions that have no download metadata.
        if parms.version not in supported_versions:
            v = ", ".join(supported_versions.keys())
            raise ValueError(
                "config file specifies ncurses version {} can only install version {}".format(parms.version, v))
        vers = parms.version
        self.name = name
        self.parms = parms
        self.release = vers
        self.package_url = supported_versions[vers]['url']
        self.targz = supported_versions[vers]['targz']
        self.repo_name = supported_versions[vers]['repo_name']
        self.package_targz_file_path = os.path.join(self.cfg_obj.clone_dir, self.targz)
        self.wget_output_path = os.path.join(self.cfg_obj.clone_dir, self.targz)
        # NOTE(review): duplicate of the package_targz_file_path assignment above.
        self.package_targz_file_path = os.path.join(self.cfg_obj.clone_dir, self.targz)
        self.clone_dir_path = os.path.join(self.cfg_obj.clone_dir, self.repo_name)

    def get_package(self):
        # Download the release tarball and unpack it into the clone dir.
        self.get_and_unpack_tar(self.package_url, self.targz, self.repo_name)

    def stage_package(self):
        # Configure and build ncurses inside the unpacked source tree.
        logger.writeln("NCurses stage_package begin")
        util.mkdir_p(self.stage_include_dir_path)
        # make sure stage/include/boost exists and is empty
        util.mkdir_p(self.package_stage_include_dir_path)
        util.rm_directory_contents(self.package_stage_include_dir_path)
        util.mkdir_p(self.stage_lib_dir_path)
        # Clear previously staged libraries.
        # NOTE(review): argv lists don't glob unless exec.run uses a shell,
        # and the libform cleanup runs twice -- confirm intent.
        exec.run(["rm", "-rf", "{}/libncurses*".format(self.stage_lib_dir_path)])
        exec.run(["rm", "-rf", "{}/libform*".format(self.stage_lib_dir_path)])
        exec.run(["rm", "-rf", "{}/libmenu*".format(self.stage_lib_dir_path)])
        exec.run(["rm", "-rf", "{}/libform*".format(self.stage_lib_dir_path)])
        exec.run([
            "./configure",
            "--prefix={}".format(self.cfg_obj.vendor_dir),
            "--enable-sigwinch",
            "--with-normal",
            "--with-pthread",
            "--with-debug"
        ], self.clone_dir_path)
        exec.run(
            ['make'], self.clone_dir_path
        )
        # exec.run([
        #     "make",
        #     "install"
        # ], self.clone_dir_path
        # )
        logger.writeln("NCurses stage_package end")

    def install_package(self):
        # Install headers and libraries into the vendor prefix.
        exec.run([
            "make",
            "install"
        ], self.clone_dir_path
        )
        # self.headers_from_stage_to_vendor("ncurses", "ncurses")
        # self.libs_from_stage_to_vendor("libncurse*.*")
        # self.libs_from_stage_to_vendor("libpanel*.*")
        # self.libs_from_stage_to_vendor("libform*.*")
# self.libs_from_stage_to_vendor("libmenu*.*") | 0.22414 | 0.120129 |
from keras import layers, models
from keras.utils.generic_utils import register_keras_serializable
from keras.utils.tf_utils import shape_type_conversion
from .aim import AIM
from .sim import SIM
from ...backbone import Backbone
from ...common import ConvBnRelu, ClassificationHead, resize_by_sample
@register_keras_serializable(package='SegMe>MINet')
class MINet(layers.Layer):
    """MINet segmentation layer.

    Pipeline: backbone multi-scale features -> AIM transition -> per-scale
    SIM refinement fused top-down (stride 32 down to stride 2) -> upsample
    to the input resolution -> classification head.
    """

    def __init__(self, classes, bone_arch, bone_init, bone_train, **kwargs):
        super().__init__(**kwargs)
        # Expects rank-4 uint8 image batches.
        self.input_spec = layers.InputSpec(ndim=4, dtype='uint8')
        self.classes = classes
        self.bone_arch = bone_arch
        self.bone_init = bone_init
        self.bone_train = bone_train

    @shape_type_conversion
    def build(self, input_shape):
        # Backbone emits feature maps at strides 2, 4, 8, 16 and 32.
        self.bone = Backbone(self.bone_arch, self.bone_init, self.bone_train, scales=[2, 4, 8, 16, 32])
        # AIM transition maps every scale to 64 channels.
        self.trans = AIM(filters=(64, 64, 64, 64, 64))
        # One SIM refinement block per scale (suffix = backbone stride).
        self.sim32 = SIM(32)
        self.sim16 = SIM(32)
        self.sim8 = SIM(32)
        self.sim4 = SIM(32)
        self.sim2 = SIM(32)
        self.upconv32 = ConvBnRelu(64, 3)
        self.upconv16 = ConvBnRelu(64, 3)
        self.upconv8 = ConvBnRelu(64, 3)
        self.upconv4 = ConvBnRelu(64, 3)
        self.upconv2 = ConvBnRelu(32, 3)
        self.upconv1 = ConvBnRelu(32, 3)
        self.head = ClassificationHead(self.classes)
        super().build(input_shape)

    def call(self, inputs, **kwargs):
        c1, c2, c3, c4, c5 = self.bone(inputs)
        out1, out2, out3, out4, out5 = self.trans([c1, c2, c3, c4, c5])
        # Top-down fusion: refine each scale with SIM (residual add), then
        # resize to the next finer scale and add its features.
        out5 = self.upconv32(layers.add([self.sim32(out5), out5]))
        out4 = layers.add([resize_by_sample([out5, out4]), out4])
        out4 = self.upconv16(layers.add([self.sim16(out4), out4]))
        out3 = layers.add([resize_by_sample([out4, out3]), out3])
        out3 = self.upconv8(layers.add([self.sim8(out3), out3]))
        out2 = layers.add([resize_by_sample([out3, out2]), out2])
        out2 = self.upconv4(layers.add([self.sim4(out2), out2]))
        out1 = layers.add([resize_by_sample([out2, out1]), out1])
        out1 = self.upconv2(layers.add([self.sim2(out1), out1]))
        # Resize to the input resolution before classification.
        outputs = self.upconv1(resize_by_sample([out1, inputs]))
        return self.head(outputs)

    @shape_type_conversion
    def compute_output_shape(self, input_shape):
        return self.head.compute_output_shape(input_shape)

    def compute_output_signature(self, input_signature):
        return self.head.compute_output_signature(input_signature)

    def get_config(self):
        # Serialise constructor arguments for keras (de)serialisation.
        config = super().get_config()
        config.update({
            'classes': self.classes,
            'bone_arch': self.bone_arch,
            'bone_init': self.bone_init,
            'bone_train': self.bone_train
        })
        return config
def build_minet(classes, bone_arch='resnet_50', bone_init='imagenet', bone_train=False):
inputs = layers.Input(name='image', shape=[None, None, 3], dtype='uint8')
outputs = MINet(classes, bone_arch=bone_arch, bone_init=bone_init, bone_train=bone_train)(inputs)
model = models.Model(inputs=inputs, outputs=outputs, name='minet')
return model | segme/model/minet/model.py | from keras import layers, models
from keras.utils.generic_utils import register_keras_serializable
from keras.utils.tf_utils import shape_type_conversion
from .aim import AIM
from .sim import SIM
from ...backbone import Backbone
from ...common import ConvBnRelu, ClassificationHead, resize_by_sample
@register_keras_serializable(package='SegMe>MINet')
class MINet(layers.Layer):
    """MINet segmentation layer: backbone features -> AIM transition ->
    per-scale SIM refinement with top-down fusion -> classification head."""

    def __init__(self, classes, bone_arch, bone_init, bone_train, **kwargs):
        super().__init__(**kwargs)
        # Expects rank-4 uint8 image batches.
        self.input_spec = layers.InputSpec(ndim=4, dtype='uint8')
        self.classes = classes
        self.bone_arch = bone_arch
        self.bone_init = bone_init
        self.bone_train = bone_train

    @shape_type_conversion
    def build(self, input_shape):
        # Backbone emits feature maps at strides 2, 4, 8, 16 and 32.
        self.bone = Backbone(self.bone_arch, self.bone_init, self.bone_train, scales=[2, 4, 8, 16, 32])
        self.trans = AIM(filters=(64, 64, 64, 64, 64))
        # One SIM refinement block per scale (suffix = backbone stride).
        self.sim32 = SIM(32)
        self.sim16 = SIM(32)
        self.sim8 = SIM(32)
        self.sim4 = SIM(32)
        self.sim2 = SIM(32)
        self.upconv32 = ConvBnRelu(64, 3)
        self.upconv16 = ConvBnRelu(64, 3)
        self.upconv8 = ConvBnRelu(64, 3)
        self.upconv4 = ConvBnRelu(64, 3)
        self.upconv2 = ConvBnRelu(32, 3)
        self.upconv1 = ConvBnRelu(32, 3)
        self.head = ClassificationHead(self.classes)
        super().build(input_shape)

    def call(self, inputs, **kwargs):
        c1, c2, c3, c4, c5 = self.bone(inputs)
        out1, out2, out3, out4, out5 = self.trans([c1, c2, c3, c4, c5])
        # Top-down fusion: SIM-refine each scale (residual add), resize to
        # the next finer scale and add its features.
        out5 = self.upconv32(layers.add([self.sim32(out5), out5]))
        out4 = layers.add([resize_by_sample([out5, out4]), out4])
        out4 = self.upconv16(layers.add([self.sim16(out4), out4]))
        out3 = layers.add([resize_by_sample([out4, out3]), out3])
        out3 = self.upconv8(layers.add([self.sim8(out3), out3]))
        out2 = layers.add([resize_by_sample([out3, out2]), out2])
        out2 = self.upconv4(layers.add([self.sim4(out2), out2]))
        out1 = layers.add([resize_by_sample([out2, out1]), out1])
        out1 = self.upconv2(layers.add([self.sim2(out1), out1]))
        # Resize to the input resolution before classification.
        outputs = self.upconv1(resize_by_sample([out1, inputs]))
        return self.head(outputs)

    @shape_type_conversion
    def compute_output_shape(self, input_shape):
        return self.head.compute_output_shape(input_shape)

    def compute_output_signature(self, input_signature):
        return self.head.compute_output_signature(input_signature)

    def get_config(self):
        # Serialise constructor arguments for keras (de)serialisation.
        config = super().get_config()
        config.update({
            'classes': self.classes,
            'bone_arch': self.bone_arch,
            'bone_init': self.bone_init,
            'bone_train': self.bone_train
        })
        return config
def build_minet(classes, bone_arch='resnet_50', bone_init='imagenet', bone_train=False):
inputs = layers.Input(name='image', shape=[None, None, 3], dtype='uint8')
outputs = MINet(classes, bone_arch=bone_arch, bone_init=bone_init, bone_train=bone_train)(inputs)
model = models.Model(inputs=inputs, outputs=outputs, name='minet')
return model | 0.92801 | 0.276324 |
import requests
from bs4 import BeautifulSoup
import re
import time
import json
from mysql_handle.mysql_conn import MySqlConn
class AreaCodeParse(object):
    """Fetch an administrative-area / zip-code query page, parse it with
    BeautifulSoup and insert the (name, code, pinyin) records into MySQL."""

    # Dump locations for the raw and the parsed HTML snapshots.
    html_file_path = '/Users/xxxs/Documents/dev-code/html_area_zip_code.txt'
    html_file_parsed_path = '/Users/xxxs/Documents/dev-code/html_area_zip_code_parsed.txt'

    def request_url(self, date_time_str):
        # NOTE(review): "defaultQuery?" appears twice in the URL -- confirm
        # the endpoint really expects that.
        url = "http://192.168.127.12/defaultQuery?defaultQuery?shengji=&diji=-1&xianji="
        headers = {  # request headers, used when refreshing the captcha and sending the POST
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:48.0) Gecko/20100101 Firefox/48.0',
            'Accept': '*/*',
            'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3',
            'Accept-Encoding': 'gzip, deflate'
        }
        print("get_area_zip_code stat...")
        session = requests.Session()
        print("get_area_zip_code-url-is:", url)
        res = session.get(url, headers=headers)
        # set the encoding (the response body is GBK encoded)
        data = res.content.decode("GBK", "ignore")
        self.soup_from_html(data)

    def file_write(self, path, content):
        # NOTE(review): appends the module-level global `date_time_str`
        # (set in the __main__ block), not a method argument -- confirm.
        target_file = open(path + date_time_str, 'w')
        target_file.write(content)
        target_file.close()

    def soup_parse(self, content):
        # Extract the <table class="info_table"> rows and dump each row as
        # one line of prettified HTML.
        soup = BeautifulSoup(content, "html.parser")
        info_table = soup.find("table", {"class": "info_table"})
        tr_lines = info_table.find_all("tr")
        print(date_time_str)
        ## write the file
        html_file_parsed = open(self.html_file_parsed_path + date_time_str, 'wb')
        for tr_line in tr_lines:
            if len(tr_line) > 0:
                pretty = tr_line.prettify()
                # Collapse the prettified row onto a single line.
                new_tr = re.sub('\r?\n', '', pretty) + "\n"
                html_file_parsed.write(new_tr.encode("utf-8"))
        html_file_parsed.close()

    def soup_parse2(self, content):
        # The page embeds the region list as JSON in a hidden input
        # (id="pyArr"); parse it and insert every record into MySQL.
        soup = BeautifulSoup(content, "html.parser")
        input_hidden_value = soup.find("input", {"id": "pyArr"})['value'].replace(" ", "")
        datas = json.loads(input_hidden_value)
        mysql_conn = MySqlConn.get_mysql()
        # create a cursor object via cursor()
        cursor = mysql_conn.cursor()
        for data in datas:
            # example record:
            # {'cName': '北京市', 'code': '110000', 'py': 'BeijingShi', 'jp': 'bjs', 'qp': 'BeijingShi'}
            self.insert_into_mysql(mysql_conn, cursor, (data['cName'], data['code'], data['py'], data['jp'], data['qp']))
        cursor.close()
        mysql_conn.close()

    def soup_from_html(self, content):
        # self.soup_parse(content)
        self.soup_parse2(content)

    def soup_from_text(self, date_time_str):
        # python file doc: https://www.crummy.com/software/BeautifulSoup/bs4/doc/index.zh.html
        # Re-parse a previously dumped HTML snapshot instead of re-fetching.
        text_file = open(self.html_file_path + date_time_str, encoding="utf-8")
        content = text_file.read()
        self.soup_parse(content)

    def parse_element(self, tree):
        # Debug helper: locate the info table via XPath on an lxml-style tree.
        items = tree.xpath('//*[@id="center"]/div[3]/table[@class="info_table"]')
        print(items)
        print(items[0])
        for item in items:
            print("\n====>>")
            print(item.element())
        ## write the file
        # NOTE(review): `items` is a list but file.write() expects a string;
        # this line looks like it would raise TypeError -- confirm.
        target_file2 = open('/Users/xxxs/Documents/dev-code/html_area_zip_code_res.txt', 'w')
        target_file2.write(items)
        target_file2.close()

    def insert_into_mysql(self, mysql_conn, cursor, data):
        insert_sql = ("INSERT INTO dim_city_name_code (cName,code,py,jp, qp) VALUES (%s, %s, %s, %s, %s)")
        # run the statement via execute(); roll back on any failure
        try:
            cursor.execute(insert_sql, data)
            mysql_conn.commit()
        except Exception as e:
            mysql_conn.rollback()
            print(str(e))


if __name__ == '__main__':
    print("-------------------")
    parse = AreaCodeParse()
    # date_time_str = time.strftime("%Y-%m-%d%H:%M:%S", time.localtime())
    # Module-level timestamp suffix; also read as a global by methods above.
    date_time_str = "_" + time.strftime("%Y-%m-%d-%H", time.localtime())
parse.request_url(date_time_str) | pachong/xingzheng_area_code/area_zip_code.py |
import requests
from bs4 import BeautifulSoup
import re
import time
import json
from mysql_handle.mysql_conn import MySqlConn
class AreaCodeParse(object):
    """Fetch an administrative-area / zip-code query page, parse it with
    BeautifulSoup and insert the (name, code, pinyin) records into MySQL."""

    # Dump locations for the raw and the parsed HTML snapshots.
    html_file_path = '/Users/xxxs/Documents/dev-code/html_area_zip_code.txt'
    html_file_parsed_path = '/Users/xxxs/Documents/dev-code/html_area_zip_code_parsed.txt'

    def request_url(self, date_time_str):
        # NOTE(review): "defaultQuery?" appears twice in the URL -- confirm
        # the endpoint really expects that.
        url = "http://192.168.127.12/defaultQuery?defaultQuery?shengji=&diji=-1&xianji="
        headers = {  # request headers, used when refreshing the captcha and sending the POST
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:48.0) Gecko/20100101 Firefox/48.0',
            'Accept': '*/*',
            'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3',
            'Accept-Encoding': 'gzip, deflate'
        }
        print("get_area_zip_code stat...")
        session = requests.Session()
        print("get_area_zip_code-url-is:", url)
        res = session.get(url, headers=headers)
        # set the encoding (the response body is GBK encoded)
        data = res.content.decode("GBK", "ignore")
        self.soup_from_html(data)

    def file_write(self, path, content):
        # NOTE(review): appends the module-level global `date_time_str`
        # (set in the __main__ block), not a method argument -- confirm.
        target_file = open(path + date_time_str, 'w')
        target_file.write(content)
        target_file.close()

    def soup_parse(self, content):
        # Extract the <table class="info_table"> rows and dump each row as
        # one line of prettified HTML.
        soup = BeautifulSoup(content, "html.parser")
        info_table = soup.find("table", {"class": "info_table"})
        tr_lines = info_table.find_all("tr")
        print(date_time_str)
        ## write the file
        html_file_parsed = open(self.html_file_parsed_path + date_time_str, 'wb')
        for tr_line in tr_lines:
            if len(tr_line) > 0:
                pretty = tr_line.prettify()
                # Collapse the prettified row onto a single line.
                new_tr = re.sub('\r?\n', '', pretty) + "\n"
                html_file_parsed.write(new_tr.encode("utf-8"))
        html_file_parsed.close()

    def soup_parse2(self, content):
        # The page embeds the region list as JSON in a hidden input
        # (id="pyArr"); parse it and insert every record into MySQL.
        soup = BeautifulSoup(content, "html.parser")
        input_hidden_value = soup.find("input", {"id": "pyArr"})['value'].replace(" ", "")
        datas = json.loads(input_hidden_value)
        mysql_conn = MySqlConn.get_mysql()
        # create a cursor object via cursor()
        cursor = mysql_conn.cursor()
        for data in datas:
            # example record:
            # {'cName': '北京市', 'code': '110000', 'py': 'BeijingShi', 'jp': 'bjs', 'qp': 'BeijingShi'}
            self.insert_into_mysql(mysql_conn, cursor, (data['cName'], data['code'], data['py'], data['jp'], data['qp']))
        cursor.close()
        mysql_conn.close()

    def soup_from_html(self, content):
        # self.soup_parse(content)
        self.soup_parse2(content)

    def soup_from_text(self, date_time_str):
        # python file doc: https://www.crummy.com/software/BeautifulSoup/bs4/doc/index.zh.html
        # Re-parse a previously dumped HTML snapshot instead of re-fetching.
        text_file = open(self.html_file_path + date_time_str, encoding="utf-8")
        content = text_file.read()
        self.soup_parse(content)

    def parse_element(self, tree):
        # Debug helper: locate the info table via XPath on an lxml-style tree.
        items = tree.xpath('//*[@id="center"]/div[3]/table[@class="info_table"]')
        print(items)
        print(items[0])
        for item in items:
            print("\n====>>")
            print(item.element())
        ## write the file
        # NOTE(review): `items` is a list but file.write() expects a string;
        # this line looks like it would raise TypeError -- confirm.
        target_file2 = open('/Users/xxxs/Documents/dev-code/html_area_zip_code_res.txt', 'w')
        target_file2.write(items)
        target_file2.close()

    def insert_into_mysql(self, mysql_conn, cursor, data):
        insert_sql = ("INSERT INTO dim_city_name_code (cName,code,py,jp, qp) VALUES (%s, %s, %s, %s, %s)")
        # run the statement via execute(); roll back on any failure
        try:
            cursor.execute(insert_sql, data)
            mysql_conn.commit()
        except Exception as e:
            mysql_conn.rollback()
            print(str(e))


if __name__ == '__main__':
    print("-------------------")
    parse = AreaCodeParse()
    # date_time_str = time.strftime("%Y-%m-%d%H:%M:%S", time.localtime())
    # Module-level timestamp suffix; also read as a global by methods above.
    date_time_str = "_" + time.strftime("%Y-%m-%d-%H", time.localtime())
parse.request_url(date_time_str) | 0.204739 | 0.090856 |
from pygments.style import Style
from pygments.token import Token, Comment, Name, Keyword, Generic, Number
from pygments.token import Operator, String, Text, Error
white = '#ffffff'
bright_orange = '#f26512'
yolk_yellow = '#f8d734'
lemon_yellow = '#BBF34E'
bright_green = '#62d04e'
dark_green = '#0B3222'
dark_red = '#370B22'
medium_grey = '#AEAEAE'
really_dark_blue = '#0d152c'
dark_blue = '#181f35'
medium_blue = '#172247'
light_blue = '#84A7C1'
vivid_blue = '#36428a'
class BlackboardStyle(Style):
color = white
background_color = really_dark_blue
highlight_color = vivid_blue
styles = {
Text: white,
Keyword: yolk_yellow,
Keyword.Constant: lemon_yellow,
#Keyword.Declaration
#Keyword.Namespace
#Keyword.Pseudo
#Keyword.Reserved
Keyword.Type: light_blue,
#Name
#Name.Attribute
Name.Builtin: light_blue,
#Name.Builtin.Pseudo
Name.Class: bright_orange,
Name.Constant: lemon_yellow,
#Name.Decorator
#Name.Entity
#Name.Exception
Name.Function: bright_orange,
#Name.Label
#Name.Namespace
#Name.Other
#Name.Tag
#Name.Variable
#Name.Variable.Class
#Name.Variable.Global
#Name.Variable.Instance
#Literal
#Literal.Date
String: bright_green,
#String.Backtick
#String.Char
#String.Doc
#String.Double
#String.Escape
#String.Heredoc
#String.Interpol
#String.Other
#String.Regex
#String.Single
#String.Symbol
Number: lemon_yellow,
#Number.Float
#Number.Hex
#Number.Integer
#Number.Integer.Long
#Number.Oct
#Operator
#Operator.Word
#Punctuation
Comment: medium_grey,
#Comment.Multiline
#Comment.Preproc
#Comment.Single
#Comment.Special
#Generic
#Generic.Deleted
Generic.Emph: 'italic',
#Generic.Error
#Generic.Heading
#Generic.Inserted
#Generic.Output
#Generic.Prompt
Generic.Strong: 'bold',
#Generic.Subheading
#Generic.Traceback
#Token
#Token.Other
} | qtc_color_themes/blackboard.py | from pygments.style import Style
from pygments.token import Token, Comment, Name, Keyword, Generic, Number
from pygments.token import Operator, String, Text, Error
white = '#ffffff'
bright_orange = '#f26512'
yolk_yellow = '#f8d734'
lemon_yellow = '#BBF34E'
bright_green = '#62d04e'
dark_green = '#0B3222'
dark_red = '#370B22'
medium_grey = '#AEAEAE'
really_dark_blue = '#0d152c'
dark_blue = '#181f35'
medium_blue = '#172247'
light_blue = '#84A7C1'
vivid_blue = '#36428a'
class BlackboardStyle(Style):
color = white
background_color = really_dark_blue
highlight_color = vivid_blue
styles = {
Text: white,
Keyword: yolk_yellow,
Keyword.Constant: lemon_yellow,
#Keyword.Declaration
#Keyword.Namespace
#Keyword.Pseudo
#Keyword.Reserved
Keyword.Type: light_blue,
#Name
#Name.Attribute
Name.Builtin: light_blue,
#Name.Builtin.Pseudo
Name.Class: bright_orange,
Name.Constant: lemon_yellow,
#Name.Decorator
#Name.Entity
#Name.Exception
Name.Function: bright_orange,
#Name.Label
#Name.Namespace
#Name.Other
#Name.Tag
#Name.Variable
#Name.Variable.Class
#Name.Variable.Global
#Name.Variable.Instance
#Literal
#Literal.Date
String: bright_green,
#String.Backtick
#String.Char
#String.Doc
#String.Double
#String.Escape
#String.Heredoc
#String.Interpol
#String.Other
#String.Regex
#String.Single
#String.Symbol
Number: lemon_yellow,
#Number.Float
#Number.Hex
#Number.Integer
#Number.Integer.Long
#Number.Oct
#Operator
#Operator.Word
#Punctuation
Comment: medium_grey,
#Comment.Multiline
#Comment.Preproc
#Comment.Single
#Comment.Special
#Generic
#Generic.Deleted
Generic.Emph: 'italic',
#Generic.Error
#Generic.Heading
#Generic.Inserted
#Generic.Output
#Generic.Prompt
Generic.Strong: 'bold',
#Generic.Subheading
#Generic.Traceback
#Token
#Token.Other
} | 0.477067 | 0.083441 |
from kProcessor.kDataFrame import kDataFrame
class colored_kDataFrame(kDataFrame):
"""colored_kDataFrame class
.. note:: the colored_kDataFrame Inherits all the functions from :class:`kProcessor.kDataFrame` plus other new functions.
*Introduction*:
- The colored_kDataFrame class holds the Kmers colors instead of their count.
- The **color** is an integer represents the targets which contains that kmer.
Example:
**color:** ``1``: represents the transcripts ``transcript_A`` , ``transcript_B`` and ``transcript_C``
**color:** ``2``: represents the transcripts ``transcript_A`` , ``transcript_B``
**kmer:** ``ACTGATCGATCGTACGAC`` has the **color** `2`, that means it's found in both `transcript_A` and `transcript_B`
**kmer:** ``ATAAGCATTTACAGCAAT`` has the **color** `1`, that means it's found in both `transcript_A` , `transcript_B` and `transcript_C`
"""
pass
def getColor(self, kmer):
"""
Get the color of the kmer
:param kmer: Kmer string
:type kmer: str
:return: The color of the kmer
:rtype: int
"""
pass
def getKmerSource(self, kmer):
"""
Get all sample IDs that contains that kmer.
:param kmer: Kmer string
:type kmer: str
:return: List of all samples IDs associated with that kmer.
:rtype: list
"""
def getKmerSourceFromColor(self, color):
"""
Get all sample IDs that contains that kmer.
:param color: Kmer color
:type color: int
:return: List of all samples IDs associated with that color.
:rtype: list
"""
def names_map(self):
"""
Get the names map dictionary that represents sample ID as key and its group name as value.
:return: names map dictionary.
:rtype: dict
"""
def inverse_names_map(self):
"""
Get the names map dictionary that represents group name as key and its sample ID as value.
:return: inverse names map dictionary.
:rtype: dict
"""
@staticmethod
def load(prefix):
"""
Load colored_kDataFrame file from disk.
:param prefix: file path
:type prefix: string
:return: Colored kDataFrame that has been serialized on disk.
:rtype: :class:`kProcessor.colored_kDataFrame`
"""
pass
def get_kDataFrame(self):
"""
Get the kDataFrame object that holds the kmers alongside their colors.
:return: the embedded kDataFrame inside the colored_kDataFrame.
:rtype: :class:`kProcessor.kDataFrame`
""" | kProcessor/colored_kDataFrame.py | from kProcessor.kDataFrame import kDataFrame
class colored_kDataFrame(kDataFrame):
"""colored_kDataFrame class
.. note:: the colored_kDataFrame Inherits all the functions from :class:`kProcessor.kDataFrame` plus other new functions.
*Introduction*:
- The colored_kDataFrame class holds the Kmers colors instead of their count.
- The **color** is an integer represents the targets which contains that kmer.
Example:
**color:** ``1``: represents the transcripts ``transcript_A`` , ``transcript_B`` and ``transcript_C``
**color:** ``2``: represents the transcripts ``transcript_A`` , ``transcript_B``
**kmer:** ``ACTGATCGATCGTACGAC`` has the **color** `2`, that means it's found in both `transcript_A` and `transcript_B`
**kmer:** ``ATAAGCATTTACAGCAAT`` has the **color** `1`, that means it's found in both `transcript_A` , `transcript_B` and `transcript_C`
"""
pass
def getColor(self, kmer):
"""
Get the color of the kmer
:param kmer: Kmer string
:type kmer: str
:return: The color of the kmer
:rtype: int
"""
pass
def getKmerSource(self, kmer):
"""
Get all sample IDs that contains that kmer.
:param kmer: Kmer string
:type kmer: str
:return: List of all samples IDs associated with that kmer.
:rtype: list
"""
def getKmerSourceFromColor(self, color):
"""
Get all sample IDs that contains that kmer.
:param color: Kmer color
:type color: int
:return: List of all samples IDs associated with that color.
:rtype: list
"""
def names_map(self):
"""
Get the names map dictionary that represents sample ID as key and its group name as value.
:return: names map dictionary.
:rtype: dict
"""
def inverse_names_map(self):
"""
Get the names map dictionary that represents group name as key and its sample ID as value.
:return: inverse names map dictionary.
:rtype: dict
"""
@staticmethod
def load(prefix):
"""
Load colored_kDataFrame file from disk.
:param prefix: file path
:type prefix: string
:return: Colored kDataFrame that has been serialized on disk.
:rtype: :class:`kProcessor.colored_kDataFrame`
"""
pass
def get_kDataFrame(self):
"""
Get the kDataFrame object that holds the kmers alongside their colors.
:return: the embedded kDataFrame inside the colored_kDataFrame.
:rtype: :class:`kProcessor.kDataFrame`
""" | 0.913032 | 0.807081 |
from layout import datatypes
from . import root
class AlignLM(root.LayoutManager):
"""
A layout manager that takes one element and aligns it according to
the given parameters, optionally within a box of at least a given
size. Several of the other layout managers do some alignment as
part of their normal behavior.
"""
#: Align the element to the top of the space.
ALIGN_TOP = 0
#: Align the element to the vertical middle of the space.
ALIGN_MIDDLE = 1
#: Align the element to the bottom of the space.
ALIGN_BOTTOM = 2
#: Align the element to top and bottom, making it grow vertically.
GROW_Y = 3
#: Align the element to the left of the space.
ALIGN_LEFT = 10
#: Align the element to the horizontal center of the space.
ALIGN_CENTER = 11
#: Align the element to the right of the space.
ALIGN_RIGHT = 12
#: Align the element to left and right, making it grow horizontally.
GROW_X = 13
def __init__(self,
min_width=0, min_height=0,
horizontal_align=ALIGN_LEFT,
vertical_align=ALIGN_TOP,
element=None):
"""
Arguments:
``min_width``
The minimum width to reserve, even if the managed element
is smaller.
``min_height``
The minimum height to reserve, even if the managed element
is smaller.
``horizontal_align``
One of the constants defined in this class for how the
element should be aligned horizontally within its space
(default: :data:`ALIGN_LEFT`)
``vertcal_align``
One of the constants defined in this class for how the
element should be aligned vertically within its space
(default: :data:`ALIGN_TOP`)
"""
self.horizontal_align = horizontal_align
self.vertical_align = vertical_align
self.element = element
self.min_width = min_width
self.min_height = min_height
def get_minimum_size(self, data):
"""Returns the minimum size of the managed element, as long as
it is larger than any manually set minima."""
size = self.element.get_minimum_size(data)
return datatypes.Point(
max(size.x, self.min_width),
max(size.y, self.min_height)
)
def render(self, rect, data):
"""Draws the managed element in the correct alignment."""
# We can't use our get minimum size, because that enforces
# the size limits.
size = self.element.get_minimum_size(data)
# Assume we're bottom left at our natural size.
x = rect.x
y = rect.y
w = size.x
h = size.y
extra_width = rect.w - w
extra_height = rect.h - h
if self.horizontal_align == AlignLM.ALIGN_CENTER:
x += extra_width * 0.5
elif self.horizontal_align == AlignLM.ALIGN_RIGHT:
x += extra_width
elif self.horizontal_align == AlignLM.GROW_X:
w = rect.w
if self.vertical_align == AlignLM.ALIGN_MIDDLE:
y += extra_height * 0.5
elif self.vertical_align == AlignLM.ALIGN_TOP:
y += extra_height
elif self.vertical_align == AlignLM.GROW_Y:
h = rect.h
self.element.render(datatypes.Rectangle(x, y, w, h), data) | layout/managers/align.py | from layout import datatypes
from . import root
class AlignLM(root.LayoutManager):
"""
A layout manager that takes one element and aligns it according to
the given parameters, optionally within a box of at least a given
size. Several of the other layout managers do some alignment as
part of their normal behavior.
"""
#: Align the element to the top of the space.
ALIGN_TOP = 0
#: Align the element to the vertical middle of the space.
ALIGN_MIDDLE = 1
#: Align the element to the bottom of the space.
ALIGN_BOTTOM = 2
#: Align the element to top and bottom, making it grow vertically.
GROW_Y = 3
#: Align the element to the left of the space.
ALIGN_LEFT = 10
#: Align the element to the horizontal center of the space.
ALIGN_CENTER = 11
#: Align the element to the right of the space.
ALIGN_RIGHT = 12
#: Align the element to left and right, making it grow horizontally.
GROW_X = 13
def __init__(self,
min_width=0, min_height=0,
horizontal_align=ALIGN_LEFT,
vertical_align=ALIGN_TOP,
element=None):
"""
Arguments:
``min_width``
The minimum width to reserve, even if the managed element
is smaller.
``min_height``
The minimum height to reserve, even if the managed element
is smaller.
``horizontal_align``
One of the constants defined in this class for how the
element should be aligned horizontally within its space
(default: :data:`ALIGN_LEFT`)
``vertcal_align``
One of the constants defined in this class for how the
element should be aligned vertically within its space
(default: :data:`ALIGN_TOP`)
"""
self.horizontal_align = horizontal_align
self.vertical_align = vertical_align
self.element = element
self.min_width = min_width
self.min_height = min_height
def get_minimum_size(self, data):
"""Returns the minimum size of the managed element, as long as
it is larger than any manually set minima."""
size = self.element.get_minimum_size(data)
return datatypes.Point(
max(size.x, self.min_width),
max(size.y, self.min_height)
)
def render(self, rect, data):
"""Draws the managed element in the correct alignment."""
# We can't use our get minimum size, because that enforces
# the size limits.
size = self.element.get_minimum_size(data)
# Assume we're bottom left at our natural size.
x = rect.x
y = rect.y
w = size.x
h = size.y
extra_width = rect.w - w
extra_height = rect.h - h
if self.horizontal_align == AlignLM.ALIGN_CENTER:
x += extra_width * 0.5
elif self.horizontal_align == AlignLM.ALIGN_RIGHT:
x += extra_width
elif self.horizontal_align == AlignLM.GROW_X:
w = rect.w
if self.vertical_align == AlignLM.ALIGN_MIDDLE:
y += extra_height * 0.5
elif self.vertical_align == AlignLM.ALIGN_TOP:
y += extra_height
elif self.vertical_align == AlignLM.GROW_Y:
h = rect.h
self.element.render(datatypes.Rectangle(x, y, w, h), data) | 0.926458 | 0.560974 |
import argparse
import subprocess
import sys
import os
import shutil
import sysconfig
def vcpkg_root_dir():
return os.path.abspath(os.path.join(os.path.dirname(__file__)))
def run_vcpkg(triplet, vcpkg_args):
if not shutil.which("vcpkg"):
raise RuntimeError("vcpkg executable not found in the PATH environment")
args = ["vcpkg", "--vcpkg-root", vcpkg_root_dir()]
if triplet:
args += ["--triplet", triplet]
args += vcpkg_args
subprocess.check_call(args)
def run_vcpkg_output(triplet, vcpkg_args):
if not shutil.which("vcpkg"):
raise RuntimeError("vcpkg executable not found in the PATH environment")
args = ["vcpkg", "--vcpkg-root", vcpkg_root_dir()]
if triplet:
args += ["--triplet", triplet]
args += vcpkg_args
return subprocess.check_output(args).decode("UTF-8")
def vcpkg_list_ports(triplet):
args = ["list"]
ports = set()
for line in run_vcpkg_output(triplet, args).splitlines():
name, trip = tuple(line.split()[0].split(":"))
if triplet is None or trip == triplet:
if not "[" in name:
ports.add(name)
return ports
def clean(triplet, all):
if triplet is None:
shutil.rmtree(os.path.join(vcpkg_root_dir(), "installed"))
shutil.rmtree(os.path.join(vcpkg_root_dir(), "buildtrees"))
shutil.rmtree(os.path.join(vcpkg_root_dir(), "packages"))
return
for directory in os.listdir(os.path.join(vcpkg_root_dir(), "packages")):
package, package_triplet = tuple(directory.split("_"))
if package.startswith("."):
continue
if package_triplet == triplet:
shutil.rmtree(os.path.join(vcpkg_root_dir(), "packages", directory))
ports = vcpkg_list_ports(triplet)
if len(ports) > 0:
run_vcpkg(triplet, ["remove", "--recurse"] + list(ports))
if __name__ == "__main__":
try:
parser = argparse.ArgumentParser(description="Bootstrap vcpkg ports.")
parser.add_argument(
"-t",
"--triplet",
dest="triplet",
metavar="TRIPLET",
help="the triplet to use",
)
parser.add_argument(
"-a", "--all", dest="all", help="also delete the installed directory"
)
args = parser.parse_args()
clean(args.triplet, args.all)
except KeyboardInterrupt:
print("Interrupted")
sys.exit(-1)
except RuntimeError as e:
print(e)
sys.exit(-1) | clean.py | import argparse
import subprocess
import sys
import os
import shutil
import sysconfig
def vcpkg_root_dir():
return os.path.abspath(os.path.join(os.path.dirname(__file__)))
def run_vcpkg(triplet, vcpkg_args):
if not shutil.which("vcpkg"):
raise RuntimeError("vcpkg executable not found in the PATH environment")
args = ["vcpkg", "--vcpkg-root", vcpkg_root_dir()]
if triplet:
args += ["--triplet", triplet]
args += vcpkg_args
subprocess.check_call(args)
def run_vcpkg_output(triplet, vcpkg_args):
if not shutil.which("vcpkg"):
raise RuntimeError("vcpkg executable not found in the PATH environment")
args = ["vcpkg", "--vcpkg-root", vcpkg_root_dir()]
if triplet:
args += ["--triplet", triplet]
args += vcpkg_args
return subprocess.check_output(args).decode("UTF-8")
def vcpkg_list_ports(triplet):
args = ["list"]
ports = set()
for line in run_vcpkg_output(triplet, args).splitlines():
name, trip = tuple(line.split()[0].split(":"))
if triplet is None or trip == triplet:
if not "[" in name:
ports.add(name)
return ports
def clean(triplet, all):
if triplet is None:
shutil.rmtree(os.path.join(vcpkg_root_dir(), "installed"))
shutil.rmtree(os.path.join(vcpkg_root_dir(), "buildtrees"))
shutil.rmtree(os.path.join(vcpkg_root_dir(), "packages"))
return
for directory in os.listdir(os.path.join(vcpkg_root_dir(), "packages")):
package, package_triplet = tuple(directory.split("_"))
if package.startswith("."):
continue
if package_triplet == triplet:
shutil.rmtree(os.path.join(vcpkg_root_dir(), "packages", directory))
ports = vcpkg_list_ports(triplet)
if len(ports) > 0:
run_vcpkg(triplet, ["remove", "--recurse"] + list(ports))
if __name__ == "__main__":
try:
parser = argparse.ArgumentParser(description="Bootstrap vcpkg ports.")
parser.add_argument(
"-t",
"--triplet",
dest="triplet",
metavar="TRIPLET",
help="the triplet to use",
)
parser.add_argument(
"-a", "--all", dest="all", help="also delete the installed directory"
)
args = parser.parse_args()
clean(args.triplet, args.all)
except KeyboardInterrupt:
print("Interrupted")
sys.exit(-1)
except RuntimeError as e:
print(e)
sys.exit(-1) | 0.309128 | 0.104981 |
import json
class apiCallsWrapper(object):
def __init__(self, access_hostname, account_switch_key):
self.access_hostname = access_hostname
if account_switch_key != None:
self.account_switch_key = '&accountSwitchKey=' + account_switch_key
else:
self.account_switch_key = ''
headers = {
"Content-Type": "application/json"
}
def checkAuthorization(self, session):
"""
Function to check permissions granted for Credentials
"""
get_credential_details_url = 'https://' + self.access_hostname + "/-/client-api/active-grants/implicit"
if '?' in get_credential_details_url:
get_credential_details_url = get_credential_details_url + self.account_switch_key
else:
#Replace & with ? if there is no query string in URL and DO NOT override object property account_switch_key
account_switch_key = self.account_switch_key.translate(self.account_switch_key.maketrans('&','?'))
get_credential_details_url = get_credential_details_url + account_switch_key
credential_details_response = session.get(get_credential_details_url)
return credential_details_response
def createCpcode(self,session, contractId, groupId, productId, cpcode_name):
"""
Function to create cpcode
"""
newCpcodeData = """
{
"productId": "%s",
"cpcodeName": "%s"
}
""" % (productId,cpcode_name)
create_cpcode_url = 'https://' + self.access_hostname + '/papi/v1/cpcodes?contractId=' + contractId + '&groupId=' + groupId
if '?' in create_cpcode_url:
create_cpcode_url = create_cpcode_url + self.account_switch_key
else:
#Replace & with ? if there is no query string in URL and DO NOT override object property account_switch_key
account_switch_key = self.account_switch_key.translate(self.account_switch_key.maketrans('&','?'))
create_cpcode_url = create_cpcode_url + account_switch_key
create_cpcode_response = session.post(create_cpcode_url, data=newCpcodeData,headers=self.headers)
return create_cpcode_response
def createProperty(self, session, contractId, groupId, productId, property_name):
"""
Function to create property
"""
newPropertyData = """
{
"productId": "%s",
"propertyName": "%s"
}
""" % (productId,property_name)
create_property_url = 'https://' + self.access_hostname + '/papi/v1/properties?contractId=' + contractId + '&groupId=' + groupId
if '?' in create_property_url:
create_property_url = create_property_url + self.account_switch_key
else:
#Replace & with ? if there is no query string in URL and DO NOT override object property account_switch_key
account_switch_key = self.account_switch_key.translate(self.account_switch_key.maketrans('&','?'))
create_property_url = create_property_url + account_switch_key
create_property_response = session.post(create_property_url, data=newPropertyData,headers=self.headers)
return create_property_response
def updatePropertyRules(self, session, contractId, groupId, propertyId, ruleFormat, ruletree):
"""
Function to update property rules
"""
headers = {
"Content-Type": "application/vnd.akamai.papirules.latest+json"
}
if ruleFormat != 'latest':
version_string = "application/vnd.akamai.papirules." + str(ruleFormat) + "+json"
headers["Content-Type"] = version_string
update_property_url = 'https://' + self.access_hostname + '/papi/v1/properties/' + propertyId +'/versions/1/rules?contractId=' + contractId + '&groupId=' + groupId + '&validateRules=false'
if '?' in update_property_url:
update_property_url = update_property_url + self.account_switch_key
else:
#Replace & with ? if there is no query string in URL and DO NOT override object property account_switch_key
account_switch_key = self.account_switch_key.translate(self.account_switch_key.maketrans('&','?'))
update_property_url = update_property_url + account_switch_key
update_property_response = session.put(update_property_url, data=ruletree,headers=headers)
return update_property_response
def createEdgehostnameArray(self, hostname_list, edge_hostname_id):
"""
Function to create Edgehostname array for existing edgehostnames
"""
edgehostname_list = []
for eachHostname in hostname_list:
edgehostnameDetails = {}
edgehostnameDetails['cnameType'] = 'EDGE_HOSTNAME'
edgehostnameDetails['edgeHostnameId'] = edge_hostname_id
edgehostnameDetails['cnameFrom'] = eachHostname
edgehostname_list.append(edgehostnameDetails)
return edgehostname_list
def checkEdgeHostname(self, session, edge_hostname):
"""
Function to check the validity of edge_hostname
"""
dns_zone = ''
record_name_substring = edge_hostname
if str(edge_hostname).endswith('edgekey.net'):
dns_zone = 'edgekey.net'
record_name_substring = str(edge_hostname).split('.edgekey.net')[0]
elif str(edge_hostname).endswith('edgesuite.net'):
dns_zone = 'edgesuite.net'
record_name_substring = str(edge_hostname).split('.edgesuite.net')[0]
get_edgehostnameid_url = 'https://' + self.access_hostname + "/hapi/v1/edge-hostnames?recordNameSubstring=" + record_name_substring + '&dnsZone=' + dns_zone
if '?' in get_edgehostnameid_url:
get_edgehostnameid_url = get_edgehostnameid_url + self.account_switch_key
else:
#Replace & with ? if there is no query string in URL and DO NOT override object property account_switch_key
account_switch_key = self.account_switch_key.translate(self.account_switch_key.maketrans('&','?'))
get_edgehostnameid_url = get_edgehostnameid_url + account_switch_key
edgehostname_response = session.get(get_edgehostnameid_url)
return edgehostname_response
def updatePropertyHostname(self, session, contractId, groupId, propertyId, edgehostnamedata):
"""
Function to update property hostnames and edgehostname
"""
update_prop_hostname_url = 'https://' + self.access_hostname + '/papi/v1/properties/' + propertyId + '/versions/1/hostnames?contractId=' + contractId + '&groupId=' + groupId + '&validateHostnames=true'
if '?' in update_prop_hostname_url:
update_prop_hostname_url = update_prop_hostname_url + self.account_switch_key
else:
account_switch_key = self.account_switch_key.translate(self.account_switch_key.maketrans('&','?'))
update_prop_hostname_url = update_prop_hostname_url + account_switch_key
update_prop_hostname_response = session.put(update_prop_hostname_url, data=edgehostnamedata, headers=self.headers)
return update_prop_hostname_response
def pollActivationStatus(self, session, contractId, groupId, propertyId, activationId):
"""
Function to poll Activation Status
"""
poll_activation_url = 'https://' + self.access_hostname + '/papi/v1/properties/' + propertyId + '/activations/' + activationId + '?contractId=' + contractId + '&groupId=' + groupId
if '?' in poll_activation_url:
poll_activation_url = poll_activation_url + self.account_switch_key
else:
account_switch_key = self.account_switch_key.translate(self.account_switch_key.maketrans('&','?'))
poll_activation_url = poll_activation_url + account_switch_key
poll_activation_response = session.get(poll_activation_url)
return poll_activation_response
def activateConfiguration(self, session,propertyName, contractId, groupId, propertyId, version, network, emailList, notes):
"""
Function to activate a configuration or property
Parameters
----------
session : <string>
An EdgeGrid Auth akamai session object
property_name: <string>
Property or configuration name
version : <int>
version number to be activated
network : <string>
network type on which configuration has to be activated on
emailList : <string>
List of emailIds separated by comma to be notified
notes : <string>
Notes that describes the activation reason
Returns
-------
activationResponse : activationResponse
(activationResponse) Object with all response details.
"""
emails = json.dumps(emailList)
activationDetails = """
{
"propertyVersion": %s,
"network": "%s",
"note": "%s",
"notifyEmails": %s,
"complianceRecord": {
"noncomplianceReason": "NO_PRODUCTION_TRAFFIC"
}
} """ % (version,network,notes,emails)
actUrl = 'https://' + self.access_hostname + '/papi/v0/properties/'+ propertyId + '/activations/?contractId=' + contractId +'&groupId=' + groupId + '&acknowledgeAllWarnings=true'
if '?' in actUrl:
actUrl = actUrl + self.account_switch_key
else:
account_switch_key = self.account_switch_key.translate(self.account_switch_key.maketrans('&','?'))
actUrl = actUrl + account_switch_key
activationResponse = session.post(actUrl, data=activationDetails, headers=self.headers)
try:
if activationResponse.status_code == 400 and activationResponse.json()['detail'].find('following activation warnings must be acknowledged'):
acknowledgeWarnings = []
for eachWarning in activationResponse.json()['warnings']:
#print("WARNING: " + eachWarning['detail'])
acknowledgeWarnings.append(eachWarning['messageId'])
acknowledgeWarningsJson = json.dumps(acknowledgeWarnings)
print("Automatically acknowledging warnings")
#The details has to be within the three double quote or comment format
updatedactivationDetails = """
{
"propertyVersion": %s,
"network": "%s",
"note": "%s",
"notifyEmails": %s,
"acknowledgeWarnings": %s,
"complianceRecord": {
"noncomplianceReason": "NO_PRODUCTION_TRAFFIC"
}
} """ % (version,network,notes,emails,acknowledgeWarningsJson)
print('Activating property ' + propertyName + ' v1 on ' + network)
updatedactivationResponse = session.post(actUrl,data=updatedactivationDetails,headers=self.headers)
if updatedactivationResponse.status_code == 201:
#print("Here is the activation link, that can be used to track:\n")
#print(updatedactivationResponse.json()['activationLink'])
return updatedactivationResponse
else:
return updatedactivationResponse
elif activationResponse.status_code == 422 and activationResponse.json()['detail'].find('version already activated'):
print("Property version already activated")
return activationResponse
elif activationResponse.status_code == 404 and activationResponse.json()['detail'].find('unable to locate'):
print("The system was unable to locate the requested version of configuration")
return activationResponse
except KeyError:
print("Looks like there is some error in configuration. Unable to activate configuration at this moment\n")
return activationResponse
def getProductsByContract(self, session, contractId):
"""
Function to get product ids for a contract
"""
get_products_url = 'https://' + self.access_hostname + '/papi/v1/products?contractId=' + str(contractId)
if '?' in get_products_url:
get_products_url = get_products_url + self.account_switch_key
else:
#Replace & with ? if there is no query string in URL and DO NOT override object property account_switch_key
account_switch_key = self.account_switch_key.translate(self.account_switch_key.maketrans('&','?'))
get_products_url = get_products_url + account_switch_key
get_products_response = session.get(get_products_url)
return get_products_response
def createEdgehostname(self, session, productId, domainPrefix, secureNetwork, certEnrollmentId, slotNumber, contractId, groupId):
"""
Function to Create a edgehostname
"""
#Create a edgehostname
create_edgehostname_url = 'https://' + self.access_hostname + '/papi/v1/edgehostnames?contractId=' + contractId + '&groupId=' + groupId
if '?' in create_edgehostname_url:
create_edgehostname_url = create_edgehostname_url + self.account_switch_key
else:
account_switch_key = self.account_switch_key.translate(self.account_switch_key.maketrans('&','?'))
create_edgehostname_url = create_edgehostname_url + account_switch_key
if secureNetwork == 'ENHANCED_TLS':
edgehostname_content = """
{
"productId": "%s",
"domainPrefix": "%s",
"domainSuffix": "edgekey.net",
"secureNetwork": "%s",
"ipVersionBehavior": "IPV4",
"certEnrollmentId": %s,
"slotNumber": %s
}""" % (productId, domainPrefix, secureNetwork, certEnrollmentId, slotNumber)
print('\nTrying to create edge_hostname: ' + domainPrefix + '.edgekey.net')
elif secureNetwork == 'STANDARD_TLS':
edgehostname_content = """
{
"productId": "%s",
"domainPrefix": "%s",
"domainSuffix": "edgesuite.net",
"secureNetwork": "%s",
"ipVersionBehavior": "IPV4"
}""" % (productId, domainPrefix, secureNetwork)
print('\nTrying to create edge_hostname: ' + domainPrefix + '.edgesuite.net')
#print(edgehostname_content)
create_edgehostname_response = session.post(create_edgehostname_url,data=edgehostname_content,headers=self.headers)
if create_edgehostname_response.status_code == 201:
edgehostnameId = create_edgehostname_response.json()['edgeHostnameLink'].split('?')[0].split('/')[4]
print('Successfully created edge_hostname: ' + str(edgehostnameId))
return edgehostnameId
else:
print(json.dumps(create_edgehostname_response.json(), indent=4))
return -1
def create_enrollment(self, session, contractId, data, allowDuplicateCn=True):
"""
Function to Create an Enrollment
Parameters
-----------
session : <string>
An EdgeGrid Auth akamai session object
Returns
-------
create_enrollmentRespose : create_enrollmentRespose
(create_enrollmentRespose) Object with all details
"""
headers = {
"Content-Type": "application/vnd.akamai.cps.enrollment.v4+json",
"Accept": "application/vnd.akamai.cps.enrollment-status.v1+json"
}
create_enrollment_url = 'https://' + self.access_hostname + \
'/cps/v2/enrollments?contractId=' + contractId
if allowDuplicateCn:
create_enrollment_url = create_enrollment_url + '&allow-duplicate-cn=true'
if '?' in create_enrollment_url:
create_enrollment_url = create_enrollment_url + self.account_switch_key
else:
#Replace & with ? if there is no query string in URL
self.account_switch_key = self.account_switch_key.translate(self.account_switch_key.maketrans('&','?'))
create_enrollment_url = create_enrollment_url + self.account_switch_key
create_enrollment_response = session.post(create_enrollment_url, data=data, headers=headers)
return create_enrollment_response
def getWafConfigurations(self, session):
    """Fetch all Application Security (WAF) configurations."""
    endpoint = 'https://' + self.access_hostname + '/appsec/v1/configs/'
    switch_key = self.account_switch_key
    if '?' not in endpoint:
        # No query string yet, so the key's leading '&' must become '?'.
        switch_key = switch_key.translate(switch_key.maketrans('&', '?'))
    return session.get(endpoint + switch_key)
def getWafConfigVersions(self, session, config_id):
    """List the first 10 versions of one WAF configuration, with details."""
    endpoint = ('https://' + self.access_hostname + '/appsec/v1/configs/'
                + str(config_id) + '/versions?page=1&pageSize=10&detail=true')
    switch_key = self.account_switch_key
    if '?' not in endpoint:
        # No query string yet, so the key's leading '&' must become '?'.
        switch_key = switch_key.translate(switch_key.maketrans('&', '?'))
    return session.get(endpoint + switch_key)
def createWafConfigVersion(self, session, config_id, base_version):
    """Create a new WAF configuration version cloned from base_version."""
    endpoint = ('https://' + self.access_hostname + '/appsec/v1/configs/'
                + str(config_id) + '/versions')
    switch_key = self.account_switch_key
    if '?' not in endpoint:
        # No query string yet, so the key's leading '&' must become '?'.
        switch_key = switch_key.translate(switch_key.maketrans('&', '?'))
    version_info = """
{
"createFromVersion": %s,
"ruleUpdate": false
}""" % (base_version)
    return session.post(endpoint + switch_key, data=version_info, headers=self.headers)
def getMatchTarget(self, session, config_id, version, target_id):
    """Fetch a single WAF match target, including child object names."""
    endpoint = ('https://' + self.access_hostname + '/appsec/v1/configs/'
                + str(config_id) + '/versions/' + str(version)
                + '/match-targets/' + str(target_id)
                + '?includeChildObjectName=true')
    switch_key = self.account_switch_key
    if '?' not in endpoint:
        # No query string yet, so the key's leading '&' must become '?'.
        switch_key = switch_key.translate(switch_key.maketrans('&', '?'))
    return session.get(endpoint + switch_key)
def modifyMatchTarget(self, session, config_id, version, target_id, data):
    """Replace a WAF match target definition via PUT."""
    endpoint = ('https://' + self.access_hostname + '/appsec/v1/configs/'
                + str(config_id) + '/versions/' + str(version)
                + '/match-targets/' + str(target_id))
    switch_key = self.account_switch_key
    if '?' not in endpoint:
        # No query string yet, so the key's leading '&' must become '?'.
        switch_key = switch_key.translate(switch_key.maketrans('&', '?'))
    return session.put(endpoint + switch_key, data=data, headers=self.headers)
def getWafSelectedHosts(self, session, config_id, version):
    """List the hostnames protected by one WAF configuration version."""
    endpoint = ('https://' + self.access_hostname + '/appsec/v1/configs/'
                + str(config_id) + '/versions/' + str(version)
                + '/selected-hostnames')
    switch_key = self.account_switch_key
    if '?' not in endpoint:
        # No query string yet, so the key's leading '&' must become '?'.
        switch_key = switch_key.translate(switch_key.maketrans('&', '?'))
    return session.get(endpoint + switch_key)
def modifyWafHosts(self, session, config_id, version, data):
    """Replace (PUT) the hostnames protected by a WAF configuration version."""
    endpoint = ('https://' + self.access_hostname + '/appsec/v1/configs/'
                + str(config_id) + '/versions/' + str(version)
                + '/selected-hostnames')
    switch_key = self.account_switch_key
    if '?' not in endpoint:
        # No query string yet, so the key's leading '&' must become '?'.
        switch_key = switch_key.translate(switch_key.maketrans('&', '?'))
    return session.put(endpoint + switch_key, data=data, headers=self.headers)
def activateWafPolicy(self, session, config_id, version, network, emails, note="Onboard CLI Activation"):
    """Activate a WAF configuration version on the given network."""
    endpoint = 'https://' + self.access_hostname + '/appsec/v1/activations'
    switch_key = self.account_switch_key
    if '?' not in endpoint:
        # No query string yet, so the key's leading '&' must become '?'.
        switch_key = switch_key.translate(switch_key.maketrans('&', '?'))
    emailList = json.dumps(emails)
    data = """
{
"action": "ACTIVATE",
"network": "%s",
"note": "%s",
"notificationEmails": %s,
"activationConfigs": [
{
"configId": %s,
"configVersion": %s
}
]
}""" % (network, note, emailList, config_id, version)
    return session.post(endpoint + switch_key, data=data, headers=self.headers)
def pollWafActivationStatus(self, session, contractId, groupId, propertyId, activationId):
    """Poll the status of a WAF activation by its activation id.

    contractId, groupId and propertyId are accepted for signature parity
    with the property-activation poller but are not used by this AppSec
    endpoint.
    """
    poll_activation_url = ('https://' + self.access_hostname
                           + '/appsec/v1/activations/' + str(activationId))
    if '?' in poll_activation_url:
        poll_activation_url = poll_activation_url + self.account_switch_key
    else:
        # Translate into a local so the object's key stays untouched.
        account_switch_key = self.account_switch_key.translate(
            self.account_switch_key.maketrans('&', '?'))
        poll_activation_url = poll_activation_url + account_switch_key
    # Dataset-export residue that was fused onto the return line has been
    # removed; the import below belongs to the following copy of this module.
    return session.get(poll_activation_url)
import json
class apiCallsWrapper(object):
    """Wrapper around the Akamai PAPI / HAPI / CPS / AppSec REST APIs.

    Every method takes an EdgeGrid-authenticated requests session plus the
    identifiers the endpoint needs, builds the URL (appending the account
    switch key when one was supplied) and returns the raw response object,
    leaving status handling to the caller unless noted otherwise.
    """

    def __init__(self, access_hostname, account_switch_key):
        # Host of the Akamai API gateway, e.g. 'akab-xxxx.luna.akamaiapis.net'.
        self.access_hostname = access_hostname
        # Stored with a leading '&' so it can be appended directly to URLs
        # that already carry a query string; _with_switch_key swaps the '&'
        # for '?' when the URL has none.
        if account_switch_key is not None:
            self.account_switch_key = '&accountSwitchKey=' + account_switch_key
        else:
            self.account_switch_key = ''
        # BUG FIX: the original assigned this dict to a LOCAL variable named
        # 'headers', so every later 'self.headers' reference raised
        # AttributeError.
        self.headers = {
            "Content-Type": "application/json"
        }

    def _with_switch_key(self, url):
        """Return url with the account switch key appended.

        Uses '&accountSwitchKey=...' when the URL already has a query string
        and '?accountSwitchKey=...' otherwise.  Centralizes the translate()
        logic every method used to copy-paste (and that create_enrollment
        got wrong by writing the translated value back onto self).
        """
        if not self.account_switch_key:
            return url
        if '?' in url:
            return url + self.account_switch_key
        # Stored key starts with '&'; swap it for '?' without mutating self.
        return url + '?' + self.account_switch_key[1:]

    def checkAuthorization(self, session):
        """Return the grants/permissions attached to the current credentials."""
        url = self._with_switch_key(
            'https://' + self.access_hostname + '/-/client-api/active-grants/implicit')
        return session.get(url)

    def createCpcode(self, session, contractId, groupId, productId, cpcode_name):
        """Create a CP code named cpcode_name for productId."""
        payload = json.dumps({"productId": productId, "cpcodeName": cpcode_name})
        url = self._with_switch_key(
            'https://' + self.access_hostname + '/papi/v1/cpcodes?contractId='
            + contractId + '&groupId=' + groupId)
        return session.post(url, data=payload, headers=self.headers)

    def createProperty(self, session, contractId, groupId, productId, property_name):
        """Create a new (empty) property named property_name."""
        payload = json.dumps({"productId": productId, "propertyName": property_name})
        url = self._with_switch_key(
            'https://' + self.access_hostname + '/papi/v1/properties?contractId='
            + contractId + '&groupId=' + groupId)
        return session.post(url, data=payload, headers=self.headers)

    def updatePropertyRules(self, session, contractId, groupId, propertyId, ruleFormat, ruletree):
        """PUT the rule tree onto version 1 of a property (rule validation off).

        ruleFormat selects the papirules media type; 'latest' uses the
        generic latest+json type.
        """
        headers = {"Content-Type": "application/vnd.akamai.papirules.latest+json"}
        if ruleFormat != 'latest':
            headers["Content-Type"] = ("application/vnd.akamai.papirules."
                                       + str(ruleFormat) + "+json")
        url = self._with_switch_key(
            'https://' + self.access_hostname + '/papi/v1/properties/' + propertyId
            + '/versions/1/rules?contractId=' + contractId + '&groupId=' + groupId
            + '&validateRules=false')
        return session.put(url, data=ruletree, headers=headers)

    def createEdgehostnameArray(self, hostname_list, edge_hostname_id):
        """Map every hostname in hostname_list onto one existing edge hostname."""
        return [{'cnameType': 'EDGE_HOSTNAME',
                 'edgeHostnameId': edge_hostname_id,
                 'cnameFrom': hostname}
                for hostname in hostname_list]

    def checkEdgeHostname(self, session, edge_hostname):
        """Look up edge_hostname in HAPI to verify that it exists."""
        edge_hostname = str(edge_hostname)
        dns_zone = ''
        record_name = edge_hostname
        # Only the two standard secure-delivery zones are recognised; any
        # other suffix is searched with an empty dnsZone, as before.
        for zone in ('edgekey.net', 'edgesuite.net'):
            if edge_hostname.endswith(zone):
                dns_zone = zone
                record_name = edge_hostname.split('.' + zone)[0]
                break
        url = self._with_switch_key(
            'https://' + self.access_hostname
            + '/hapi/v1/edge-hostnames?recordNameSubstring=' + record_name
            + '&dnsZone=' + dns_zone)
        return session.get(url)

    def updatePropertyHostname(self, session, contractId, groupId, propertyId, edgehostnamedata):
        """PUT the hostname/edge-hostname mapping onto version 1 of a property."""
        url = self._with_switch_key(
            'https://' + self.access_hostname + '/papi/v1/properties/' + propertyId
            + '/versions/1/hostnames?contractId=' + contractId + '&groupId=' + groupId
            + '&validateHostnames=true')
        return session.put(url, data=edgehostnamedata, headers=self.headers)

    def pollActivationStatus(self, session, contractId, groupId, propertyId, activationId):
        """GET the status of one property activation."""
        url = self._with_switch_key(
            'https://' + self.access_hostname + '/papi/v1/properties/' + propertyId
            + '/activations/' + activationId + '?contractId=' + contractId
            + '&groupId=' + groupId)
        return session.get(url)

    def activateConfiguration(self, session, propertyName, contractId, groupId, propertyId, version, network, emailList, notes):
        """Activate a property version, auto-acknowledging activation warnings.

        Parameters
        ----------
        session : requests.Session
            EdgeGrid-authenticated session.
        propertyName : str
            Property name (used only for log output).
        version : int
            Version number to activate.
        network : str
            Network to activate on ('STAGING' or 'PRODUCTION').
        emailList : list
            Email addresses to notify.
        notes : str
            Activation note.

        Returns
        -------
        requests.Response
            The final activation response.  BUG FIX: the original fell off
            the end and returned None for unhandled status codes, including
            a plain 201 success.
        """
        emails = json.dumps(emailList)
        activationDetails = """
        {
            "propertyVersion": %s,
            "network": "%s",
            "note": "%s",
            "notifyEmails": %s,
            "complianceRecord": {
                "noncomplianceReason": "NO_PRODUCTION_TRAFFIC"
            }
        } """ % (version, network, notes, emails)
        actUrl = self._with_switch_key(
            'https://' + self.access_hostname + '/papi/v0/properties/' + propertyId
            + '/activations/?contractId=' + contractId + '&groupId=' + groupId
            + '&acknowledgeAllWarnings=true')
        activationResponse = session.post(actUrl, data=activationDetails, headers=self.headers)
        try:
            status = activationResponse.status_code
            # Only parse the body for the error statuses we inspect below.
            detail = (activationResponse.json().get('detail', '')
                      if status in (400, 404, 422) else '')
            # BUG FIX: the original used str.find() as a boolean; find()
            # returns -1 (truthy) when the text is absent, so the checks
            # matched almost anything.  Use substring membership instead.
            if status == 400 and 'following activation warnings must be acknowledged' in detail:
                acknowledgeWarnings = [w['messageId']
                                       for w in activationResponse.json()['warnings']]
                print("Automatically acknowledging warnings")
                updatedactivationDetails = """
        {
            "propertyVersion": %s,
            "network": "%s",
            "note": "%s",
            "notifyEmails": %s,
            "acknowledgeWarnings": %s,
            "complianceRecord": {
                "noncomplianceReason": "NO_PRODUCTION_TRAFFIC"
            }
        } """ % (version, network, notes, emails, json.dumps(acknowledgeWarnings))
                print('Activating property ' + propertyName + ' v1 on ' + network)
                return session.post(actUrl, data=updatedactivationDetails, headers=self.headers)
            if status == 422 and 'version already activated' in detail:
                print("Property version already activated")
            elif status == 404 and 'unable to locate' in detail:
                print("The system was unable to locate the requested version of configuration")
        except KeyError:
            print("Looks like there is some error in configuration. Unable to activate configuration at this moment\n")
        return activationResponse

    def getProductsByContract(self, session, contractId):
        """List the product ids available on a contract."""
        url = self._with_switch_key(
            'https://' + self.access_hostname + '/papi/v1/products?contractId='
            + str(contractId))
        return session.get(url)

    def createEdgehostname(self, session, productId, domainPrefix, secureNetwork, certEnrollmentId, slotNumber, contractId, groupId):
        """Create an edge hostname; return its id, or -1 on failure.

        ENHANCED_TLS hostnames go on edgekey.net (and require the
        certificate enrollment id and slot); STANDARD_TLS hostnames go on
        edgesuite.net.
        """
        url = self._with_switch_key(
            'https://' + self.access_hostname + '/papi/v1/edgehostnames?contractId='
            + contractId + '&groupId=' + groupId)
        if secureNetwork == 'ENHANCED_TLS':
            edgehostname_content = """
        {
            "productId": "%s",
            "domainPrefix": "%s",
            "domainSuffix": "edgekey.net",
            "secureNetwork": "%s",
            "ipVersionBehavior": "IPV4",
            "certEnrollmentId": %s,
            "slotNumber": %s
        }""" % (productId, domainPrefix, secureNetwork, certEnrollmentId, slotNumber)
            print('\nTrying to create edge_hostname: ' + domainPrefix + '.edgekey.net')
        elif secureNetwork == 'STANDARD_TLS':
            edgehostname_content = """
        {
            "productId": "%s",
            "domainPrefix": "%s",
            "domainSuffix": "edgesuite.net",
            "secureNetwork": "%s",
            "ipVersionBehavior": "IPV4"
        }""" % (productId, domainPrefix, secureNetwork)
            print('\nTrying to create edge_hostname: ' + domainPrefix + '.edgesuite.net')
        else:
            # BUG FIX: the original fell through with edgehostname_content
            # unbound (UnboundLocalError) for any other network value.
            print('Unsupported secureNetwork: ' + str(secureNetwork))
            return -1
        response = session.post(url, data=edgehostname_content, headers=self.headers)
        if response.status_code == 201:
            # e.g. '/papi/v1/edgehostnames/ehn_123?...' -> 'ehn_123'
            edgehostnameId = response.json()['edgeHostnameLink'].split('?')[0].split('/')[4]
            print('Successfully created edge_hostname: ' + str(edgehostnameId))
            return edgehostnameId
        print(json.dumps(response.json(), indent=4))
        return -1

    def create_enrollment(self, session, contractId, data, allowDuplicateCn=True):
        """Create a CPS certificate enrollment.

        BUG FIX: the original rewrote self.account_switch_key in place
        (translating '&' to '?'), corrupting the key for every later call.
        """
        headers = {
            "Content-Type": "application/vnd.akamai.cps.enrollment.v4+json",
            "Accept": "application/vnd.akamai.cps.enrollment-status.v1+json"
        }
        url = ('https://' + self.access_hostname
               + '/cps/v2/enrollments?contractId=' + contractId)
        if allowDuplicateCn:
            url = url + '&allow-duplicate-cn=true'
        return session.post(self._with_switch_key(url), data=data, headers=headers)

    def getWafConfigurations(self, session):
        """List all Application Security (WAF) configurations."""
        url = self._with_switch_key(
            'https://' + self.access_hostname + '/appsec/v1/configs/')
        return session.get(url)

    def getWafConfigVersions(self, session, config_id):
        """List the first 10 versions of a WAF configuration (with details)."""
        url = self._with_switch_key(
            'https://' + self.access_hostname + '/appsec/v1/configs/'
            + str(config_id) + '/versions?page=1&pageSize=10&detail=true')
        return session.get(url)

    def createWafConfigVersion(self, session, config_id, base_version):
        """Create a new WAF configuration version cloned from base_version."""
        url = self._with_switch_key(
            'https://' + self.access_hostname + '/appsec/v1/configs/'
            + str(config_id) + '/versions')
        version_info = """
        {
            "createFromVersion": %s,
            "ruleUpdate": false
        }""" % (base_version)
        return session.post(url, data=version_info, headers=self.headers)

    def getMatchTarget(self, session, config_id, version, target_id):
        """Fetch one WAF match target (child object names included)."""
        url = self._with_switch_key(
            'https://' + self.access_hostname + '/appsec/v1/configs/'
            + str(config_id) + '/versions/' + str(version)
            + '/match-targets/' + str(target_id) + '?includeChildObjectName=true')
        return session.get(url)

    def modifyMatchTarget(self, session, config_id, version, target_id, data):
        """Replace one WAF match target definition via PUT."""
        url = self._with_switch_key(
            'https://' + self.access_hostname + '/appsec/v1/configs/'
            + str(config_id) + '/versions/' + str(version)
            + '/match-targets/' + str(target_id))
        return session.put(url, data=data, headers=self.headers)

    def getWafSelectedHosts(self, session, config_id, version):
        """List the hostnames protected by a WAF configuration version."""
        url = self._with_switch_key(
            'https://' + self.access_hostname + '/appsec/v1/configs/'
            + str(config_id) + '/versions/' + str(version) + '/selected-hostnames')
        return session.get(url)

    def modifyWafHosts(self, session, config_id, version, data):
        """Replace the hostnames protected by a WAF configuration version."""
        url = self._with_switch_key(
            'https://' + self.access_hostname + '/appsec/v1/configs/'
            + str(config_id) + '/versions/' + str(version) + '/selected-hostnames')
        return session.put(url, data=data, headers=self.headers)

    def activateWafPolicy(self, session, config_id, version, network, emails, note="Onboard CLI Activation"):
        """Activate a WAF configuration version on the given network."""
        url = self._with_switch_key(
            'https://' + self.access_hostname + '/appsec/v1/activations')
        data = """
        {
            "action": "ACTIVATE",
            "network": "%s",
            "note": "%s",
            "notificationEmails": %s,
            "activationConfigs": [
                {
                    "configId": %s,
                    "configVersion": %s
                }
            ]
        }""" % (network, note, json.dumps(emails), config_id, version)
        return session.post(url, data=data, headers=self.headers)

    def pollWafActivationStatus(self, session, contractId, groupId, propertyId, activationId):
        """GET the status of one WAF activation.

        contractId/groupId/propertyId are unused by this AppSec endpoint
        but kept for signature parity with pollActivationStatus.
        """
        url = self._with_switch_key(
            'https://' + self.access_hostname + '/appsec/v1/activations/'
            + str(activationId))
        return session.get(url)
import numpy as np
from scipy import signal
from scipy.fftpack import fft, ifft
def band_pass_filter(x, Fs, Fp1, Fp2):
    """Bandpass filter for the signal x.

    An acausal fft algorithm is applied (i.e. no phase shift). The filter
    function is constructed from a Hamming window (window used in "firwin2"
    function) to avoid ripples in the frequency response (windowing is a
    smoothing in frequency domain).

    Parameters
    ----------
    x : 1d array
        Signal to filter.
    Fs : float
        Sampling rate in Hz.
    Fp1 : float
        Low cut-off frequency in Hz.
    Fp2 : float
        High cut-off frequency in Hz.

    Returns
    -------
    xf : 1d array
        The filtered signal, same length as ``x``.

    Notes
    -----
    The stop-band edges default to ``Fs1 = Fp1 - 0.5`` Hz and
    ``Fs2 = Fp2 + 0.5`` Hz, giving the magnitude response::

                      ----------
                    / |        | \\
                   /  |        |  \\
        ----------    |        |   ----------
                 Fs1 Fp1      Fp2 Fs2
    """
    Fp1 = float(Fp1)
    Fp2 = float(Fp2)
    # Default transition-band edges in Hz.
    Fs1 = Fp1 - 0.5
    Fs2 = Fp2 + 0.5
    assert x.ndim == 1
    # firwin2 needs an even tap count here; pad odd-length input with one
    # extra sample (value 1, matching the original implementation) and trim
    # the result back to the original length at the end.
    Norig = len(x)
    if Norig % 2 == 1:
        x = np.r_[x, 1]
    # Normalize band edges to the Nyquist frequency.
    Ns1 = Fs1 / (Fs / 2)
    Ns2 = Fs2 / (Fs / 2)
    Np1 = Fp1 / (Fs / 2)
    Np2 = Fp2 / (Fs / 2)
    # FIR filter whose length matches the (possibly padded) signal.
    N = len(x)
    B = signal.firwin2(N, [0, Ns1, Np1, Np2, Ns2, 1], [0, 0, 1, 1, 0, 0])
    # Zero-phase filtering: multiply the spectrum by the magnitude response.
    H = np.abs(fft(B))
    xf = np.real(ifft(fft(x) * H))
    # (The original also re-trimmed the local ``x`` here -- a dead store,
    # removed.)
    return xf[:Norig]
def low_pass_filter(x, Fs, Fp):
    """Lowpass filter for the signal x.

    An acausal fft algorithm is applied (i.e. no phase shift). The filter
    function is constructed from a Hamming window (window used in "firwin2"
    function) to avoid ripples in the frequency response (windowing is a
    smoothing in frequency domain).

    Parameters
    ----------
    x : 1d array
        Signal to filter.
    Fs : float
        Sampling rate in Hz.
    Fp : float
        Cut-off frequency in Hz; the stop band starts at ``Fp + 0.5`` Hz.

    Returns
    -------
    xf : 1d array
        The filtered signal, same length as ``x``.
    """
    Fp = float(Fp)
    assert x.ndim == 1
    # firwin2 needs an even tap count here; pad odd-length input with one
    # extra sample (value 1, matching the original implementation) and trim
    # the result back to the original length at the end.
    Norig = len(x)
    if Norig % 2 == 1:
        x = np.r_[x, 1]
    # Normalize band edges to the Nyquist frequency.
    Ns = (Fp + 0.5) / (Fs / 2)
    Np = Fp / (Fs / 2)
    # FIR filter whose length matches the (possibly padded) signal.
    N = len(x)
    B = signal.firwin2(N, [0, Np, Ns, 1], [1, 1, 0, 0])
    # Zero-phase filtering: multiply the spectrum by the magnitude response.
    H = np.abs(fft(B))
    xf = np.real(ifft(fft(x) * H))
    # (The original also re-trimmed the local ``x`` here -- a dead store,
    # removed.)
    return xf[:Norig]
def high_pass_filter(x, Fs, Fp):
    """Highpass filter for the signal x.

    An acausal fft algorithm is applied (i.e. no phase shift). The filter
    function is constructed from a Hamming window (window used in "firwin2"
    function) to avoid ripples in the frequency response (windowing is a
    smoothing in frequency domain).

    Parameters
    ----------
    x : 1d array
        Signal to filter.
    Fs : float
        Sampling rate in Hz.
    Fp : float
        Cut-off frequency in Hz; the stop band ends at ``Fp - 0.5`` Hz.

    Returns
    -------
    xf : 1d array
        The filtered signal, same length as ``x``.
    """
    Fp = float(Fp)
    assert x.ndim == 1
    # A high-pass FIR needs an ODD tap count (non-zero gain at Nyquist);
    # pad even-length input with one extra sample (value 1, matching the
    # original implementation) and trim the result afterwards.
    Norig = len(x)
    if Norig % 2 == 0:
        x = np.r_[x, 1]
    # Normalize band edges to the Nyquist frequency.
    Ns = (Fp - 0.5) / (Fs / 2)
    Np = Fp / (Fs / 2)
    # FIR filter whose length matches the (possibly padded) signal.
    N = len(x)
    B = signal.firwin2(N, [0, Ns, Np, 1], [0, 0, 1, 1])
    # Zero-phase filtering: multiply the spectrum by the magnitude response.
    H = np.abs(fft(B))
    xf = np.real(ifft(fft(x) * H))
    # (The original also re-trimmed the local ``x`` here -- a dead store,
    # removed.  Dataset-export residue fused onto the return line was
    # stripped; the import below belongs to the following duplicate copy
    # of this module.)
    return xf[:Norig]
import numpy as np
from scipy import signal
from scipy.fftpack import fft, ifft
def band_pass_filter(x, Fs, Fp1, Fp2):
    """Zero-phase FFT band-pass filter.

    Builds an FIR band-pass (firwin2, Hamming window) with pass band
    [Fp1, Fp2] Hz and 0.5 Hz transition bands on each side, then applies
    only its magnitude response in the frequency domain so no phase shift
    is introduced.  The result has the same length as ``x``.
    """
    Fp1, Fp2 = float(Fp1), float(Fp2)
    stop_lo, stop_hi = Fp1 - 0.5, Fp2 + 0.5
    assert x.ndim == 1
    n_orig = len(x)
    # An even number of taps is required; pad odd-length input with a 1.
    padded = x if n_orig % 2 == 0 else np.r_[x, 1]
    nyq = Fs / 2
    edges = [0, stop_lo / nyq, Fp1 / nyq, Fp2 / nyq, stop_hi / nyq, 1]
    gains = [0, 0, 1, 1, 0, 0]
    taps = signal.firwin2(len(padded), edges, gains)
    # Magnitude-only response -> zero-phase filtering.
    response = np.abs(fft(taps))
    filtered = np.real(ifft(fft(padded) * response))
    return filtered[:n_orig]
def low_pass_filter(x, Fs, Fp):
    """Low-pass filter the 1-D signal ``x`` with a zero-phase FFT method.

    An acausal FFT algorithm is applied (no phase shift). The filter is
    built with ``scipy.signal.firwin2`` (Hamming window) to avoid ripples
    in the frequency response.

    Parameters
    ----------
    x : 1d array
        Signal to filter.
    Fs : float
        Sampling rate in Hz.
    Fp : float
        Cut-off frequency in Hz.

    Returns
    -------
    xf : array
        Filtered copy of ``x``.

    Notes
    -----
    The response is 1 up to ``Fp`` and rolls off to 0 at ``Fp + 0.5`` Hz.
    """
    Fp = float(Fp)
    assert x.ndim == 1
    n_orig = len(x)
    # Pad odd-length input to an even length; an even (Type II) linear-phase
    # FIR is fine here because the low-pass gain at Nyquist is zero.
    if n_orig % 2 == 1:
        x = np.r_[x, 1]
    # Frequencies normalized to the Nyquist rate, as firwin2 expects.
    nyq = Fs / 2
    freq = [0, Fp / nyq, (Fp + 0.5) / nyq, 1]
    gain = [1, 1, 0, 0]
    taps = signal.firwin2(len(x), freq, gain)
    # Zero-phase filtering: multiply by the magnitude response only.
    response = np.abs(fft(taps))
    filtered = np.real(ifft(fft(x) * response))
    # Drop the padding sample, if any.
    return filtered[:n_orig]
def high_pass_filter(x, Fs, Fp):
    """Highpass filter for the signal x.

    An acausal fft algorithm is applied (i.e. no phase shift). The filter
    function is constructed from a Hamming window (window used in "firwin2"
    function) to avoid ripples in the frequency response (windowing is a
    smoothing in frequency domain).

    Parameters
    ----------
    x : 1d array
        Signal to filter
    Fs : float
        sampling rate
    Fp : float
        cut-off frequency

    Returns
    -------
    xf : array
        x filtered

    Notes
    -----
    The stop band (0 .. Fp-0.5 Hz) and pass band (Fp .. Nyquist) are
    defined in Hz as::

                       -----------------------
                     /|
                    / |
                   /  |
                  /   |
        ----------    |
                      |
                Fp-0.5  Fp
    """
    Fp = float(Fp)
    assert x.ndim == 1
    # Make x ODD. A linear-phase FIR with an even tap count (Type II) must
    # have zero gain at Nyquist, which a highpass cannot, so the tap count
    # (== len(x) below) has to be odd here — unlike the low/band-pass cases.
    Norig = len(x)
    if Norig % 2 == 0:
        x = np.r_[x, 1]
    # Normalize frequencies to the Nyquist rate (Fs / 2), as firwin2 expects.
    Ns = (Fp - 0.5) / (Fs / 2)
    Np = Fp / (Fs / 2)
    # Construct the filter function H(f)
    N = len(x)
    B = signal.firwin2(N, [0, Ns, Np, 1], [0, 0, 1, 1])
    # Make zero-phase filter function: apply only the magnitude response.
    H = np.abs(fft(B))
    xf = np.real(ifft(fft(x) * H))
    xf = xf[:Norig]
    x = x[:Norig]
return xf | 0.921079 | 0.816918 |
from pygame import MOUSEBUTTONDOWN, MOUSEBUTTONUP
from pygame.sprite import Sprite, Group
from pygame.image import load
from pygame.color import Color
from pygame.rect import Rect
from pygame.surface import Surface
from Utils.Panel import Element
def _collide(ra, rb):
c1 = ra.top <= rb.top <= ra.bottom
c2 = ra.left <= rb.left <= ra.right
return c1 and c2
def collide(a, b, doKill=False):
    """Return the members of group *b* whose rects overlap ``a.rect``.

    Containment is checked in both directions so that either rect holding
    the other's corner counts as a hit. When *doKill* is true the hits are
    also removed from *b*.
    """
    hits = [other for other in b
            if _collide(a.rect, other.rect) or _collide(other.rect, a.rect)]
    if doKill:
        for other in hits:
            b.remove(other)
    return hits
class Button(Element):
    """A clickable image button with focused / unfocused / pressed visuals.

    *image* may be a path prefix (loads '<prefix>_NF.png' and '<prefix>_F.png'
    from disk) or a pygame Surface, in which case the focused image is derived
    by inverting the RGB channels of the unfocused one.
    """
    def __init__(self, image, pos, command=None):
        if isinstance(image, str):
            # Pre-made focused / non-focused artwork on disk.
            self.NonFocusImg = load(image + '_NF.png').convert()
            self.FocusImg = load(image + '_F.png').convert()
        else:
            # Build the non-focused image on a white background.
            self.NonFocusImg = Surface(image.get_size())
            self.NonFocusImg.fill((255, 255, 255))
            self.NonFocusImg.blit(image, (0, 0))
            # Focused image: per-pixel RGB inversion of the non-focused one
            # (alpha preserved).
            self.FocusImg = Surface(image.get_size())
            self.FocusImg.blit(self.NonFocusImg, (0, 0))
            X, Y = self.FocusImg.get_size()
            for x in range(X):
                for y in range(Y):
                    color = self.FocusImg.get_at((x, y))
                    color = Color(255 - color.r, 255 - color.g, 255 - color.b, color.a)
                    self.FocusImg.set_at((x, y), color)
        self._ready = False
        # NOTE(review): command is only bound when provided, so calling
        # .command() on a Button created without one raises AttributeError —
        # confirm callers always pass a command.
        if command is not None:
            self.command = command
        super().__init__(Surface(self.FocusImg.get_size()), pos)
        self.image.blit(self.NonFocusImg, (0, 0))

    def V_ready(self):
        # Visual-only "pressed" feedback: make the button translucent.
        self.image.set_alpha(30)

    def V_Unready(self):
        # Restore full opacity.
        self.image.set_alpha(255)

    def ready(self):
        """Mark the button pressed and show the pressed visual."""
        self._ready = True
        self.V_ready()

    def unReady(self):
        """Clear the pressed state and restore full opacity."""
        self._ready = False
        self.V_Unready()

    def isReady(self):
        """True while the button is in the pressed state."""
        return self._ready

    def Focus(self):
        """Show the hover (focused) artwork."""
        self.image.blit(self.FocusImg, (0, 0))

    def UnFocus(self):
        """Show the normal artwork."""
        self.image.blit(self.NonFocusImg, (0, 0))
class Buttons(Group):
    """Sprite group of Button objects with click and pressed-state lookups."""

    def GetClick(self, pos) -> Button:
        """Return the first button under *pos*, or None."""
        # Probe with a 1x1 sprite at the mouse position.
        probe = Sprite()
        probe.rect = Rect(*pos, 1, 1)
        hits = collide(probe, self)
        return hits[0] if hits else None

    def Buttons(self) -> list[Button]:
        """All buttons currently in the group."""
        return self.sprites()

    def GetReady(self) -> Button:
        """Return the first button in the pressed ('ready') state, or None."""
        pressed = [button for button in self.Buttons() if button.isReady()]
        return pressed[0] if pressed else None
def ButtonControl(eve, buttons, Pos, PressButton):
    """Drive hover/press/release state for *buttons* from one pygame event.

    eve         -- the pygame event being processed
    buttons     -- a Buttons group
    Pos         -- current mouse position (x, y)
    PressButton -- mouse button state sequence; index 0 is the left button
    Returns the pressed button's command() result on release, else None.
    """
    # Reset every button's visuals; the state machine below re-applies them.
    for btn in buttons.Buttons():
        if btn.isReady():
            btn.V_Unready()
        else:
            btn.UnFocus()
    btn = buttons.GetClick(Pos)
    btn2 = buttons.GetReady()
    if btn and btn2:
        if btn2 == btn:
            # Cursor is still over the pressed button: keep the pressed look.
            btn.V_ready()
    elif btn:
        # Hovering an unpressed button.
        btn.Focus()
    if eve.type == MOUSEBUTTONDOWN:
        if PressButton[0]:
            # Left button went down over a button: arm it.
            btn = buttons.GetClick(Pos)
            if btn:
                btn.ready()
    elif eve.type == MOUSEBUTTONUP:
        if not PressButton[0]:
            btn = buttons.GetClick(Pos)
            if btn and btn.isReady():
                # Released over the armed button: fire its command.
                return btn.command()
            elif buttons.GetReady():
                # Released elsewhere: disarm whatever was pressed.
                buttons.GetReady().unReady()
return None | Frame/Utils/ButtonElement.py | from pygame import MOUSEBUTTONDOWN, MOUSEBUTTONUP
from pygame.sprite import Sprite, Group
from pygame.image import load
from pygame.color import Color
from pygame.rect import Rect
from pygame.surface import Surface
from Utils.Panel import Element
def _collide(ra, rb):
    # True when rb's top-left corner lies inside rect ra (inclusive bounds).
    c1 = ra.top <= rb.top <= ra.bottom
    c2 = ra.left <= rb.left <= ra.right
    return c1 and c2

def collide(a, b, doKill=False):
    """Return members of group *b* overlapping a.rect; optionally remove them."""
    ans = []
    for o in b:
        # Check both directions so either corner containment counts as a hit.
        if _collide(a.rect, o.rect) or _collide(o.rect, a.rect):
            ans.append(o)
    if doKill:
        for o in ans:
            b.remove(o)
    return ans

class Button(Element):
    """Clickable image button with focused / unfocused / pressed visuals."""
    def __init__(self, image, pos, command=None):
        if isinstance(image, str):
            # Pre-made focused / non-focused artwork on disk.
            self.NonFocusImg = load(image + '_NF.png').convert()
            self.FocusImg = load(image + '_F.png').convert()
        else:
            # Derive both images from the given Surface; focused = inverted RGB.
            self.NonFocusImg = Surface(image.get_size())
            self.NonFocusImg.fill((255, 255, 255))
            self.NonFocusImg.blit(image, (0, 0))
            self.FocusImg = Surface(image.get_size())
            self.FocusImg.blit(self.NonFocusImg, (0, 0))
            X, Y = self.FocusImg.get_size()
            for x in range(X):
                for y in range(Y):
                    color = self.FocusImg.get_at((x, y))
                    color = Color(255 - color.r, 255 - color.g, 255 - color.b, color.a)
                    self.FocusImg.set_at((x, y), color)
        self._ready = False
        # NOTE(review): command is only bound when provided; .command() on a
        # Button built without one raises AttributeError.
        if command is not None:
            self.command = command
        super().__init__(Surface(self.FocusImg.get_size()), pos)
        self.image.blit(self.NonFocusImg, (0, 0))
    def V_ready(self):
        # Pressed look: translucent.
        self.image.set_alpha(30)
    def V_Unready(self):
        self.image.set_alpha(255)
    def ready(self):
        self._ready = True
        self.V_ready()
    def unReady(self):
        self._ready = False
        self.V_Unready()
    def isReady(self):
        return self._ready
    def Focus(self):
        self.image.blit(self.FocusImg, (0, 0))
    def UnFocus(self):
        self.image.blit(self.NonFocusImg, (0, 0))

class Buttons(Group):
    """Sprite group of Button objects."""
    def GetClick(self, pos) -> Button:
        # Probe with a 1x1 sprite at the mouse position.
        mouse = Sprite()
        mouse.rect = Rect(*pos, 1, 1)
        click = collide(mouse, self)
        return None if not click else click[0]
    def Buttons(self) -> list[Button]:
        return self.sprites()
    def GetReady(self) -> Button:
        ready = [btn for btn in self.Buttons() if btn.isReady()]
        return None if not ready else ready[0]

def ButtonControl(eve, buttons, Pos, PressButton):
    """Drive hover/press/release state for *buttons* from one pygame event;
    returns the released button's command() result, else falls through."""
    for btn in buttons.Buttons():
        if btn.isReady():
            btn.V_Unready()
        else:
            btn.UnFocus()
    btn = buttons.GetClick(Pos)
    btn2 = buttons.GetReady()
    if btn and btn2:
        if btn2 == btn:
            btn.V_ready()
    elif btn:
        btn.Focus()
    if eve.type == MOUSEBUTTONDOWN:
        if PressButton[0]:
            btn = buttons.GetClick(Pos)
            if btn:
                btn.ready()
    elif eve.type == MOUSEBUTTONUP:
        if not PressButton[0]:
            btn = buttons.GetClick(Pos)
            if btn and btn.isReady():
                return btn.command()
            elif buttons.GetReady():
                buttons.GetReady().unReady()
return None | 0.463687 | 0.122549 |
import tweepy, sys, os
from datetime import datetime
from time import tzname
from math import *
from random import randint, choice, seed
from PIL import Image, ImageDraw, ImageFont
# The PolyFriends Bot
# Find the bot at https://twitter.com/PolyFriendsBot!
# Created by <NAME> 2020

# Files (resolved relative to this script via real_path()).
KEYS_PATH = "keys.txt"  # Twitter API credentials, one per line
NAMES = "resources/names.txt"  # candidate friend names, one per line
HOBBIES = "resources/hobbies.txt"  # candidate hobbies, one per line
COLORS = "resources/colors.txt"  # candidate favorite colors, one per line
FONT = "resources/PixelSplitter-Bold.ttf"  # font for the name label

# Custom seed for RNG; empty string means "do not reseed".
SEED = ""
IMG_SIZE = (1000,1000)  # output image dimensions in pixels
def main(argv):
    """Generate one PolyFriend image and optionally tweet it.

    Command-line flags (matched anywhere in argv):
      --tweet       upload the result via the Twitter API
      --date_stamp  save to a timestamped filename instead of img.png
    """
    tweet = False
    save_date = False
    for arg in argv:
        if arg == "--tweet":
            tweet = True
        elif arg == "--date_stamp":
            save_date = True
    time = datetime.now()
    file = real_path(get_filename(time) if save_date else "img.png")
    if SEED != "":
        # Reproducible output when a custom seed is configured.
        seed(SEED)
    generator = PolyFriendGenerator(IMG_SIZE, 5, rand_text(NAMES), real_path(FONT), file)
    generator.generate_image()
    generator.save_image()
    status = generate_status(generator.name, time)
    print(status)
    if tweet:
        # keys.txt lines 1-4 — presumably consumer key/secret then access
        # token/secret (the tweepy call order below); verify against keys.txt.
        keys = list(getkeys())
        auth = tweepy.OAuthHandler(keys[0],keys[1])
        auth.set_access_token(keys[2],keys[3])
        api = tweepy.API(auth)
        try:
            api.update_with_media(file, status)
            print("Tweeted image.")
        except tweepy.TweepError as e:
            print(f"Tweepy error:\n{e.reason}")
def generate_status(name, time):
    """Build the tweet text: name, a random hobby and color, and a timestamp."""
    hobby = rand_text(HOBBIES, True)
    color = rand_text(COLORS)
    # Zero-pad single-digit minutes so 9:05 does not render as 9:5.
    minutes = f"{time.minute:02d}"
    return f"This is {name}, and they like {hobby}!\nTheir favorite color is \"{color}\"\nCreated on {time.month}/{time.day}/{time.year} at {time.hour}:{minutes} {tzname[0]}"
def rand_text(file, lower=False):
    """Return a random line (newline stripped) from *file*.

    The path is resolved next to this script via real_path(). When *lower*
    is true the chosen line is lower-cased. Exits the process if the file
    is missing.
    """
    try:
        # Context manager closes the handle; the original left it leaked open.
        with open(real_path(file), "r") as fh:
            lines = [s.replace('\n', '') for s in fh.readlines()]
    except FileNotFoundError:
        print(f"Could not find {file}")
        exit()
    text = choice(lines)
    return text.lower() if lower else text
def getkeys():
    """Yield up to four credential lines from keys.txt, newlines stripped.

    Exits the process if keys.txt is missing.
    """
    try:
        # Context manager closes the handle; the original left it leaked open.
        with open(real_path(KEYS_PATH), 'r') as fh:
            lines = fh.readlines()
    except FileNotFoundError:
        print("keys.txt not found")
        exit()
    # Only the first four lines hold credentials; slice instead of the
    # original index-range loop with an in-loop bound check.
    for line in lines[:4]:
        yield line.replace('\n', '')
def real_path(file):
    """Return *file* resolved relative to this script's own directory."""
    base = os.path.dirname(os.path.realpath(__file__))
    return f"{base}/{file}"
def get_filename(time):
    """Build a timestamped image file name from a datetime (no zero padding)."""
    stamp = f"{time.month}{time.day}{time.year}{time.hour}{time.minute}{time.second}"
    return f"img_{stamp}.png"
class PolyFriendGenerator:
    """Draws one randomized low-poly character plus its name label onto a
    PIL image in HSV mode, then saves it as RGB PNG.

    All random choices (sizes, shapes, angles, colors) are made once in
    __init__, so generate_image() is deterministic afterwards.
    """
    def __init__(self, size, width, name, font_name, save_name):
        self.image = Image.new("HSV",size,self.rand_pastel())
        self.draw = ImageDraw.Draw(self.image)
        self.font = ImageFont.truetype(font_name, 75)
        self.name = name
        # Direct pixel access; used later to locate the drawn outline.
        self.pixels = self.image.load()
        self.size = size
        self.width = width  # stroke width for outlines and limbs
        self.save_name = save_name
        self.c = (size[0]/2, size[1]/2)  # image center
        # Sizes and lengths are ratios of the image dimensions to preserve the same look.
        x, y = size[0], size[1]
        self.b_size = randint(int(y/17), int(y/9))
        self.h_size = randint(int(y/10), int(y/5.5))
        self.b_length = randint(int(-y/18),int(y/10))
        self.leg_length = randint(int(y/14),int(y/10))
        self.arm_length = self.leg_length
        self.feet_length = randint(15,50)
        self.arm_angles = (randint(-60, 75), randint(-60, 75))
        # Chord start/end angles used for the pupils.
        self.eye_angles = [(-90, 90), (90, -90)]
        self.finger_angles = [0,50,-50]
        self.leg_angle = randint(75,90)
        # Scanline on which arms are attached to the body outline.
        self.arm_yoff = int(self.c[1]+size[1]/18)
        self.leg_xoff = self.b_size/8
        self.eye_xoff = self.h_size/4.5
        # randint(0,3) is falsy only for 0, so round eyes 3 times out of 4.
        self.eye_circle = bool(randint(0,3))
        self.stroke = (randint(0,255),255,90)  # outline color (HSV tuple)
        self.border_width = 40
        # Sin/cos factors for regular pentagon vertices.
        c1, c2 = cos(2*pi/5), -cos(pi/5)
        s1, s2 = sin(2*pi/5), sin(4*pi/5)
        h, b = self.h_size, self.b_size
        # Each head shape maps a center (x, y) to a vertex list.
        self.head_shapes = [
            # Squares
            lambda x,y: [(x-h,y-h), (x+h,y-h), (x+h,y+h), (x-h,y+h)],
            lambda x,y: [(x,y-h), (x+h,y), (x,y+h), (x-h,y)],
            # Pentagons
            lambda x,y: [(x,y+h), (x+h*s1,y+h*c1), (x+h*s2,y+h*c2), (x-h*s2,y+h*c2), (x-h*s1,y+h*c1)],
            lambda x,y: [(x,y-h), (x+h*s1,y-h*c1), (x+h*s2*1.5,y+h), (x-h*s2*1.5,y+h), (x-h*s1,y-h*c1)],
            # Triangles
            lambda x,y: [(x,y-h), (x-h,y+h), (x+h,y+h)],
            lambda x,y: [(x,y+h), (x-h*1.5,y-h), (x+h*1.5,y-h)],
            # Trapezoid
            lambda x,y: [(x-h/1.5,y-h/1.5), (x+h/1.5,y-h/1.5), (x+h,y+h), (x-h,y+h)],
            # Rhombus
            lambda x,y: [(x,y-h), (x+h*1.5,y), (x,y+h), (x-h*1.5,y)],
        ]
        # Each body shape maps (x, y, extra length) to a vertex list.
        self.body_shapes = [
            # Trapezoids
            lambda x,y,l: [(x-b/4,y), (x+b/4,y), (x+b,y+b*2+l), (x-b,y+b*2+l)],
            lambda x,y,l: [(x-b/1.5,y), (x+b/1.5,y), (x+b/4,y+b*2+l), (x-b/4,y+b*2+l)],
            # Square
            lambda x,y,l: [(x-b/2,y), (x+b/2,y), (x+b/2,y+b*2+l), (x-b/2,y+b*2+l)],
            # Kite
            lambda x,y,l: [(x-b/4,y), (x+b/4,y), (x+b/2,y+b*1.5+l), (x,y+b*2+l), (x-b/2,y+b*1.5+l)],
            # Diamond
            lambda x,y,l: [(x,y+b*2+l), (x+b*s1,y+b*c1), (x+b*s2,y), (x-b*s2,y), (x-b*s1,y+b*c1)],
        ]
        # Each eyebrow style maps (x, y) to [left-brow points, right-brow points].
        self.eyebrows = [
            lambda x,y: [[],[]],
            lambda x,y: [[(x-h/10,y-h/15),(x+h/10,y+h/20)],[(x-h/10,y+h/20),(x+h/10,y-h/15)]],
            lambda x,y: [[(x-h/10,y+h/15),(x+h/10,y-h/20)],[(x-h/10,y-h/20),(x+h/10,y+h/15)]],
            lambda x,y: [[(x-h/10,y),(x+h/10,y)],[(x-h/10,y),(x+h/10,y)]],
            lambda x,y: [[(x-h/10,y),(x,y-h/10), (x,y-h/10),(x+h/10,y)], [(x-h/10,y),(x,y-h/10), (x,y-h/10),(x+h/10,y)]]
        ]
        self.h_points = choice(self.head_shapes)(self.c[0], self.c[1]-h)
        self.b_points = choice(self.body_shapes)(self.c[0], self.c[1], 100)

    def save_image(self):
        """Convert HSV -> RGB and save to the configured path as PNG."""
        self.image = self.image.convert(mode="RGB")
        self.image.save(self.save_name, "PNG")

    def generate_image(self):
        """Render the full character: head, body, border and name label."""
        self.draw_head()
        self.draw_body()
        self.draw_border()
        self.draw.text((self.border_width,self.border_width), self.name, self.stroke, self.font)

    # Draw functions
    def draw_head(self):
        """Fill and outline the head polygon, then add the eyes."""
        self.draw.polygon(self.h_points, self.rand_pastel(), self.stroke)
        self.polygon(self.h_points, self.stroke, self.width)
        self.draw_eyes()

    def draw_body(self):
        """Fill and outline the body polygon, then add arms and legs."""
        self.draw.polygon(self.b_points,self.rand_pastel(),self.stroke)
        self.polygon(self.b_points,self.stroke, self.width)
        self.draw_limbs()

    def draw_eyes(self):
        """Draw both eyes (circle or square), pupils and eyebrows."""
        eye_r = self.h_size/5.25
        eyebrows = choice(self.eyebrows)
        for i in range(2):
            # i == 0 -> left eye, i == 1 -> right eye.
            eye_x = self.eye_xoff if bool(i) else -self.eye_xoff
            eye_pos = (self.c[0] - eye_x, self.c[1] - self.h_size)
            if self.eye_circle:
                self.ellipse(eye_pos, eye_r)
            else:
                self.draw.rectangle(self.bound(eye_pos, eye_r), (0,0,255), self.stroke, self.width-2)
            # Puplis
            angles = choice(self.eye_angles)
            self.draw.chord(self.bound(eye_pos, eye_r-6), angles[0], angles[1], (0,0,0))
            # Eyebrows
            self.draw.line(eyebrows(eye_pos[0],eye_pos[1]-self.h_size/3.5)[i],self.stroke, self.width)

    def draw_limbs(self):
        """Attach arms (with fingers) and legs (with feet) to the body outline."""
        # Arms & fingers
        for i in range(2):
            arm = self.arm_points(self.arm_angles[i], self.arm_length, bool(i))
            self.draw.line(arm, self.stroke, self.width)
            for j in range(0,3):
                self.draw.line([arm[1], self.limb_point(arm[1], self.arm_angles[i]+self.finger_angles[j], 18, bool(i))], self.stroke, self.width-1)
        # Legs & feet
        for i in range(2):
            leg = self.leg_points(self.leg_angle, self.leg_length, bool(i))
            self.draw.line(leg, self.stroke, self.width)
            self.draw.line([leg[1], self.limb_point(leg[1], self.leg_angle-90, self.feet_length,bool(i))], self.stroke, self.width)

    def draw_border(self):
        """Draw the thick frame along all four image edges."""
        self.draw.line([(0,0), (self.size[0],0)], self.stroke, self.border_width)
        self.draw.line([(self.size[0],0), (self.size[0],self.size[1])], self.stroke, self.border_width)
        self.draw.line([(self.size[0],self.size[1]), (0,self.size[1])], self.stroke, self.border_width)
        self.draw.line([(0,self.size[1]), (0,0)], self.stroke, self.border_width)

    # Helper functions
    def arm_points(self, angle, length, right):
        """Scan the arm scanline from the image edge inward for the first
        stroke-colored pixel (the body outline) and return [shoulder, hand]."""
        r = range(self.size[0]-1)
        for x in reversed(r) if right else r:
            if self.pixels[x, self.arm_yoff] == self.stroke:
                return [(x, self.arm_yoff), self.limb_point((x, self.arm_yoff), angle, length, right)]

    def leg_points(self, angle, length, right):
        """Scan upward from the image bottom for the body outline at the leg's
        x offset and return [hip, foot]."""
        x = self.c[0] + (self.leg_xoff if right else -self.leg_xoff)
        for y in reversed(range(self.size[1]-1)):
            if self.pixels[x, y] == self.stroke:
                return [(x, y), self.limb_point((x, y), angle, length, right)]

    def ellipse(self, c, r):
        """Outlined white circle of radius *r* centered at *c*."""
        self.draw.ellipse(self.bound(c, r), (0,0,255), self.stroke, self.width-2)

    def bound(self, c, r):
        """Axis-aligned bounding box of the radius-*r* square around *c*."""
        return [(c[0] - r, c[1] - r), (c[0] + r, c[1] + r)]

    def polygon(self, points, fill, width):
        """Stroke a closed polygon edge-by-edge (draw.polygon outlines are
        always 1 px wide, so lines are used to honor *width*)."""
        self.draw.line([points[0],points[len(points)-1]],fill,width)
        for i in range(len(points)-1):
            self.draw.line([points[i],points[i+1]],fill,width)

    def limb_point(self, p, deg, dis, right):
        """End point of a limb of length *dis* at angle *deg* from *p*;
        the x component is mirrored for left-side limbs."""
        x, y = int(dis*cos(radians(deg))), int(dis*sin(radians(deg)))+p[1]
        return (x+p[0] if right else -x+p[0], y)

    def rand_pastel(self):
        """Random low-saturation, full-value HSV color."""
        return (randint(0,255),randint(60,85),255)
if __name__ == "__main__":
main(sys.argv) | polyfriends_bot.py | import tweepy, sys, os
from datetime import datetime
from time import tzname
from math import *
from random import randint, choice, seed
from PIL import Image, ImageDraw, ImageFont
# The PolyFriends Bot
# Find the bot at https://twitter.com/PolyFriendsBot!
# Created by <NAME> 2020

# Files (resolved relative to this script via real_path()).
KEYS_PATH = "keys.txt"
NAMES = "resources/names.txt"
HOBBIES = "resources/hobbies.txt"
COLORS = "resources/colors.txt"
FONT = "resources/PixelSplitter-Bold.ttf"
# Custom seed for RNG; empty string means "do not reseed".
SEED = ""
IMG_SIZE = (1000,1000)

def main(argv):
    """Generate one PolyFriend image; --tweet uploads it, --date_stamp
    saves to a timestamped filename instead of img.png."""
    tweet = False
    save_date = False
    for arg in argv:
        if arg == "--tweet":
            tweet = True
        elif arg == "--date_stamp":
            save_date = True
    time = datetime.now()
    file = real_path(get_filename(time) if save_date else "img.png")
    if SEED != "":
        seed(SEED)
    generator = PolyFriendGenerator(IMG_SIZE, 5, rand_text(NAMES), real_path(FONT), file)
    generator.generate_image()
    generator.save_image()
    status = generate_status(generator.name, time)
    print(status)
    if tweet:
        # keys.txt lines 1-4, consumed in tweepy auth order below.
        keys = list(getkeys())
        auth = tweepy.OAuthHandler(keys[0],keys[1])
        auth.set_access_token(keys[2],keys[3])
        api = tweepy.API(auth)
        try:
            api.update_with_media(file, status)
            print("Tweeted image.")
        except tweepy.TweepError as e:
            print(f"Tweepy error:\n{e.reason}")

def generate_status(name, time):
    """Build the tweet text: name, random hobby/color, and a timestamp."""
    hobby, color = rand_text(HOBBIES, True), rand_text(COLORS)
    # Zero-pad single-digit minutes.
    minutes = time.minute if len(str(time.minute)) != 1 else "0" + str(time.minute)
    return f"This is {name}, and they like {hobby}!\nTheir favorite color is \"{color}\"\nCreated on {time.month}/{time.day}/{time.year} at {time.hour}:{minutes} {tzname[0]}"

def rand_text(file, lower=False):
    """Return a random (optionally lower-cased) line from *file*; exits if missing."""
    try:
        text = choice([s.replace('\n','') for s in open(real_path(file),"r").readlines()])
        return text.lower() if lower else text
    except FileNotFoundError:
        print(f"Could not find {file}")
        exit()

def getkeys():
    """Yield up to four credential lines from keys.txt; exits if missing."""
    try:
        lines = open(real_path(KEYS_PATH),'r').readlines()
    except FileNotFoundError:
        print("keys.txt not found")
        exit()
    for i in range(len(lines)):
        if i < 4: yield lines[i].replace('\n','')

def real_path(file):
    """Resolve *file* relative to this script's directory."""
    return f"{os.path.dirname(os.path.realpath(__file__))}/{file}"

def get_filename(time):
    """Timestamped image file name (no zero padding)."""
    return f"img_{time.month}{time.day}{time.year}{time.hour}{time.minute}{time.second}.png"

class PolyFriendGenerator:
    """Draws one randomized low-poly character and name label on a PIL HSV
    image. All random choices happen in __init__."""
    def __init__(self, size, width, name, font_name, save_name):
        self.image = Image.new("HSV",size,self.rand_pastel())
        self.draw = ImageDraw.Draw(self.image)
        self.font = ImageFont.truetype(font_name, 75)
        self.name = name
        # Direct pixel access; used to locate the drawn outline later.
        self.pixels = self.image.load()
        self.size = size
        self.width = width
        self.save_name = save_name
        self.c = (size[0]/2, size[1]/2)
        # Sizes and lengths are ratios of the image dimensions to preserve the same look.
        x, y = size[0], size[1]
        self.b_size = randint(int(y/17), int(y/9))
        self.h_size = randint(int(y/10), int(y/5.5))
        self.b_length = randint(int(-y/18),int(y/10))
        self.leg_length = randint(int(y/14),int(y/10))
        self.arm_length = self.leg_length
        self.feet_length = randint(15,50)
        self.arm_angles = (randint(-60, 75), randint(-60, 75))
        self.eye_angles = [(-90, 90), (90, -90)]
        self.finger_angles = [0,50,-50]
        self.leg_angle = randint(75,90)
        self.arm_yoff = int(self.c[1]+size[1]/18)
        self.leg_xoff = self.b_size/8
        self.eye_xoff = self.h_size/4.5
        self.eye_circle = bool(randint(0,3))
        self.stroke = (randint(0,255),255,90)
        self.border_width = 40
        # Sin/cos factors for regular pentagon vertices.
        c1, c2 = cos(2*pi/5), -cos(pi/5)
        s1, s2 = sin(2*pi/5), sin(4*pi/5)
        h, b = self.h_size, self.b_size
        self.head_shapes = [
            # Squares
            lambda x,y: [(x-h,y-h), (x+h,y-h), (x+h,y+h), (x-h,y+h)],
            lambda x,y: [(x,y-h), (x+h,y), (x,y+h), (x-h,y)],
            # Pentagons
            lambda x,y: [(x,y+h), (x+h*s1,y+h*c1), (x+h*s2,y+h*c2), (x-h*s2,y+h*c2), (x-h*s1,y+h*c1)],
            lambda x,y: [(x,y-h), (x+h*s1,y-h*c1), (x+h*s2*1.5,y+h), (x-h*s2*1.5,y+h), (x-h*s1,y-h*c1)],
            # Triangles
            lambda x,y: [(x,y-h), (x-h,y+h), (x+h,y+h)],
            lambda x,y: [(x,y+h), (x-h*1.5,y-h), (x+h*1.5,y-h)],
            # Trapezoid
            lambda x,y: [(x-h/1.5,y-h/1.5), (x+h/1.5,y-h/1.5), (x+h,y+h), (x-h,y+h)],
            # Rhombus
            lambda x,y: [(x,y-h), (x+h*1.5,y), (x,y+h), (x-h*1.5,y)],
        ]
        self.body_shapes = [
            # Trapezoids
            lambda x,y,l: [(x-b/4,y), (x+b/4,y), (x+b,y+b*2+l), (x-b,y+b*2+l)],
            lambda x,y,l: [(x-b/1.5,y), (x+b/1.5,y), (x+b/4,y+b*2+l), (x-b/4,y+b*2+l)],
            # Square
            lambda x,y,l: [(x-b/2,y), (x+b/2,y), (x+b/2,y+b*2+l), (x-b/2,y+b*2+l)],
            # Kite
            lambda x,y,l: [(x-b/4,y), (x+b/4,y), (x+b/2,y+b*1.5+l), (x,y+b*2+l), (x-b/2,y+b*1.5+l)],
            # Diamond
            lambda x,y,l: [(x,y+b*2+l), (x+b*s1,y+b*c1), (x+b*s2,y), (x-b*s2,y), (x-b*s1,y+b*c1)],
        ]
        self.eyebrows = [
            lambda x,y: [[],[]],
            lambda x,y: [[(x-h/10,y-h/15),(x+h/10,y+h/20)],[(x-h/10,y+h/20),(x+h/10,y-h/15)]],
            lambda x,y: [[(x-h/10,y+h/15),(x+h/10,y-h/20)],[(x-h/10,y-h/20),(x+h/10,y+h/15)]],
            lambda x,y: [[(x-h/10,y),(x+h/10,y)],[(x-h/10,y),(x+h/10,y)]],
            lambda x,y: [[(x-h/10,y),(x,y-h/10), (x,y-h/10),(x+h/10,y)], [(x-h/10,y),(x,y-h/10), (x,y-h/10),(x+h/10,y)]]
        ]
        self.h_points = choice(self.head_shapes)(self.c[0], self.c[1]-h)
        self.b_points = choice(self.body_shapes)(self.c[0], self.c[1], 100)
    def save_image(self):
        """Convert HSV -> RGB and save as PNG."""
        self.image = self.image.convert(mode="RGB")
        self.image.save(self.save_name, "PNG")
    def generate_image(self):
        """Render head, body, border and name label."""
        self.draw_head()
        self.draw_body()
        self.draw_border()
        self.draw.text((self.border_width,self.border_width), self.name, self.stroke, self.font)
    # Draw functions
    def draw_head(self):
        self.draw.polygon(self.h_points, self.rand_pastel(), self.stroke)
        self.polygon(self.h_points, self.stroke, self.width)
        self.draw_eyes()
    def draw_body(self):
        self.draw.polygon(self.b_points,self.rand_pastel(),self.stroke)
        self.polygon(self.b_points,self.stroke, self.width)
        self.draw_limbs()
    def draw_eyes(self):
        """Draw both eyes (circle or square), pupils and eyebrows."""
        eye_r = self.h_size/5.25
        eyebrows = choice(self.eyebrows)
        for i in range(2):
            eye_x = self.eye_xoff if bool(i) else -self.eye_xoff
            eye_pos = (self.c[0] - eye_x, self.c[1] - self.h_size)
            if self.eye_circle:
                self.ellipse(eye_pos, eye_r)
            else:
                self.draw.rectangle(self.bound(eye_pos, eye_r), (0,0,255), self.stroke, self.width-2)
            # Puplis
            angles = choice(self.eye_angles)
            self.draw.chord(self.bound(eye_pos, eye_r-6), angles[0], angles[1], (0,0,0))
            # Eyebrows
            self.draw.line(eyebrows(eye_pos[0],eye_pos[1]-self.h_size/3.5)[i],self.stroke, self.width)
    def draw_limbs(self):
        # Arms & fingers
        for i in range(2):
            arm = self.arm_points(self.arm_angles[i], self.arm_length, bool(i))
            self.draw.line(arm, self.stroke, self.width)
            for j in range(0,3):
                self.draw.line([arm[1], self.limb_point(arm[1], self.arm_angles[i]+self.finger_angles[j], 18, bool(i))], self.stroke, self.width-1)
        # Legs & feet
        for i in range(2):
            leg = self.leg_points(self.leg_angle, self.leg_length, bool(i))
            self.draw.line(leg, self.stroke, self.width)
            self.draw.line([leg[1], self.limb_point(leg[1], self.leg_angle-90, self.feet_length,bool(i))], self.stroke, self.width)
    def draw_border(self):
        """Thick frame along all four image edges."""
        self.draw.line([(0,0), (self.size[0],0)], self.stroke, self.border_width)
        self.draw.line([(self.size[0],0), (self.size[0],self.size[1])], self.stroke, self.border_width)
        self.draw.line([(self.size[0],self.size[1]), (0,self.size[1])], self.stroke, self.border_width)
        self.draw.line([(0,self.size[1]), (0,0)], self.stroke, self.border_width)
    # Helper functions
    def arm_points(self, angle, length, right):
        """Find the body outline on the arm scanline; return [shoulder, hand]."""
        r = range(self.size[0]-1)
        for x in reversed(r) if right else r:
            if self.pixels[x, self.arm_yoff] == self.stroke:
                return [(x, self.arm_yoff), self.limb_point((x, self.arm_yoff), angle, length, right)]
    def leg_points(self, angle, length, right):
        """Scan up from the bottom for the body outline; return [hip, foot]."""
        x = self.c[0] + (self.leg_xoff if right else -self.leg_xoff)
        for y in reversed(range(self.size[1]-1)):
            if self.pixels[x, y] == self.stroke:
                return [(x, y), self.limb_point((x, y), angle, length, right)]
    def ellipse(self, c, r):
        self.draw.ellipse(self.bound(c, r), (0,0,255), self.stroke, self.width-2)
    def bound(self, c, r):
        """Bounding box of the radius-*r* square around center *c*."""
        return [(c[0] - r, c[1] - r), (c[0] + r, c[1] + r)]
    def polygon(self, points, fill, width):
        """Stroke a closed polygon edge-by-edge to honor *width*."""
        self.draw.line([points[0],points[len(points)-1]],fill,width)
        for i in range(len(points)-1):
            self.draw.line([points[i],points[i+1]],fill,width)
    def limb_point(self, p, deg, dis, right):
        """Limb end point at angle *deg*, length *dis*; x mirrored for the left side."""
        x, y = int(dis*cos(radians(deg))), int(dis*sin(radians(deg)))+p[1]
        return (x+p[0] if right else -x+p[0], y)
    def rand_pastel(self):
        """Random low-saturation, full-value HSV color."""
        return (randint(0,255),randint(60,85),255)

if __name__ == "__main__":
main(sys.argv) | 0.193033 | 0.142202 |
import json
from pathlib import Path
from typing import Any, Tuple
import click
from openff.toolkit.typing.engines.smirnoff import ForceField
from openff.units import unit
from rich import get_console, pretty
from rich.console import NewLine
from rich.padding import Padding
from interchange_regression_utilities.perturb import (
default_perturbation,
enumerate_perturbations,
)
def perturbation_function(attribute_path: str, old_value: Any) -> Tuple[Any, bool]:
    """Return the perturbed value for an attribute path and whether it was
    explicitly overridden; unknown paths fall back to the default rule."""
    overrides = {
        "ConstraintHandler/Constraints/distance": 0.1234 * unit.angstrom,
    }
    try:
        return overrides[attribute_path], True
    except KeyError:
        return default_perturbation(attribute_path, old_value)
@click.command()
@click.option(
    "--force-field",
    "force_field_path",
    help="The path of the force field to perturb.",
    type=click.Path(exists=False, file_okay=True, dir_okay=False),
    required=True,
    default=str(Path("force-fields", "minimal-force-field.offxml")),
    show_default=True,
)
@click.option(
    "--output",
    "output_path",
    help="The path (JSON) to save the list of perturbations to apply to.",
    type=click.Path(exists=False, dir_okay=False, file_okay=True, path_type=Path),
    required=True,
)
def main(force_field_path: Path, output_path: Path):
    """Enumerate perturbations of the given force field and write them to JSON."""
    console = get_console()
    pretty.install(console)
    perturbations, warning_messages = enumerate_perturbations(
        ForceField(force_field_path), perturbation_function
    )
    if len(warning_messages) > 0:
        # First message carries the WARNING prefix; the rest are indented under it.
        console.print(
            *(
                Padding(f"[yellow]WARNING[/yellow] {message}", (0, 0, 0, 0))
                if i == 0
                else Padding(message, (0, 0, 0, 8))
                for i, message in enumerate(warning_messages)
            ),
            NewLine(),
        )
    output_path.parent.mkdir(exist_ok=True, parents=True)
    with output_path.open("w") as file:
        json.dump([value.dict() for value in perturbations], file, indent=2)

if __name__ == "__main__":
main() | value-propagation/enumerate-perturbations.py | import json
from pathlib import Path
from typing import Any, Tuple
import click
from openff.toolkit.typing.engines.smirnoff import ForceField
from openff.units import unit
from rich import get_console, pretty
from rich.console import NewLine
from rich.padding import Padding
from interchange_regression_utilities.perturb import (
default_perturbation,
enumerate_perturbations,
)
def perturbation_function(attribute_path: str, old_value: Any) -> Tuple[Any, bool]:
    """Return (new value, explicitly-overridden?) for an attribute path,
    falling back to the default perturbation rule."""
    new_values = {
        "ConstraintHandler/Constraints/distance": 0.1234 * unit.angstrom,
    }
    if attribute_path in new_values:
        return new_values[attribute_path], True
    return default_perturbation(attribute_path, old_value)

@click.command()
@click.option(
    "--force-field",
    "force_field_path",
    help="The path of the force field to perturb.",
    type=click.Path(exists=False, file_okay=True, dir_okay=False),
    required=True,
    default=str(Path("force-fields", "minimal-force-field.offxml")),
    show_default=True,
)
@click.option(
    "--output",
    "output_path",
    help="The path (JSON) to save the list of perturbations to apply to.",
    type=click.Path(exists=False, dir_okay=False, file_okay=True, path_type=Path),
    required=True,
)
def main(force_field_path: Path, output_path: Path):
    """Enumerate perturbations of the given force field and write them to JSON."""
    console = get_console()
    pretty.install(console)
    perturbations, warning_messages = enumerate_perturbations(
        ForceField(force_field_path), perturbation_function
    )
    if len(warning_messages) > 0:
        # First message carries the WARNING prefix; the rest are indented.
        console.print(
            *(
                Padding(f"[yellow]WARNING[/yellow] {message}", (0, 0, 0, 0))
                if i == 0
                else Padding(message, (0, 0, 0, 8))
                for i, message in enumerate(warning_messages)
            ),
            NewLine(),
        )
    output_path.parent.mkdir(exist_ok=True, parents=True)
    with output_path.open("w") as file:
        json.dump([value.dict() for value in perturbations], file, indent=2)

if __name__ == "__main__":
main() | 0.59843 | 0.254113 |
from urllib import parse
import requests
import logging
import json
import time
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
from veracode_api_signing.exceptions import VeracodeAPISigningException
from veracode_api_signing.plugin_requests import RequestsAuthPluginVeracodeHMAC
from veracode_api_signing.credentials import get_credentials
from veracode_api_signing.regions import get_region_for_api_credential
from .exceptions import VeracodeAPIError
from .log import VeracodeLog as vlog
from .constants import Constants
logger = logging.getLogger(__name__)
class APIHelper():
    """Shared HTTP plumbing for the Veracode XML and REST APIs.

    Resolves the regional base URLs from the configured API credentials and
    provides request helpers with HMAC signing, retries and error handling.
    """
    # Class-level credential/region cache, populated lazily in _get_region_url().
    api_key_id = None
    api_key_secret = None
    region = None

    def __init__(self, proxies=None, debug=False):
        self.baseurl = self._get_baseurl()
        requests.Session().mount(self.baseurl, HTTPAdapter(max_retries=3))
        self.proxies = proxies
        self.base_rest_url = self._get_baseresturl()
        self.retry_seconds = 120  # wait before retrying an XML 204 response
        self.connect_error_msg = "Connection Error"
        # vlog.setup_logging(self,debug=debug)

    # helper functions
    def _get_baseurl(self):
        """Base URL of this region's XML API."""
        return self._get_region_url('xml')

    def _get_baseresturl(self):
        """Base URL of this region's REST API."""
        return self._get_region_url('rest')

    def _get_region_url(self, type):
        """Look up the regional base URL for *type* ('xml' or 'rest'),
        loading credentials and resolving the region on first use."""
        if self.api_key_id is None or self.api_key_secret is None:
            self.api_key_id, self.api_key_secret = get_credentials()
        if self.region is None:
            self.region = get_region_for_api_credential(self.api_key_id)
        if type == 'xml':
            return Constants().REGIONS[self.region]['base_xml_url']
        elif type == 'rest':
            return Constants().REGIONS[self.region]['base_rest_url']

    def _rest_request(self, url, method, params=None, body=None, fullresponse=False, use_base_url=True):
        """Base request method for a REST request.

        Returns the raw Response when *fullresponse* is true, the decoded
        JSON when the body is non-empty, and "" otherwise. Raises
        VeracodeAPIError on connection failure and RequestException on a
        non-OK HTTP status.
        """
        myheaders = {"User-Agent": "api.py"}
        if method in ["POST", "PUT"]:
            myheaders.update({'Content-type': 'application/json'})
        # NOTE(review): method_whitelist was renamed allowed_methods in
        # urllib3 1.26 and removed in 2.0 — confirm the pinned urllib3 version.
        retry_strategy = Retry(total=3,
                               status_forcelist=[429, 500, 502, 503, 504],
                               method_whitelist=["HEAD", "GET", "OPTIONS"]
                               )
        session = requests.Session()
        session.mount(self.base_rest_url, HTTPAdapter(max_retries=retry_strategy))
        if use_base_url:
            url = self.base_rest_url + url
        try:
            if method == "GET":
                # Only GET goes through the retry-mounted session.
                request = requests.Request(method, url, params=params, auth=RequestsAuthPluginVeracodeHMAC(), headers=myheaders)
                prepared_request = request.prepare()
                r = session.send(prepared_request, proxies=self.proxies)
            elif method == "POST":
                r = requests.post(url, params=params, auth=RequestsAuthPluginVeracodeHMAC(), headers=myheaders, data=body)
            elif method == "PUT":
                r = requests.put(url, params=params, auth=RequestsAuthPluginVeracodeHMAC(), headers=myheaders, data=body)
            elif method == "DELETE":
                r = requests.delete(url, params=params, auth=RequestsAuthPluginVeracodeHMAC(), headers=myheaders)
            else:
                raise VeracodeAPIError("Unsupported HTTP method")
        except requests.exceptions.RequestException as e:
            logger.exception(self.connect_error_msg)
            raise VeracodeAPIError(e)
        if not (r.status_code == requests.codes.ok):
            logger.debug("API call returned non-200 HTTP status code: {}".format(r.status_code))
        if not (r.ok):
            logger.debug("Error retrieving data. HTTP status code: {}".format(r.status_code))
            if r.status_code == 401:
                logger.exception("Error [{}]: {} for request {}. Check that your Veracode API account credentials are correct.".format(r.status_code,
                                 r.text, r.request.url))
            else:
                logger.exception("Error [{}]: {} for request {}".
                                 format(r.status_code, r.text, r.request.url))
            raise requests.exceptions.RequestException()
        if fullresponse:
            return r
        elif r.text != "":
            return r.json()
        else:
            return ""

    def _rest_paged_request(self, uri, method, element, params=None):
        """Collect every page of a paged REST collection.

        *element* is the key under '_embedded' holding the page's items.
        """
        # BUG FIX: params defaulted to None and was subscripted below,
        # raising TypeError whenever the caller passed no params.
        if params is None:
            params = {}
        all_data = []
        page = 0
        more_pages = True
        while more_pages:
            params['page'] = page
            page_data = self._rest_request(uri, method, params)
            total_pages = page_data.get('page', {}).get('total_pages', 0)
            data_page = page_data.get('_embedded', {}).get(element, [])
            all_data += data_page
            page += 1
            more_pages = page < total_pages
        return all_data

    def _xml_request(self, url, method, params=None):
        """Base request method for XML APIs; handles what little error
        handling there is around these APIs. Returns the raw response body.
        A 204 response is retried after self.retry_seconds."""
        if method not in ["GET", "POST"]:
            raise VeracodeAPIError("Unsupported HTTP method")
        try:
            session = requests.Session()
            session.mount(self.baseurl, HTTPAdapter(max_retries=3))
            request = requests.Request(method, url, params=params, auth=RequestsAuthPluginVeracodeHMAC(), headers={"User-Agent": "api.py"})
            prepared_request = request.prepare()
            r = session.send(prepared_request, proxies=self.proxies)
            if 200 <= r.status_code <= 299:
                if r.status_code == 204:
                    # Retry after a wait. BUG FIX: this previously called the
                    # nonexistent self._request(), raising AttributeError.
                    time.sleep(self.retry_seconds)
                    return self._xml_request(url, method, params)
                elif r.content is None:
                    logger.debug("HTTP response body empty:\r\n{}\r\n{}\r\n{}\r\n\r\n{}\r\n{}\r\n{}\r\n"
                                 .format(r.request.url, r.request.headers, r.request.body, r.status_code, r.headers, r.content))
                    raise VeracodeAPIError("HTTP response body is empty")
                else:
                    return r.content
            else:
                logger.debug("HTTP error for request:\r\n{}\r\n{}\r\n{}\r\n\r\n{}\r\n{}\r\n{}\r\n"
                             .format(r.request.url, r.request.headers, r.request.body, r.status_code, r.headers, r.content))
                raise VeracodeAPIError("HTTP error: {}".format(r.status_code))
        except requests.exceptions.RequestException as e:
            logger.exception("Connection error")
raise VeracodeAPIError(e) | veracode_api_py/apihelper.py |
from urllib import parse
import requests
import logging
import json
import time
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
from veracode_api_signing.exceptions import VeracodeAPISigningException
from veracode_api_signing.plugin_requests import RequestsAuthPluginVeracodeHMAC
from veracode_api_signing.credentials import get_credentials
from veracode_api_signing.regions import get_region_for_api_credential
from .exceptions import VeracodeAPIError
from .log import VeracodeLog as vlog
from .constants import Constants
logger = logging.getLogger(__name__)
class APIHelper():
    """Low-level helper for the Veracode XML and REST APIs.

    Resolves region-specific base URLs from the configured API credentials
    and provides the request primitives (_rest_request, _rest_paged_request,
    _xml_request) used by the higher-level API classes.
    """
    # Cached at class level so credentials/region are resolved once per
    # process rather than once per instance.
    api_key_id = None
    api_key_secret = None
    region = None

    def __init__(self, proxies=None, debug=False):
        """
        :param proxies: optional proxies dict passed through to requests.
        :param debug: kept for backward compatibility; currently unused.
        """
        self.baseurl = self._get_baseurl()
        requests.Session().mount(self.baseurl, HTTPAdapter(max_retries=3))
        self.proxies = proxies
        self.base_rest_url = self._get_baseresturl()
        self.retry_seconds = 120  # wait between polls when the XML API returns 204
        self.connect_error_msg = "Connection Error"
        # vlog.setup_logging(self,debug=debug)

    # helper functions

    def _get_baseurl(self):
        """Return the regional base URL for the XML APIs."""
        return self._get_region_url('xml')

    def _get_baseresturl(self):
        """Return the regional base URL for the REST APIs."""
        return self._get_region_url('rest')

    def _get_region_url(self, type):
        """Resolve credentials/region (lazily, once) and return the base URL.

        :param type: 'xml' or 'rest'; any other value falls through and
            returns None (callers only pass these two today).
        """
        if self.api_key_id is None or self.api_key_secret is None:
            self.api_key_id, self.api_key_secret = get_credentials()
        if self.region is None:
            self.region = get_region_for_api_credential(self.api_key_id)
        if type == 'xml':
            return Constants().REGIONS[self.region]['base_xml_url']
        elif type == 'rest':
            return Constants().REGIONS[self.region]['base_rest_url']

    def _rest_request(self, url, method, params=None, body=None, fullresponse=False, use_base_url=True):
        """Issue a single REST API request and return the decoded response.

        :param url: path (appended to the regional base URL) or absolute URL.
        :param method: one of GET/POST/PUT/DELETE.
        :param params: query-string parameters.
        :param body: JSON payload for POST/PUT.
        :param fullresponse: if True return the raw Response, not parsed JSON.
        :param use_base_url: prefix url with the regional REST base URL.
        :raises VeracodeAPIError: on connection error or unsupported method.
        :raises requests.exceptions.RequestException: on non-2xx responses.
        """
        myheaders = {"User-Agent": "api.py"}
        if method in ["POST", "PUT"]:
            myheaders.update({'Content-type': 'application/json'})
        retry_kwargs = dict(total=3, status_forcelist=[429, 500, 502, 503, 504])
        try:
            # urllib3 >= 1.26 renamed method_whitelist to allowed_methods;
            # try the new name first and fall back for older versions.
            retry_strategy = Retry(allowed_methods=["HEAD", "GET", "OPTIONS"], **retry_kwargs)
        except TypeError:
            retry_strategy = Retry(method_whitelist=["HEAD", "GET", "OPTIONS"], **retry_kwargs)
        session = requests.Session()
        session.mount(self.base_rest_url, HTTPAdapter(max_retries=retry_strategy))
        if use_base_url:
            url = self.base_rest_url + url
        try:
            if method == "GET":
                request = requests.Request(method, url, params=params,
                                           auth=RequestsAuthPluginVeracodeHMAC(),
                                           headers=myheaders)
                prepared_request = request.prepare()
                r = session.send(prepared_request, proxies=self.proxies)
            elif method == "POST":
                r = requests.post(url, params=params, auth=RequestsAuthPluginVeracodeHMAC(),
                                  headers=myheaders, data=body)
            elif method == "PUT":
                r = requests.put(url, params=params, auth=RequestsAuthPluginVeracodeHMAC(),
                                 headers=myheaders, data=body)
            elif method == "DELETE":
                r = requests.delete(url, params=params, auth=RequestsAuthPluginVeracodeHMAC(),
                                    headers=myheaders)
            else:
                raise VeracodeAPIError("Unsupported HTTP method")
        except requests.exceptions.RequestException as e:
            logger.exception(self.connect_error_msg)
            raise VeracodeAPIError(e)
        if r.status_code != requests.codes.ok:
            logger.debug("API call returned non-200 HTTP status code: {}".format(r.status_code))
        if not r.ok:
            logger.debug("Error retrieving data. HTTP status code: {}".format(r.status_code))
            if r.status_code == 401:
                # logger.error rather than logger.exception: we are not inside
                # an exception handler here, so there is no traceback to attach.
                logger.error("Error [{}]: {} for request {}. Check that your Veracode API account credentials are correct.".format(r.status_code,
                             r.text, r.request.url))
            else:
                logger.error("Error [{}]: {} for request {}".
                             format(r.status_code, r.text, r.request.url))
            raise requests.exceptions.RequestException()
        if fullresponse:
            return r
        elif r.text != "":
            return r.json()
        else:
            return ""

    def _rest_paged_request(self, uri, method, element, params=None):
        """Fetch every page of a paged REST collection and concatenate it.

        :param uri: endpoint path.
        :param method: HTTP method (normally "GET").
        :param element: collection key inside the '_embedded' object.
        :param params: optional extra query parameters (not modified).
        :return: list with the items of all pages.
        """
        all_data = []
        page = 0
        more_pages = True
        # Copy so we neither crash subscripting the params=None default nor
        # mutate the caller's dict when injecting the page number.
        params = dict(params) if params else {}
        while more_pages:
            params['page'] = page
            page_data = self._rest_request(uri, method, params)
            total_pages = page_data.get('page', {}).get('total_pages', 0)
            all_data += page_data.get('_embedded', {}).get(element, [])
            page += 1
            more_pages = page < total_pages
        return all_data

    def _xml_request(self, url, method, params=None):
        """Send a request to a Veracode XML API endpoint; return raw bytes.

        :param url: absolute URL of the XML API endpoint.
        :param method: "GET" or "POST" only.
        :raises VeracodeAPIError: on unsupported method, empty body, non-2xx
            status, or connection error.
        """
        # base request method for XML APIs, handles what little error handling there is around these APIs
        if method not in ["GET", "POST"]:
            raise VeracodeAPIError("Unsupported HTTP method")
        try:
            session = requests.Session()
            session.mount(self.baseurl, HTTPAdapter(max_retries=3))
            request = requests.Request(method, url, params=params,
                                       auth=RequestsAuthPluginVeracodeHMAC(),
                                       headers={"User-Agent": "api.py"})
            prepared_request = request.prepare()
            r = session.send(prepared_request, proxies=self.proxies)
            if 200 <= r.status_code <= 299:
                if r.status_code == 204:
                    # 204: result not ready yet — wait and retry.
                    # NOTE(review): unbounded recursion if 204 repeats forever.
                    time.sleep(self.retry_seconds)
                    # BUG FIX: was self._request(), a method that does not exist.
                    return self._xml_request(url, method, params)
                elif r.content is None:
                    logger.debug("HTTP response body empty:\r\n{}\r\n{}\r\n{}\r\n\r\n{}\r\n{}\r\n{}\r\n"
                                 .format(r.request.url, r.request.headers, r.request.body, r.status_code, r.headers, r.content))
                    raise VeracodeAPIError("HTTP response body is empty")
                else:
                    return r.content
            else:
                logger.debug("HTTP error for request:\r\n{}\r\n{}\r\n{}\r\n\r\n{}\r\n{}\r\n{}\r\n"
                             .format(r.request.url, r.request.headers, r.request.body, r.status_code, r.headers, r.content))
                raise VeracodeAPIError("HTTP error: {}".format(r.status_code))
        except requests.exceptions.RequestException as e:
            logger.exception("Connection error")
            raise VeracodeAPIError(e)
import requests
from source.util.settings import Settings
from source.util.timekeeper import Timestamps
from urllib.request import Request, urlopen
import webbrowser
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import chromedriver_autoinstaller
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
class UpdateNodes:
    """Drives a headless Chrome browser at the dashboard's /update endpoint
    on a fixed interval to trigger mesh-network node updates.
    """

    def __init__(self):
        self.ts = Timestamps()
        self.config = Settings('general.config')
        # Target URL assembled from the [website] section of general.config.
        self.url = 'http://' + self.config.get_setting('website', 'ip_address') + ':' + \
            self.config.get_setting('website', 'port') + '/update'
        self.headers = {'User-Agent': 'Mozilla/5.0'}
        print(self.url)
        # Installs (if needed) a chromedriver matching the local Chrome build.
        chromedriver_autoinstaller.install()
        options = Options()
        # NOTE(review): options.headless is deprecated in Selenium 4.x in
        # favor of options.add_argument('--headless') — confirm the pinned
        # selenium version before changing.
        options.headless = True
        self.driver = webdriver.Chrome(options=options)

    def update(self):
        """Poll the /update page forever, once per configured interval.

        Never returns; each cycle loads the page and waits (up to one
        interval) for the element with id 'update-complete' to appear.
        """
        count = 1
        start = 0
        interval = self.config.get_int_setting('mesh_network', 'query_interval')
        # NOTE(review): this loop busy-waits between intervals (no sleep),
        # so it consumes a full CPU core while idle — consider time.sleep.
        while True:
            if self.ts.get_timestamp() - start > interval:
                print('Mesh Network Query:', count)
                start = self.ts.get_timestamp()
                self.driver.get(self.url)
                try:
                    # 'element' is never used afterwards; the wait itself is
                    # the point — it blocks until the update has rendered.
                    element = WebDriverWait(self.driver, interval).until(
                        EC.presence_of_element_located((By.ID, 'update-complete'))
                    )
                except Exception as e:
                    # On timeout/driver error: log and try again next cycle.
                    print(e)
                    continue
                # finally:
                #     self.driver.quit()
                # webbrowser.open_new(self.url)
                # req = Request(self.url)
                # webpage = urlopen(req).read()
                # print(webpage)
                # print(requests.get(self.url))
                # print(requests.get('http://' + self.config.get_setting('website', 'ip_address') + ':' +
                #                    self.config.get_setting('website', 'port') + '/_dash-layout'))
                # print(requests.get('http://' + self.config.get_setting('website', 'ip_address') + ':' +
                #                    self.config.get_setting('website', 'port') + '/_dash-dependencies'))
                count += 1
def main():
    """Entry point: construct the updater and run its polling loop forever."""
    UpdateNodes().update()
if __name__ == '__main__':
main() | source/network/update_nodes.py | import requests
from source.util.settings import Settings
from source.util.timekeeper import Timestamps
from urllib.request import Request, urlopen
import webbrowser
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import chromedriver_autoinstaller
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
class UpdateNodes:
def __init__(self):
self.ts = Timestamps()
self.config = Settings('general.config')
self.url = 'http://' + self.config.get_setting('website', 'ip_address') + ':' + \
self.config.get_setting('website', 'port') + '/update'
self.headers = {'User-Agent': 'Mozilla/5.0'}
print(self.url)
chromedriver_autoinstaller.install()
options = Options()
options.headless = True
self.driver = webdriver.Chrome(options=options)
def update(self):
count = 1
start = 0
interval = self.config.get_int_setting('mesh_network', 'query_interval')
while True:
if self.ts.get_timestamp() - start > interval:
print('Mesh Network Query:', count)
start = self.ts.get_timestamp()
self.driver.get(self.url)
try:
element = WebDriverWait(self.driver, interval).until(
EC.presence_of_element_located((By.ID, 'update-complete'))
)
except Exception as e:
print(e)
continue
# finally:
# self.driver.quit()
# webbrowser.open_new(self.url)
# req = Request(self.url)
# webpage = urlopen(req).read()
# print(webpage)
# print(requests.get(self.url))
# print(requests.get('http://' + self.config.get_setting('website', 'ip_address') + ':' +
# self.config.get_setting('website', 'port') + '/_dash-layout'))
# print(requests.get('http://' + self.config.get_setting('website', 'ip_address') + ':' +
# self.config.get_setting('website', 'port') + '/_dash-dependencies'))
count += 1
def main():
updater = UpdateNodes()
updater.update()
if __name__ == '__main__':
main() | 0.202917 | 0.046486 |
from typing import Dict,Sequence,List
from dmt.entity import Entity
from dmt.blueprint import Blueprint
from .blueprints.gdfcylinder import GDFCylinderBlueprint
from typing import Dict
from sima.sima.moao import MOAO
from sima.sima.scriptablevalue import ScriptableValue
class GDFCylinder(MOAO):
    """
    Keyword arguments
    -----------------
    name : str
         (default "")
    description : str
         (default "")
    _id : str
         (default "")
    scriptableValues : List[ScriptableValue]
    dimensionalLength : float
         Dimensional length(default 1.0)
    centerX : float
         Global x-coordinate(default 0.0)
    centerY : float
         Global y-coordinate(default 0.0)
    radius : float
         Radius of cyllinder(default 40.0)
    numberOfRadialPanels : int
         Number of panels around the circumference(default 20)
    depth : float
         Depth of cylinder (1 means equidistant)(default 20.0)
    numberOfVerticalPanels : int
         Number of depth levels(default 10)
    exponent : float
         Exponent in depth distribution(default 2.0)
    """

    def __init__(self, name="", description="", _id="", dimensionalLength=1.0, centerX=0.0, centerY=0.0, radius=40.0, numberOfRadialPanels=20, depth=20.0, numberOfVerticalPanels=10, exponent=2.0, **kwargs):
        super().__init__(**kwargs)
        self.name = name
        self.description = description
        self._id = _id
        self.scriptableValues = list()
        self.dimensionalLength = dimensionalLength
        self.centerX = centerX
        self.centerY = centerY
        self.radius = radius
        self.numberOfRadialPanels = numberOfRadialPanels
        self.depth = depth
        self.numberOfVerticalPanels = numberOfVerticalPanels
        self.exponent = exponent
        # Generated-code convention: any remaining non-dict keyword argument
        # is applied directly as an attribute (runs through the setters below).
        for key, value in kwargs.items():
            if not isinstance(value, Dict):
                setattr(self, key, value)

    @property
    def blueprint(self) -> Blueprint:
        """Return blueprint that this entity represents"""
        return GDFCylinderBlueprint()

    @property
    def name(self) -> str:
        """"""
        return self.__name

    @name.setter
    def name(self, value: str):
        """Set name"""
        self.__name = str(value)

    @property
    def description(self) -> str:
        """"""
        return self.__description

    @description.setter
    def description(self, value: str):
        """Set description"""
        self.__description = str(value)

    @property
    def _id(self) -> str:
        """"""
        return self.___id

    @_id.setter
    def _id(self, value: str):
        """Set _id"""
        self.___id = str(value)

    @property
    def scriptableValues(self) -> List[ScriptableValue]:
        """"""
        return self.__scriptableValues

    @scriptableValues.setter
    def scriptableValues(self, value: List[ScriptableValue]):
        """Set scriptableValues"""
        if not isinstance(value, Sequence):
            raise Exception("Expected sequence, but was ", type(value))
        self.__scriptableValues = value

    @property
    def dimensionalLength(self) -> float:
        """Dimensional length"""
        return self.__dimensionalLength

    @dimensionalLength.setter
    def dimensionalLength(self, value: float):
        """Set dimensionalLength"""
        self.__dimensionalLength = float(value)

    @property
    def centerX(self) -> float:
        """Global x-coordinate"""
        return self.__centerX

    @centerX.setter
    def centerX(self, value: float):
        """Set centerX"""
        self.__centerX = float(value)

    @property
    def centerY(self) -> float:
        """Global y-coordinate"""
        return self.__centerY

    @centerY.setter
    def centerY(self, value: float):
        """Set centerY"""
        self.__centerY = float(value)

    @property
    def radius(self) -> float:
        """Radius of cyllinder"""
        return self.__radius

    @radius.setter
    def radius(self, value: float):
        """Set radius"""
        self.__radius = float(value)

    @property
    def numberOfRadialPanels(self) -> int:
        """Number of panels around the circumference"""
        return self.__numberOfRadialPanels

    @numberOfRadialPanels.setter
    def numberOfRadialPanels(self, value: int):
        """Set numberOfRadialPanels"""
        self.__numberOfRadialPanels = int(value)

    @property
    def depth(self) -> float:
        """Depth of cylinder (1 means equidistant)"""
        return self.__depth

    @depth.setter
    def depth(self, value: float):
        """Set depth"""
        self.__depth = float(value)

    @property
    def numberOfVerticalPanels(self) -> int:
        """Number of depth levels"""
        return self.__numberOfVerticalPanels

    @numberOfVerticalPanels.setter
    def numberOfVerticalPanels(self, value: int):
        """Set numberOfVerticalPanels"""
        self.__numberOfVerticalPanels = int(value)

    @property
    def exponent(self) -> float:
        """Exponent in depth distribution"""
        return self.__exponent

    @exponent.setter
    def exponent(self, value: float):
        """Set exponent"""
        # Final line restored: the source copy had extraction junk fused here.
        self.__exponent = float(value)
from dmt.entity import Entity
from dmt.blueprint import Blueprint
from .blueprints.gdfcylinder import GDFCylinderBlueprint
from typing import Dict
from sima.sima.moao import MOAO
from sima.sima.scriptablevalue import ScriptableValue
class GDFCylinder(MOAO):
"""
Keyword arguments
-----------------
name : str
(default "")
description : str
(default "")
_id : str
(default "")
scriptableValues : List[ScriptableValue]
dimensionalLength : float
Dimensional length(default 1.0)
centerX : float
Global x-coordinate(default 0.0)
centerY : float
Global y-coordinate(default 0.0)
radius : float
Radius of cyllinder(default 40.0)
numberOfRadialPanels : int
Number of panels around the circumference(default 20)
depth : float
Depth of cylinder (1 means equidistant)(default 20.0)
numberOfVerticalPanels : int
Number of depth levels(default 10)
exponent : float
Exponent in depth distribution(default 2.0)
"""
def __init__(self , name="", description="", _id="", dimensionalLength=1.0, centerX=0.0, centerY=0.0, radius=40.0, numberOfRadialPanels=20, depth=20.0, numberOfVerticalPanels=10, exponent=2.0, **kwargs):
super().__init__(**kwargs)
self.name = name
self.description = description
self._id = _id
self.scriptableValues = list()
self.dimensionalLength = dimensionalLength
self.centerX = centerX
self.centerY = centerY
self.radius = radius
self.numberOfRadialPanels = numberOfRadialPanels
self.depth = depth
self.numberOfVerticalPanels = numberOfVerticalPanels
self.exponent = exponent
for key, value in kwargs.items():
if not isinstance(value, Dict):
setattr(self, key, value)
@property
def blueprint(self) -> Blueprint:
"""Return blueprint that this entity represents"""
return GDFCylinderBlueprint()
@property
def name(self) -> str:
""""""
return self.__name
@name.setter
def name(self, value: str):
"""Set name"""
self.__name = str(value)
@property
def description(self) -> str:
""""""
return self.__description
@description.setter
def description(self, value: str):
"""Set description"""
self.__description = str(value)
@property
def _id(self) -> str:
""""""
return self.___id
@_id.setter
def _id(self, value: str):
"""Set _id"""
self.___id = str(value)
@property
def scriptableValues(self) -> List[ScriptableValue]:
""""""
return self.__scriptableValues
@scriptableValues.setter
def scriptableValues(self, value: List[ScriptableValue]):
"""Set scriptableValues"""
if not isinstance(value, Sequence):
raise Exception("Expected sequense, but was " , type(value))
self.__scriptableValues = value
@property
def dimensionalLength(self) -> float:
"""Dimensional length"""
return self.__dimensionalLength
@dimensionalLength.setter
def dimensionalLength(self, value: float):
"""Set dimensionalLength"""
self.__dimensionalLength = float(value)
@property
def centerX(self) -> float:
"""Global x-coordinate"""
return self.__centerX
@centerX.setter
def centerX(self, value: float):
"""Set centerX"""
self.__centerX = float(value)
@property
def centerY(self) -> float:
"""Global y-coordinate"""
return self.__centerY
@centerY.setter
def centerY(self, value: float):
"""Set centerY"""
self.__centerY = float(value)
@property
def radius(self) -> float:
"""Radius of cyllinder"""
return self.__radius
@radius.setter
def radius(self, value: float):
"""Set radius"""
self.__radius = float(value)
@property
def numberOfRadialPanels(self) -> int:
"""Number of panels around the circumference"""
return self.__numberOfRadialPanels
@numberOfRadialPanels.setter
def numberOfRadialPanels(self, value: int):
"""Set numberOfRadialPanels"""
self.__numberOfRadialPanels = int(value)
@property
def depth(self) -> float:
"""Depth of cylinder (1 means equidistant)"""
return self.__depth
@depth.setter
def depth(self, value: float):
"""Set depth"""
self.__depth = float(value)
@property
def numberOfVerticalPanels(self) -> int:
"""Number of depth levels"""
return self.__numberOfVerticalPanels
@numberOfVerticalPanels.setter
def numberOfVerticalPanels(self, value: int):
"""Set numberOfVerticalPanels"""
self.__numberOfVerticalPanels = int(value)
@property
def exponent(self) -> float:
"""Exponent in depth distribution"""
return self.__exponent
@exponent.setter
def exponent(self, value: float):
"""Set exponent"""
self.__exponent = float(value) | 0.88816 | 0.369315 |
__doc__ = """
Test arg parser
---------------
Test suite for arg_parser.
"""
import io
import argparse
import inspect
from unittest import TestCase, mock
from contextlib import redirect_stdout
from dataf import ArgParser
class CommandTest:
    # Class-style command fixture: ArgParser dispatches to run() and uses the
    # docstring's first line as the command's help text, so the docstring
    # below is part of the fixture's contract — do not reword it.
    def run(self):
        """
        Test command.
        """
        # Raising lets tests assert that parse() really invoked this command.
        raise NotImplementedError
class CustomSubParser:
    # Command fixture that supplies its own setup_sub_parser hook, which
    # ArgParser should use instead of its default signature-based setup.
    def run(self, param):
        """
        Test command.
        """
        raise NotImplementedError

    @staticmethod
    def setup_sub_parser(sub_pars, signature, docstring):
        # Registers a positional argument with a distinctive metavar so tests
        # can detect that this hook (not the default path) built the parser.
        sub_pars.add_argument(
            'param', metavar='custom_sub_parser',
            help='Custom sub parser.'
        )
# Function-style command fixtures covering each signature shape the parser
# must translate into argparse arguments.
def command_without_param(): pass
def command_with_param(self, param): pass  # NOTE(review): 'self' is an ordinary first parameter of a plain function here — appears intentional for the test
def command_with_opt_param(param=None): pass  # default value -> optional '--param'
def command_with_annotation(param: ['a', 'b']): pass  # list annotation -> argparse choices
class TestArgParserFunc(TestCase):
    """
    Test for ArgParser class with a function as command.
    """
    @classmethod
    def setUpClass(cls):
        cls.arg_parser = cls._create_arg_parser()

    @staticmethod
    def _test_command():
        """
        Test command.
        """
        # The docstring above becomes the command's help text; the raise lets
        # tests confirm the command was actually dispatched.
        raise NotImplementedError

    @classmethod
    def _create_arg_parser(cls, opt=None, commands=None):
        """
        Create an ArgParser.
        :param dict opt: options for ArgParser.
        :param dict commands: commands for ArgParser.
        """
        opt = opt or {'description': 'Test'}
        commands = commands or {'test': cls._test_command}
        arg_parser = ArgParser(opt, commands)
        return arg_parser

    def test_init_set_commands(self):
        """
        Test __init__ method set commands.
        """
        # The subparsers action is the only action carrying a
        # _name_parser_map attribute; locate it among parser._actions.
        test_cmd = next(filter(
            lambda x: getattr(x, '_name_parser_map', None) is not None,
            self.arg_parser.parser._actions
        ))
        self.assertIn('test', test_cmd.choices.keys())

    def test_init_command_helper(self):
        """
        Test __init__ method set commands help.
        """
        test_cmd = next(filter(
            lambda x: getattr(x, '_name_parser_map', None) is not None,
            self.arg_parser.parser._actions
        ))
        # Help text must be the first docstring line of the command.
        self.assertEqual('Test command.', test_cmd._choices_actions[0].help)

    def test_parse(self):
        """
        Test parse function.
        """
        # argv[0] is the program name; argv[1] selects the 'test' command.
        with mock.patch('sys.argv', ['test', 'test']):
            with self.assertRaises(NotImplementedError):
                self.arg_parser.parse()

    def test_parse_without_command(self):
        """
        Test parse function without command.
        """
        # With no command given, parse() should print the parser's help.
        f = io.StringIO()
        with mock.patch('sys.argv', ['test']):
            with redirect_stdout(f):
                self.arg_parser.parse()
        parse = f.getvalue()
        self.assertEqual(parse, self.arg_parser.parser.format_help())
class TestArgParserClass(TestArgParserFunc):
    """
    Test for ArgParser class with a class as command.
    """
    # Inherits all TestArgParserFunc tests, re-running them with a
    # class-style command instead of a function.
    @classmethod
    def setUpClass(cls):
        cls.arg_parser = cls._create_arg_parser(commands={'test': CommandTest})

    def test_init_create_arg_parser(self):
        """
        Test __init__ method create and ArgParser instance.
        """
        self.assertIsInstance(self.arg_parser, ArgParser)

    def test_init_create_argument_parser(self):
        """
        Test __init__ method create and ArgumentParser instance.
        """
        self.assertIsInstance(self.arg_parser.parser, argparse.ArgumentParser)

    def test_init_set_description(self):
        """
        Test __init__ method set parser description.
        """
        self.assertEqual(self.arg_parser.parser.description, 'Test')

    def test_init_with_custom_set_sub_parser(self):
        """
        Test __init__ method with a class containing a custom set_sub_parser method.
        """
        parser = self._create_arg_parser(commands={'test': CustomSubParser})
        test_cmd = next(filter(
            lambda x: getattr(x, '_name_parser_map', None) is not None,
            parser.parser._actions
        ))
        # The distinctive metavar proves CustomSubParser.setup_sub_parser ran.
        self.assertEqual(
            'custom_sub_parser',
            test_cmd.choices['test']._get_positional_actions()[0].metavar
        )

    def test_docstring_args(self):
        """
        Test _docstring_args return dict with docstring param as ReStructuredText.
        """
        args = ArgParser._docstring_args(
            """
            Test docstring.
            :param str test: test string.
            :param str test2: second test string.
            """
        )
        self.assertEqual(
            {'test': 'test string.', 'test2': 'second test string.'}, args
        )

    def test_docstring_args_with_empty_string(self):
        """
        Test _docstring_args with an empty docstring.
        """
        args = ArgParser._docstring_args("")
        self.assertEqual({}, args)

    def test_docstring_args_with_none(self):
        """
        Test _docstring_args with None (no docstring in function).
        """
        args = ArgParser._docstring_args(None)
        self.assertEqual({}, args)

    def test_docstring_desc(self):
        """
        Test _docstring_desc return first line of docstring.
        """
        description = ArgParser._docstring_desc(
            """
            Test docstring.
            Second line.
            :param str test: test string.
            :param str test2: second test string.
            """
        )
        self.assertEqual('Test docstring.', description)

    def test_docstring_desc_with_empty_string(self):
        """
        Test _docstring_desc with an empty docstring.
        """
        description = ArgParser._docstring_desc('')
        self.assertEqual('', description)

    def test_docstring_desc_with_none(self):
        """
        Test _docstring_desc with None.
        """
        description = ArgParser._docstring_desc(None)
        self.assertEqual('', description)

    @property
    def _sub_pars(self):
        """
        Create a sub_parser object.
        """
        # Fresh throwaway sub-parser per access, so each test mutates its own.
        parser = argparse.ArgumentParser()
        sub_parsers = parser.add_subparsers()
        sub_pars = sub_parsers.add_parser('test')
        return sub_pars

    def test_setup_sub_parser_without_param(self):
        """
        Test _setup_sub_parser method with a command without param.
        """
        sub_pars = self._sub_pars
        with mock.patch('dataf.arg_parser.argparse.ArgumentParser.add_argument') as m:
            signature = inspect.signature(command_without_param)
            docstring = self.arg_parser._docstring_args(
                inspect.getdoc(command_without_param)
            )
            self.arg_parser._setup_sub_parser(sub_pars, signature, docstring)
            m.assert_not_called()

    def test_setup_sub_parser_with_param(self):
        """
        Test _setup_sub_parser method with a command with param.
        """
        sub_pars = self._sub_pars
        with mock.patch('dataf.arg_parser.argparse.ArgumentParser.add_argument') as m:
            # Registering the command makes 'self' recognized and skipped.
            sub_pars.set_defaults(command=command_with_param)
            signature = inspect.signature(command_with_param)
            docstring = self.arg_parser._docstring_args(
                inspect.getdoc(command_with_param)
            )
            self.arg_parser._setup_sub_parser(sub_pars, signature, docstring)
            m.assert_called_with('param', help='', metavar='param')

    def test_setup_sub_parser_with_opt_param(self):
        """
        Test _setup_sub_parser method with a command with optional param.
        """
        sub_pars = self._sub_pars
        with mock.patch('dataf.arg_parser.argparse.ArgumentParser.add_argument') as m:
            signature = inspect.signature(command_with_opt_param)
            docstring = self.arg_parser._docstring_args(
                inspect.getdoc(command_with_opt_param)
            )
            self.arg_parser._setup_sub_parser(sub_pars, signature, docstring)
            # A default value must turn the argument into an optional flag.
            m.assert_called_with(
                '--param', default=None, help='', metavar='param'
            )

    def test_setup_sub_parser_with_annotation(self):
        """
        Test _setup_sub_parser method with a command with param annotation.
        """
        sub_pars = self._sub_pars
        with mock.patch('dataf.arg_parser.argparse.ArgumentParser.add_argument') as m:
            signature = inspect.signature(command_with_annotation)
            docstring = self.arg_parser._docstring_args(
                inspect.getdoc(command_with_annotation)
            )
            self.arg_parser._setup_sub_parser(sub_pars, signature, docstring)
            # A list annotation must become argparse 'choices'.
            m.assert_called_with(
                'param', choices=['a', 'b'],
                help=' (choices: %(choices)s)', metavar='param'
            )
__doc__ = """
Test arg parser
---------------
Test suite for arg_parser.
"""
import io
import argparse
import inspect
from unittest import TestCase, mock
from contextlib import redirect_stdout
from dataf import ArgParser
class CommandTest:
def run(self):
"""
Test command.
"""
raise NotImplementedError
class CustomSubParser:
def run(self, param):
"""
Test command.
"""
raise NotImplementedError
@staticmethod
def setup_sub_parser(sub_pars, signature, docstring):
sub_pars.add_argument(
'param', metavar='custom_sub_parser',
help='Custom sub parser.'
)
def command_without_param(): pass
def command_with_param(self, param): pass
def command_with_opt_param(param=None): pass
def command_with_annotation(param: ['a', 'b']): pass
class TestArgParserFunc(TestCase):
"""
Test for ArgParser class with a function as command.
"""
@classmethod
def setUpClass(cls):
cls.arg_parser = cls._create_arg_parser()
@staticmethod
def _test_command():
"""
Test command.
"""
raise NotImplementedError
@classmethod
def _create_arg_parser(cls, opt=None, commands=None):
"""
Create an ArgParser.
:param dict opt: options for ArgParser.
:param dict commands: commands for ArgParser.
"""
opt = opt or {'description': 'Test'}
commands = commands or {'test': cls._test_command}
arg_parser = ArgParser(opt, commands)
return arg_parser
def test_init_set_commands(self):
"""
Test __init__ method set commands.
"""
test_cmd = next(filter(
lambda x: getattr(x, '_name_parser_map', None) is not None,
self.arg_parser.parser._actions
))
self.assertIn('test', test_cmd.choices.keys())
def test_init_command_helper(self):
"""
Test __init__ method set commands help.
"""
test_cmd = next(filter(
lambda x: getattr(x, '_name_parser_map', None) is not None,
self.arg_parser.parser._actions
))
self.assertEqual('Test command.', test_cmd._choices_actions[0].help)
def test_parse(self):
"""
Test parse function.
"""
with mock.patch('sys.argv', ['test', 'test']):
with self.assertRaises(NotImplementedError):
self.arg_parser.parse()
def test_parse_without_command(self):
"""
Test parse function without command.
"""
f = io.StringIO()
with mock.patch('sys.argv', ['test']):
with redirect_stdout(f):
self.arg_parser.parse()
parse = f.getvalue()
self.assertEqual(parse, self.arg_parser.parser.format_help())
class TestArgParserClass(TestArgParserFunc):
"""
Test for ArgParser class with a class as command.
"""
@classmethod
def setUpClass(cls):
cls.arg_parser = cls._create_arg_parser(commands={'test': CommandTest})
def test_init_create_arg_parser(self):
"""
Test __init__ method create and ArgParser instance.
"""
self.assertIsInstance(self.arg_parser, ArgParser)
def test_init_create_argument_parser(self):
"""
Test __init__ method create and ArgumentParser instance.
"""
self.assertIsInstance(self.arg_parser.parser, argparse.ArgumentParser)
def test_init_set_description(self):
"""
Test __init__ method set parser description.
"""
self.assertEqual(self.arg_parser.parser.description, 'Test')
def test_init_with_custom_set_sub_parser(self):
"""
Test __init__ method with a class containing a custom set_sub_parser method.
"""
parser = self._create_arg_parser(commands={'test': CustomSubParser})
test_cmd = next(filter(
lambda x: getattr(x, '_name_parser_map', None) is not None,
parser.parser._actions
))
self.assertEqual(
'custom_sub_parser',
test_cmd.choices['test']._get_positional_actions()[0].metavar
)
def test_docstring_args(self):
"""
Test _docstring_args return dict with docstring param as ReStructuredText.
"""
args = ArgParser._docstring_args(
"""
Test docstring.
:param str test: test string.
:param str test2: second test string.
"""
)
self.assertEqual(
{'test': 'test string.', 'test2': 'second test string.'}, args
)
def test_docstring_args_with_empty_string(self):
"""
Test _docstring_args with an empty docstring.
"""
args = ArgParser._docstring_args("")
self.assertEqual({}, args)
def test_docstring_args_with_none(self):
"""
Test _docstring_args with None (no docstring in function).
"""
args = ArgParser._docstring_args(None)
self.assertEqual({}, args)
def test_docstring_desc(self):
"""
Test _docstring_desc return first line of docstring.
"""
description = ArgParser._docstring_desc(
"""
Test docstring.
Second line.
:param str test: test string.
:param str test2: second test string.
"""
)
self.assertEqual('Test docstring.', description)
def test_docstring_desc_with_empty_string(self):
"""
Test _docstring_desc with an empty docstring.
"""
description = ArgParser._docstring_desc('')
self.assertEqual('', description)
def test_docstring_desc_with_none(self):
"""
Test _docstring_desc with None.
"""
description = ArgParser._docstring_desc(None)
self.assertEqual('', description)
@property
def _sub_pars(self):
"""
Create a sub_parser object.
"""
parser = argparse.ArgumentParser()
sub_parsers = parser.add_subparsers()
sub_pars = sub_parsers.add_parser('test')
return sub_pars
def test_setup_sub_parser_without_param(self):
"""
Test _setup_sub_parser method with a command without param.
"""
sub_pars = self._sub_pars
with mock.patch('dataf.arg_parser.argparse.ArgumentParser.add_argument') as m:
signature = inspect.signature(command_without_param)
docstring = self.arg_parser._docstring_args(
inspect.getdoc(command_without_param)
)
self.arg_parser._setup_sub_parser(sub_pars, signature, docstring)
m.assert_not_called()
def test_setup_sub_parser_with_param(self):
"""
Test _setup_sub_parser method with a command with param.
"""
sub_pars = self._sub_pars
with mock.patch('dataf.arg_parser.argparse.ArgumentParser.add_argument') as m:
sub_pars.set_defaults(command=command_with_param)
signature = inspect.signature(command_with_param)
docstring = self.arg_parser._docstring_args(
inspect.getdoc(command_with_param)
)
self.arg_parser._setup_sub_parser(sub_pars, signature, docstring)
m.assert_called_with('param', help='', metavar='param')
def test_setup_sub_parser_with_opt_param(self):
"""
Test _setup_sub_parser method with a command with optional param.
"""
sub_pars = self._sub_pars
with mock.patch('dataf.arg_parser.argparse.ArgumentParser.add_argument') as m:
signature = inspect.signature(command_with_opt_param)
docstring = self.arg_parser._docstring_args(
inspect.getdoc(command_with_opt_param)
)
self.arg_parser._setup_sub_parser(sub_pars, signature, docstring)
m.assert_called_with(
'--param', default=None, help='', metavar='param'
)
def test_setup_sub_parser_with_annotation(self):
"""
Test _setup_sub_parser method with a command with param annotation.
"""
sub_pars = self._sub_pars
with mock.patch('dataf.arg_parser.argparse.ArgumentParser.add_argument') as m:
signature = inspect.signature(command_with_annotation)
docstring = self.arg_parser._docstring_args(
inspect.getdoc(command_with_annotation)
)
self.arg_parser._setup_sub_parser(sub_pars, signature, docstring)
m.assert_called_with(
'param', choices=['a', 'b'],
help=' (choices: %(choices)s)', metavar='param'
) | 0.693265 | 0.394609 |
import sys,getopt
from CommonDefs import CommonDefs
def fileToTuples(file, delimiter):
f1 = open(file,"r")
data1 = [] #list of tuples from f1
for line in f1.readlines():
line = line.strip()
tokens = line.split(delimiter)
tuple = []
for token in tokens:
tuple.append(token.strip())
if(len(line) >0 and len(tuple ) > 0):
data1.append(tuple)
f1.close()
return data1
def main(argv):
'''Main function that does the actual work your description goes in here.
Args:
infile1 and infile2 are the two files to be compared for value similarity with default delimiter of "|"
Returns:
0 if the two files match else 1
The code assumes similar listing of attributes in the two files
'''
infile1=""
infile2=""
delimiter="|"
algo = ""
try:
opts, args = getopt.getopt(argv,"hf:F:a:",["infile1=","infile2=","algorithm="])
except getopt.GetoptError:
print 'test.py -f <inputfile1> -F <inputfile2> -a <graph_algorithm>'
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print 'test.py -f <inputfile1> -F <inputfile2>'
sys.exit()
elif opt in ("-f", "--infile1"):
infile1 = arg
elif opt in ("-F", "--infile2"):
infile2 = arg
elif opt in ("-a", "--algorithm"):
algo = arg
print "algo=", algo
data1 = fileToTuples(infile1,delimiter)
data2 = fileToTuples(infile2,delimiter)
if algo == 'toposort':
#dependency verification as multiple topo ordering possible
depen_map = dict()
for entry in data1:
vid = int(entry[0])
val = int(entry[1])
depen_map[vid] = val
for entry in data2:
vid = int(entry[0])
vertex_rank = depen_map[vid]
dependencies = entry[1].split(",")
for val in dependencies:
depen_rank = depen_map[int(val)]
if vertex_rank <= depen_rank:
return 1
return 0
else:
if len(data1) != len(data2):
return 1
else:
for i,val in enumerate(data1):
if(len(data1[i]) != len(data2[i])):
return 1
if(data1[i] != data2[i]):
if(CommonDefs.INT_MAX in data1[i] or CommonDefs.INT_MAX in data2[i]):
return 2
else:
return 1
return 0
if __name__ == "__main__":
rc = main(sys.argv[1:])
if rc > 0:
if rc == 2:
print 'Input graph is disconnected and the current implementation of WCC does not support disconnected graphs'
sys.exit(0)
else:
print 'Actual and Expected outputs are different'
sys.exit(1)
else:
print 'Actual and Expected outputs are similar'
sys.exit(0) | compareoutput.py | import sys,getopt
from CommonDefs import CommonDefs
def fileToTuples(file, delimiter):
f1 = open(file,"r")
data1 = [] #list of tuples from f1
for line in f1.readlines():
line = line.strip()
tokens = line.split(delimiter)
tuple = []
for token in tokens:
tuple.append(token.strip())
if(len(line) >0 and len(tuple ) > 0):
data1.append(tuple)
f1.close()
return data1
def main(argv):
'''Main function that does the actual work your description goes in here.
Args:
infile1 and infile2 are the two files to be compared for value similarity with default delimiter of "|"
Returns:
0 if the two files match else 1
The code assumes similar listing of attributes in the two files
'''
infile1=""
infile2=""
delimiter="|"
algo = ""
try:
opts, args = getopt.getopt(argv,"hf:F:a:",["infile1=","infile2=","algorithm="])
except getopt.GetoptError:
print 'test.py -f <inputfile1> -F <inputfile2> -a <graph_algorithm>'
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print 'test.py -f <inputfile1> -F <inputfile2>'
sys.exit()
elif opt in ("-f", "--infile1"):
infile1 = arg
elif opt in ("-F", "--infile2"):
infile2 = arg
elif opt in ("-a", "--algorithm"):
algo = arg
print "algo=", algo
data1 = fileToTuples(infile1,delimiter)
data2 = fileToTuples(infile2,delimiter)
if algo == 'toposort':
#dependency verification as multiple topo ordering possible
depen_map = dict()
for entry in data1:
vid = int(entry[0])
val = int(entry[1])
depen_map[vid] = val
for entry in data2:
vid = int(entry[0])
vertex_rank = depen_map[vid]
dependencies = entry[1].split(",")
for val in dependencies:
depen_rank = depen_map[int(val)]
if vertex_rank <= depen_rank:
return 1
return 0
else:
if len(data1) != len(data2):
return 1
else:
for i,val in enumerate(data1):
if(len(data1[i]) != len(data2[i])):
return 1
if(data1[i] != data2[i]):
if(CommonDefs.INT_MAX in data1[i] or CommonDefs.INT_MAX in data2[i]):
return 2
else:
return 1
return 0
if __name__ == "__main__":
rc = main(sys.argv[1:])
if rc > 0:
if rc == 2:
print 'Input graph is disconnected and the current implementation of WCC does not support disconnected graphs'
sys.exit(0)
else:
print 'Actual and Expected outputs are different'
sys.exit(1)
else:
print 'Actual and Expected outputs are similar'
sys.exit(0) | 0.126124 | 0.293613 |
import csv
# RICS .CSV
ricsFileName = 'Oboz'
ricsFile = open(ricsFileName + '.csv')
ricsReader = csv.reader(ricsFile)
ricsData = list(ricsReader)
# AMAZON .CSV
amzFileName = 'Amazon'
amzFile = open(amzFileName + '.csv')
amzReader = csv.reader(amzFile)
amzData = list(amzReader)
# Number of possible rows to go through from Amazon's CSV file
numRows = (len(amzData) + 1)
numRowsRics = (len(ricsData) + 1)
# Name of blank output csv file
outputFileName = 'output ' + ricsFileName
outputFile = open(outputFileName+'.csv', 'w', newline='')
outputWriter = csv.writer(outputFile)
outputWriter.writerow(['Brand', 'RICS SKU', 'Size', '', 'AMZ SKU', 'AMZ Qty'])
# Counting Variables, j must start at 1 so it starts in the right row.
j = 1
i = 0
# Main
while i < numRows:
if j == (numRowsRics-2):
break
amzSku = (amzData[i][0])
amzQty = (amzData[i][3])
ricsSku = (ricsData[j][2])
ricsSupplier = (ricsData[j][7])
ricsSize = (ricsData[j][15])
ricsWidth = (ricsData[j][16])
ricsQty = (ricsData[j][18])
# All possible SKUs on Amazon (so far)
skuOne = ricsSku + ' ' + ricsSize + ' ' + ricsWidth
skuTwo = ricsSku + ' ' + ricsSize + ricsWidth
skuThree = ricsSku + ' ' + ricsSize + '(' + ricsWidth + ')'
skuFour = ricsSku + ' ' + ricsSize + ' ' + '(' + ricsWidth + ')'
skuFive = ricsSku + ' ' + ricsSize + ' ' + 'ricsWidth'
skuSix = ricsSku + ' ' + ricsSize + ricsWidth
skuSeven = ricsSku + ' ' + ricsSize + '(' + 'ricsWidth' + ')'
skuEight = ricsSku + ' ' + ricsSize + ' ' + '(' + 'ricsWidth' + ')'
skuNine = ricsSku + ' ' + ricsSize + ' ' + 'ricsWidth'
skuTen = ricsSku + ' ' + ricsSize + 'ricsWidth'
skuEleven = ricsSku + ' ' + ricsSize + '(' + 'ricsWidth' + ')'
skuTwelve = ricsSku + ' ' + ricsSize + ' ' + '(' + 'ricsWidth' + ')'
# In case some SKUs are named with EE on Amazon
if ricsWidth == "2E":
ricsWidth2 = "EE"
skuOne = ricsSku + ' ' + ricsSize + ' ' + ricsWidth
skuTwo = ricsSku + ' ' + ricsSize + ricsWidth
skuThree = ricsSku + ' ' + ricsSize + '(' + ricsWidth + ')'
skuFour = ricsSku + ' ' + ricsSize + ' ' + '(' + ricsWidth + ')'
skuFive = ricsSku + ' ' + ricsSize + ' ' + ricsWidth2
skuSix = ricsSku + ' ' + ricsSize + ricsWidth2
skuSeven = ricsSku + ' ' + ricsSize + '(' + ricsWidth2 + ')'
skuEight = ricsSku + ' ' + ricsSize + ' ' + '(' + ricsWidth2 + ')'
# In case some SKUs are named with EEE on Amazon
elif ricsWidth == "3E":
ricsWidth3 = "EEE"
skuOne = ricsSku + ' ' + ricsSize + ' ' + ricsWidth
skuTwo = ricsSku + ' ' + ricsSize + ricsWidth
skuThree = ricsSku + ' ' + ricsSize + '(' + ricsWidth + ')'
skuFour = ricsSku + ' ' + ricsSize + ' ' + '(' + ricsWidth + ')'
skuNine = ricsSku + ' ' + ricsSize + ' ' + ricsWidth3
skuTen = ricsSku + ' ' + ricsSize + ricsWidth3
skuEleven = ricsSku + ' ' + ricsSize + '(' + ricsWidth3 + ')'
skuTwelve = ricsSku + ' ' + ricsSize + ' ' + '(' + ricsWidth3 + ')'
# X = SKU
# Y = SIZE
# Z = WIDTH
# Checking if SKU is in form "X Y Z"
if amzSku == skuOne:
if amzQty == ricsQty:
j += 1
i=0
elif amzQty == '':
j += 1
i=0
elif amzQty != ricsQty:
rics = (float(ricsQty))
amz = (float(amzQty))
qty = (rics - amz)
outputWriter.writerow([ricsSupplier, ricsSku, ricsSize + ' ' + ' ' + ricsWidth, '', amzSku, qty])
j += 1
i=0
# Checking if SKU is in form "X YZ"
elif amzSku == skuTwo:
if amzQty == ricsQty:
j += 1
i=0
elif amzQty == '':
j += 1
i=0
elif amzQty != ricsQty:
rics = (float(ricsQty))
amz = (float(amzQty))
qty = (rics - amz)
outputWriter.writerow([ricsSupplier, ricsSku, ricsSize + ' ' + ' ' + ricsWidth, '', amzSku, qty])
j += 1
i=0
# Checking if SKU is in form "X Y(Z)"
elif amzSku == skuThree:
if amzQty == ricsQty:
j += 1
i=0
elif amzQty == '':
j += 1
i=0
elif amzQty != ricsQty:
rics = (float(ricsQty))
amz = (float(amzQty))
qty = (rics - amz)
outputWriter.writerow([ricsSupplier, ricsSku, ricsSize + ' ' + ' ' + ricsWidth, '', amzSku, qty])
j += 1
i=0
# Checking if SKU is in form "X Y (Z)"
elif amzSku == skuFour:
if amzQty == ricsQty:
j += 1
i=0
elif amzQty == '':
j += 1
i=0
elif amzQty != ricsQty:
rics = (float(ricsQty))
amz = (float(amzQty))
qty = (rics - amz)
outputWriter.writerow([ricsSupplier, ricsSku, ricsSize + ' ' + ' ' + ricsWidth, '', amzSku, qty])
j += 1
i=0
# Checking if SKU is in form "X Y ZZ"
elif amzSku == skuFive:
if amzQty == ricsQty:
j += 1
i=0
elif amzQty == '':
j += 1
i=0
elif amzQty != ricsQty:
rics = (float(ricsQty))
amz = (float(amzQty))
qty = (rics - amz)
outputWriter.writerow([ricsSupplier, ricsSku, ricsSize + ' ' + ' ' + ricsWidth, '', amzSku, qty])
j += 1
i=0
# Checking if SKU is in form "X YZZ"
elif amzSku == skuSix:
if amzQty == ricsQty:
j += 1
i=0
elif amzQty == '':
j += 1
i=0
elif amzQty != ricsQty:
rics = (float(ricsQty))
amz = (float(amzQty))
qty = (rics - amz)
outputWriter.writerow([ricsSupplier, ricsSku, ricsSize + ' ' + ' ' + ricsWidth, '', amzSku, qty])
j += 1
i=0
# Checking if SKU is in form "X Y(ZZ)"
elif amzSku == skuSeven:
if amzQty == ricsQty:
j += 1
i=0
elif amzQty == '':
j += 1
i=0
elif amzQty != ricsQty:
rics = (float(ricsQty))
amz = (float(amzQty))
qty = (rics - amz)
outputWriter.writerow([ricsSupplier, ricsSku, ricsSize + ' ' + ' ' + ricsWidth, '', amzSku, qty])
j += 1
i=0
# Checking if SKU is in form "X Y (ZZ)"
elif amzSku == skuEight:
if amzQty == ricsQty:
j += 1
i=0
elif amzQty == '':
j += 1
i=0
elif amzQty != ricsQty:
rics = (float(ricsQty))
amz = (float(amzQty))
qty = (rics - amz)
outputWriter.writerow([ricsSupplier, ricsSku, ricsSize + ' ' + ' ' + ricsWidth, '', amzSku, qty])
j += 1
i=0
# Checking if SKU is in form "X Y ZZZ"
elif amzSku == skuNine:
if amzQty == ricsQty:
j += 1
i=0
elif amzQty == '':
j += 1
i=0
elif amzQty != ricsQty:
rics = (float(ricsQty))
amz = (float(amzQty))
qty = (rics - amz)
outputWriter.writerow([ricsSupplier, ricsSku, ricsSize + ' ' + ' ' + ricsWidth, '', amzSku, qty])
j += 1
i=0
# Checking if SKU is in form "X YZZZ"
elif amzSku == skuTen:
if amzQty == ricsQty:
j += 1
i=0
elif amzQty == '':
j += 1
i=0
elif amzQty != ricsQty:
rics = (float(ricsQty))
amz = (float(amzQty))
qty = (rics - amz)
outputWriter.writerow([ricsSupplier, ricsSku, ricsSize + ' ' + ' ' + ricsWidth, '', amzSku, qty])
j += 1
i=0
# Checking if SKU is in form "X Y(ZZZ)"
elif amzSku == skuEleven:
if amzQty == ricsQty:
j += 1
i=0
elif amzQty == '':
j += 1
i=0
elif amzQty != ricsQty:
rics = (float(ricsQty))
amz = (float(amzQty))
qty = (rics - amz)
outputWriter.writerow([ricsSupplier, ricsSku, ricsSize + ' ' + ' ' + ricsWidth, '', amzSku, qty])
j += 1
i=0
# Checking if SKU is in form "X Y (ZZZ)"
elif amzSku == skuTwelve:
if amzQty == ricsQty:
j += 1
i=0
elif amzQty == '':
j += 1
i=0
elif amzQty != ricsQty:
rics = (float(ricsQty))
amz = (float(amzQty))
qty = (rics - amz)
outputWriter.writerow([ricsSupplier, ricsSku, ricsSize + ' ' + ' ' + ricsWidth, '', amzSku, qty])
j += 1
i=0
# If it finds none of those forms and it reaches the end, write the SKU into the CSV file for Amazon listing.
else:
if i == 6362:
outputWriter.writerow([ricsSupplier, ricsSku, ricsSize + ' ' + ' ' + ricsWidth])
j += 1
i=0
i += 1
print("Finished!")
outputFile.close() | stockcheck.py | import csv
# RICS .CSV
ricsFileName = 'Oboz'
ricsFile = open(ricsFileName + '.csv')
ricsReader = csv.reader(ricsFile)
ricsData = list(ricsReader)
# AMAZON .CSV
amzFileName = 'Amazon'
amzFile = open(amzFileName + '.csv')
amzReader = csv.reader(amzFile)
amzData = list(amzReader)
# Number of possible rows to go through from Amazon's CSV file
numRows = (len(amzData) + 1)
numRowsRics = (len(ricsData) + 1)
# Name of blank output csv file
outputFileName = 'output ' + ricsFileName
outputFile = open(outputFileName+'.csv', 'w', newline='')
outputWriter = csv.writer(outputFile)
outputWriter.writerow(['Brand', 'RICS SKU', 'Size', '', 'AMZ SKU', 'AMZ Qty'])
# Counting Variables, j must start at 1 so it starts in the right row.
j = 1
i = 0
# Main
while i < numRows:
if j == (numRowsRics-2):
break
amzSku = (amzData[i][0])
amzQty = (amzData[i][3])
ricsSku = (ricsData[j][2])
ricsSupplier = (ricsData[j][7])
ricsSize = (ricsData[j][15])
ricsWidth = (ricsData[j][16])
ricsQty = (ricsData[j][18])
# All possible SKUs on Amazon (so far)
skuOne = ricsSku + ' ' + ricsSize + ' ' + ricsWidth
skuTwo = ricsSku + ' ' + ricsSize + ricsWidth
skuThree = ricsSku + ' ' + ricsSize + '(' + ricsWidth + ')'
skuFour = ricsSku + ' ' + ricsSize + ' ' + '(' + ricsWidth + ')'
skuFive = ricsSku + ' ' + ricsSize + ' ' + 'ricsWidth'
skuSix = ricsSku + ' ' + ricsSize + ricsWidth
skuSeven = ricsSku + ' ' + ricsSize + '(' + 'ricsWidth' + ')'
skuEight = ricsSku + ' ' + ricsSize + ' ' + '(' + 'ricsWidth' + ')'
skuNine = ricsSku + ' ' + ricsSize + ' ' + 'ricsWidth'
skuTen = ricsSku + ' ' + ricsSize + 'ricsWidth'
skuEleven = ricsSku + ' ' + ricsSize + '(' + 'ricsWidth' + ')'
skuTwelve = ricsSku + ' ' + ricsSize + ' ' + '(' + 'ricsWidth' + ')'
# In case some SKUs are named with EE on Amazon
if ricsWidth == "2E":
ricsWidth2 = "EE"
skuOne = ricsSku + ' ' + ricsSize + ' ' + ricsWidth
skuTwo = ricsSku + ' ' + ricsSize + ricsWidth
skuThree = ricsSku + ' ' + ricsSize + '(' + ricsWidth + ')'
skuFour = ricsSku + ' ' + ricsSize + ' ' + '(' + ricsWidth + ')'
skuFive = ricsSku + ' ' + ricsSize + ' ' + ricsWidth2
skuSix = ricsSku + ' ' + ricsSize + ricsWidth2
skuSeven = ricsSku + ' ' + ricsSize + '(' + ricsWidth2 + ')'
skuEight = ricsSku + ' ' + ricsSize + ' ' + '(' + ricsWidth2 + ')'
# In case some SKUs are named with EEE on Amazon
elif ricsWidth == "3E":
ricsWidth3 = "EEE"
skuOne = ricsSku + ' ' + ricsSize + ' ' + ricsWidth
skuTwo = ricsSku + ' ' + ricsSize + ricsWidth
skuThree = ricsSku + ' ' + ricsSize + '(' + ricsWidth + ')'
skuFour = ricsSku + ' ' + ricsSize + ' ' + '(' + ricsWidth + ')'
skuNine = ricsSku + ' ' + ricsSize + ' ' + ricsWidth3
skuTen = ricsSku + ' ' + ricsSize + ricsWidth3
skuEleven = ricsSku + ' ' + ricsSize + '(' + ricsWidth3 + ')'
skuTwelve = ricsSku + ' ' + ricsSize + ' ' + '(' + ricsWidth3 + ')'
# X = SKU
# Y = SIZE
# Z = WIDTH
# Checking if SKU is in form "X Y Z"
if amzSku == skuOne:
if amzQty == ricsQty:
j += 1
i=0
elif amzQty == '':
j += 1
i=0
elif amzQty != ricsQty:
rics = (float(ricsQty))
amz = (float(amzQty))
qty = (rics - amz)
outputWriter.writerow([ricsSupplier, ricsSku, ricsSize + ' ' + ' ' + ricsWidth, '', amzSku, qty])
j += 1
i=0
# Checking if SKU is in form "X YZ"
elif amzSku == skuTwo:
if amzQty == ricsQty:
j += 1
i=0
elif amzQty == '':
j += 1
i=0
elif amzQty != ricsQty:
rics = (float(ricsQty))
amz = (float(amzQty))
qty = (rics - amz)
outputWriter.writerow([ricsSupplier, ricsSku, ricsSize + ' ' + ' ' + ricsWidth, '', amzSku, qty])
j += 1
i=0
# Checking if SKU is in form "X Y(Z)"
elif amzSku == skuThree:
if amzQty == ricsQty:
j += 1
i=0
elif amzQty == '':
j += 1
i=0
elif amzQty != ricsQty:
rics = (float(ricsQty))
amz = (float(amzQty))
qty = (rics - amz)
outputWriter.writerow([ricsSupplier, ricsSku, ricsSize + ' ' + ' ' + ricsWidth, '', amzSku, qty])
j += 1
i=0
# Checking if SKU is in form "X Y (Z)"
elif amzSku == skuFour:
if amzQty == ricsQty:
j += 1
i=0
elif amzQty == '':
j += 1
i=0
elif amzQty != ricsQty:
rics = (float(ricsQty))
amz = (float(amzQty))
qty = (rics - amz)
outputWriter.writerow([ricsSupplier, ricsSku, ricsSize + ' ' + ' ' + ricsWidth, '', amzSku, qty])
j += 1
i=0
# Checking if SKU is in form "X Y ZZ"
elif amzSku == skuFive:
if amzQty == ricsQty:
j += 1
i=0
elif amzQty == '':
j += 1
i=0
elif amzQty != ricsQty:
rics = (float(ricsQty))
amz = (float(amzQty))
qty = (rics - amz)
outputWriter.writerow([ricsSupplier, ricsSku, ricsSize + ' ' + ' ' + ricsWidth, '', amzSku, qty])
j += 1
i=0
# Checking if SKU is in form "X YZZ"
elif amzSku == skuSix:
if amzQty == ricsQty:
j += 1
i=0
elif amzQty == '':
j += 1
i=0
elif amzQty != ricsQty:
rics = (float(ricsQty))
amz = (float(amzQty))
qty = (rics - amz)
outputWriter.writerow([ricsSupplier, ricsSku, ricsSize + ' ' + ' ' + ricsWidth, '', amzSku, qty])
j += 1
i=0
# Checking if SKU is in form "X Y(ZZ)"
elif amzSku == skuSeven:
if amzQty == ricsQty:
j += 1
i=0
elif amzQty == '':
j += 1
i=0
elif amzQty != ricsQty:
rics = (float(ricsQty))
amz = (float(amzQty))
qty = (rics - amz)
outputWriter.writerow([ricsSupplier, ricsSku, ricsSize + ' ' + ' ' + ricsWidth, '', amzSku, qty])
j += 1
i=0
# Checking if SKU is in form "X Y (ZZ)"
elif amzSku == skuEight:
if amzQty == ricsQty:
j += 1
i=0
elif amzQty == '':
j += 1
i=0
elif amzQty != ricsQty:
rics = (float(ricsQty))
amz = (float(amzQty))
qty = (rics - amz)
outputWriter.writerow([ricsSupplier, ricsSku, ricsSize + ' ' + ' ' + ricsWidth, '', amzSku, qty])
j += 1
i=0
# Checking if SKU is in form "X Y ZZZ"
elif amzSku == skuNine:
if amzQty == ricsQty:
j += 1
i=0
elif amzQty == '':
j += 1
i=0
elif amzQty != ricsQty:
rics = (float(ricsQty))
amz = (float(amzQty))
qty = (rics - amz)
outputWriter.writerow([ricsSupplier, ricsSku, ricsSize + ' ' + ' ' + ricsWidth, '', amzSku, qty])
j += 1
i=0
# Checking if SKU is in form "X YZZZ"
elif amzSku == skuTen:
if amzQty == ricsQty:
j += 1
i=0
elif amzQty == '':
j += 1
i=0
elif amzQty != ricsQty:
rics = (float(ricsQty))
amz = (float(amzQty))
qty = (rics - amz)
outputWriter.writerow([ricsSupplier, ricsSku, ricsSize + ' ' + ' ' + ricsWidth, '', amzSku, qty])
j += 1
i=0
# Checking if SKU is in form "X Y(ZZZ)"
elif amzSku == skuEleven:
if amzQty == ricsQty:
j += 1
i=0
elif amzQty == '':
j += 1
i=0
elif amzQty != ricsQty:
rics = (float(ricsQty))
amz = (float(amzQty))
qty = (rics - amz)
outputWriter.writerow([ricsSupplier, ricsSku, ricsSize + ' ' + ' ' + ricsWidth, '', amzSku, qty])
j += 1
i=0
# Checking if SKU is in form "X Y (ZZZ)"
elif amzSku == skuTwelve:
if amzQty == ricsQty:
j += 1
i=0
elif amzQty == '':
j += 1
i=0
elif amzQty != ricsQty:
rics = (float(ricsQty))
amz = (float(amzQty))
qty = (rics - amz)
outputWriter.writerow([ricsSupplier, ricsSku, ricsSize + ' ' + ' ' + ricsWidth, '', amzSku, qty])
j += 1
i=0
# If it finds none of those forms and it reaches the end, write the SKU into the CSV file for Amazon listing.
else:
if i == 6362:
outputWriter.writerow([ricsSupplier, ricsSku, ricsSize + ' ' + ' ' + ricsWidth])
j += 1
i=0
i += 1
print("Finished!")
outputFile.close() | 0.195364 | 0.197174 |
import os
import re
import shutil
import sys
import time
from optparse import OptionParser
sys.path.insert(1, re.sub(r'/\w*$', '', os.getcwd()))
import dirq # noqa E402
from dirq import queue # noqa E402
from dirq.QueueSimple import QueueSimple # noqa E402
opts = None
TEST = ''
ProgramName = sys.argv[0]
def init():
""" Initialize. """
global opts, TEST
parser = OptionParser(usage="%prog [OPTIONS] [--] TEST",
version=("%%prog %s" % dirq.VERSION))
parser.add_option('-d', '--debug', dest='debug', action="store_true",
default=False, help="show debugging information")
parser.add_option('-p', '--path', dest='path', type='string', default='',
help="set the queue path")
parser.add_option('-c', '--count', dest='count', type='int', default=0,
help="set the elements count")
parser.add_option("-s", "--size", dest="size", type='int', default=0,
help="set the body size for added elements")
parser.add_option("-r", "--random", dest="random", action="store_true",
default=False, help="randomize the body size")
parser.add_option("--granularity", dest="granularity", type="int",
default=None, help="time granularity for intermediate "
"directories (QueueSimple)")
parser.add_option("--header", dest="header", action="store_true",
default=False, help="set header for added elements")
parser.add_option("--maxelts", dest="maxelts", type='int', default=0,
help="set the maximum number of elements per directory")
parser.add_option("--type", dest="type", type="string", default="simple",
help="set the type of dirq (simple|normal)")
opts, args = parser.parse_args()
if not opts.path:
_die("%s: mandatory option not set: -p/--path", ProgramName)
if len(args) != 0:
TEST = args[0]
else:
parser.print_help()
sys.exit()
def debug(fmt, *arguments):
"""Report a debugging message.
"""
if not opts.debug:
return
message = fmt % arguments
message = re.sub(r'\s+$', '.', message)
sys.stderr.write("# %s %s[%d]: %s\n" %
(time.strftime("%Y/%m/%d-%H:%M:%S",
time.localtime(time.time())),
os.path.basename(sys.argv[0]),
os.getpid(), message))
def _die(fmt, *arguments):
"""Report a fatal error."""
sys.stderr.write(fmt % arguments + "\n")
sys.stderr.flush()
sys.exit(1)
def new_dirq(path, _schema):
"""Create a new Directory::Queue object, optionally with schema.
"""
kwargs = {}
if opts.type == "simple":
if opts.granularity is not None:
kwargs['granularity'] = opts.granularity
return QueueSimple(path, **kwargs)
else:
if _schema:
schema = {'body': 'string',
'header': 'table?'}
kwargs['schema'] = schema
if opts.maxelts:
kwargs['maxelts'] = opts.maxelts
return queue.Queue(path, **kwargs)
def new_dirqs():
"""Create a new Directory::Queue object, optionally with schema.
"""
time1 = time.time()
qs = queue.QueueSet()
for path in opts.path.split(','):
qs.add(new_dirq(path, 0))
debug("created queue set in %.4f seconds", time.time() - time1)
return qs
def test_count():
"""Count the elements in the queue.
"""
qs = new_dirqs()
time1 = time.time()
count = qs.count()
time2 = time.time()
debug("queue set has %d elements", count)
debug("done in %.4f seconds", time2 - time1)
def test_add():
"""Add elements to the queue.
"""
def test_complex():
"""Add elements to the queue.
"""
wd = opts.path
os.mkdir(wd)
qn = 6
paths = []
for i in range(qn):
paths.append(wd + '/q%i' % i)
count = opts.count or 1000
debug("creating %i initial queues. adding %i elements into each." %
(qn, count))
queues = []
t1 = time.time()
while qn:
dq = new_dirq(paths[qn - 1], 1)
debug("adding %d elements to the queue...", count)
element = {}
done = 0
time1 = time.time()
while not count or done < count:
done += 1
element['body'] = 'Element %i \u263A\n' % done
dq.add(element)
time2 = time.time()
debug("done in %.4f seconds", time2 - time1)
queues.append(dq)
qn -= 1
debug("total done in %.4f seconds", time.time() - t1)
time1 = time.time()
i = 3
qs = queue.QueueSet(queues[0:i])
debug("created queue set in %.4f seconds", time.time() - time1)
debug("elements in %i queues: %i" % (i, qs.count()))
debug("adding remaining queues to the set.")
t1 = time.time()
qs.add(queues[i:])
debug("done in %.4f sec." % (time.time() - t1))
debug("total element with added queues: %i" % qs.count())
debug("removing %i first queues." % i)
t1 = time.time()
for dq in queues[0:i]:
qs.remove(dq)
debug("done in %.4f sec." % (time.time() - t1))
debug("number of elements left: %i" % qs.count())
debug("deleting queues from disk...")
for path in paths:
shutil.rmtree(path, ignore_errors=True)
debug("done.")
def test_iterate():
"""Iterate through the set of queues (only lock+unlock).
"""
debug("iterating all elements in the set of queues (one pass)...")
qs = new_dirqs()
done = 0
time1 = time.time()
dq, name = qs.first()
while dq:
if not dq.lock(name):
dq, name = qs.next()
continue
dq.unlock(name)
done += 1
dq, name = qs.next()
time2 = time.time()
debug("done in %.4f seconds (%d elements)", time2 - time1, done)
if __name__ == "__main__":
init()
if TEST == "count":
test_count()
elif TEST == "add":
test_add()
elif TEST == "complex":
test_complex()
elif TEST == "iterate":
test_iterate()
else:
_die("%s: unsupported test: %s", ProgramName, TEST) | test/dqst.py | import os
import re
import shutil
import sys
import time
from optparse import OptionParser
sys.path.insert(1, re.sub(r'/\w*$', '', os.getcwd()))
import dirq # noqa E402
from dirq import queue # noqa E402
from dirq.QueueSimple import QueueSimple # noqa E402
opts = None
TEST = ''
ProgramName = sys.argv[0]
def init():
""" Initialize. """
global opts, TEST
parser = OptionParser(usage="%prog [OPTIONS] [--] TEST",
version=("%%prog %s" % dirq.VERSION))
parser.add_option('-d', '--debug', dest='debug', action="store_true",
default=False, help="show debugging information")
parser.add_option('-p', '--path', dest='path', type='string', default='',
help="set the queue path")
parser.add_option('-c', '--count', dest='count', type='int', default=0,
help="set the elements count")
parser.add_option("-s", "--size", dest="size", type='int', default=0,
help="set the body size for added elements")
parser.add_option("-r", "--random", dest="random", action="store_true",
default=False, help="randomize the body size")
parser.add_option("--granularity", dest="granularity", type="int",
default=None, help="time granularity for intermediate "
"directories (QueueSimple)")
parser.add_option("--header", dest="header", action="store_true",
default=False, help="set header for added elements")
parser.add_option("--maxelts", dest="maxelts", type='int', default=0,
help="set the maximum number of elements per directory")
parser.add_option("--type", dest="type", type="string", default="simple",
help="set the type of dirq (simple|normal)")
opts, args = parser.parse_args()
if not opts.path:
_die("%s: mandatory option not set: -p/--path", ProgramName)
if len(args) != 0:
TEST = args[0]
else:
parser.print_help()
sys.exit()
def debug(fmt, *arguments):
"""Report a debugging message.
"""
if not opts.debug:
return
message = fmt % arguments
message = re.sub(r'\s+$', '.', message)
sys.stderr.write("# %s %s[%d]: %s\n" %
(time.strftime("%Y/%m/%d-%H:%M:%S",
time.localtime(time.time())),
os.path.basename(sys.argv[0]),
os.getpid(), message))
def _die(fmt, *arguments):
"""Report a fatal error."""
sys.stderr.write(fmt % arguments + "\n")
sys.stderr.flush()
sys.exit(1)
def new_dirq(path, _schema):
"""Create a new Directory::Queue object, optionally with schema.
"""
kwargs = {}
if opts.type == "simple":
if opts.granularity is not None:
kwargs['granularity'] = opts.granularity
return QueueSimple(path, **kwargs)
else:
if _schema:
schema = {'body': 'string',
'header': 'table?'}
kwargs['schema'] = schema
if opts.maxelts:
kwargs['maxelts'] = opts.maxelts
return queue.Queue(path, **kwargs)
def new_dirqs():
"""Create a new Directory::Queue object, optionally with schema.
"""
time1 = time.time()
qs = queue.QueueSet()
for path in opts.path.split(','):
qs.add(new_dirq(path, 0))
debug("created queue set in %.4f seconds", time.time() - time1)
return qs
def test_count():
"""Count the elements in the queue.
"""
qs = new_dirqs()
time1 = time.time()
count = qs.count()
time2 = time.time()
debug("queue set has %d elements", count)
debug("done in %.4f seconds", time2 - time1)
def test_add():
"""Add elements to the queue.
"""
def test_complex():
"""Add elements to the queue.
"""
wd = opts.path
os.mkdir(wd)
qn = 6
paths = []
for i in range(qn):
paths.append(wd + '/q%i' % i)
count = opts.count or 1000
debug("creating %i initial queues. adding %i elements into each." %
(qn, count))
queues = []
t1 = time.time()
while qn:
dq = new_dirq(paths[qn - 1], 1)
debug("adding %d elements to the queue...", count)
element = {}
done = 0
time1 = time.time()
while not count or done < count:
done += 1
element['body'] = 'Element %i \u263A\n' % done
dq.add(element)
time2 = time.time()
debug("done in %.4f seconds", time2 - time1)
queues.append(dq)
qn -= 1
debug("total done in %.4f seconds", time.time() - t1)
time1 = time.time()
i = 3
qs = queue.QueueSet(queues[0:i])
debug("created queue set in %.4f seconds", time.time() - time1)
debug("elements in %i queues: %i" % (i, qs.count()))
debug("adding remaining queues to the set.")
t1 = time.time()
qs.add(queues[i:])
debug("done in %.4f sec." % (time.time() - t1))
debug("total element with added queues: %i" % qs.count())
debug("removing %i first queues." % i)
t1 = time.time()
for dq in queues[0:i]:
qs.remove(dq)
debug("done in %.4f sec." % (time.time() - t1))
debug("number of elements left: %i" % qs.count())
debug("deleting queues from disk...")
for path in paths:
shutil.rmtree(path, ignore_errors=True)
debug("done.")
def test_iterate():
"""Iterate through the set of queues (only lock+unlock).
"""
debug("iterating all elements in the set of queues (one pass)...")
qs = new_dirqs()
done = 0
time1 = time.time()
dq, name = qs.first()
while dq:
if not dq.lock(name):
dq, name = qs.next()
continue
dq.unlock(name)
done += 1
dq, name = qs.next()
time2 = time.time()
debug("done in %.4f seconds (%d elements)", time2 - time1, done)
if __name__ == "__main__":
init()
if TEST == "count":
test_count()
elif TEST == "add":
test_add()
elif TEST == "complex":
test_complex()
elif TEST == "iterate":
test_iterate()
else:
_die("%s: unsupported test: %s", ProgramName, TEST) | 0.322633 | 0.120077 |
import os
import json
import argparse
import random
import numpy as np
from scoring import inception
def get_args():
parser = argparse.ArgumentParser()
# Oft-changed parameters
parser.add_argument('-d', '--data_set', type=str, default='cifar', help='Can be either cifar|imagenet')
parser.add_argument('-pd', '--preds_path', required=True, type=str, default=None, help='The filepath to our predictions, or where to save them')
# Defaulted parameters
parser.add_argument('-i', '--data_dir', type=str, default='../data', help='Location for the dataset')
parser.add_argument('-np', '--num_predictions_', type=int, default=None, help='Num predictions to generate')
parser.add_argument('-ns', '--num_splits_', type=int, default=1, help='Num splits for the inception score')
args = parser.parse_args()
print('input args:\n', json.dumps(vars(args), indent=4, separators=(',',':'))) # pretty print args
return args
if __name__ == "__main__":
args = get_args()
preds_path = args.preds_path
if os.path.exists(preds_path):
print('loading predictions from {}...'.format(preds_path))
preds = np.load(preds_path)['preds']
else:
if args.data_set == 'imagenet':
import data.imagenet_data as imagenet_data
DataLoader = imagenet_data.DataLoader
elif args.data_set == 'cifar':
import data.cifar10_data as cifar10_data
DataLoader = cifar10_data.DataLoader
else:
print('data_set (-d) must be either cifar|imagenet')
exit(1)
print('loading samples from {}|{}...'.format(args.data_dir, args.data_set))
train_data = DataLoader(args.data_dir, 'train', 100, shuffle=False, return_labels=False)
samples = train_data.data
samples = list(samples)
random.shuffle(samples)
if args.num_predictions_:
samples = samples[:args.num_predictions_]
print(np.min(samples[0]))
print(np.max(samples[0]))
# process = lambda img: ((img + 1) * 255 / 2).astype('uint8')
# samples = [process(s) for s in samples]
print('getting predictions on {} samples...'.format(len(samples)))
preds = inception.get_inception_preds(samples)
print('saving predictions to {} ...'.format(preds_path))
np.savez(preds_path, preds=preds)
if args.num_predictions_:
preds = preds[:args.num_predictions_]
print('getting inception score on {} predictions with {} splits...'.format(len(list(preds)), args.num_splits_))
mean, var = inception.get_inception_score_from_preds(preds, splits=args.num_splits_)
print('inception score: mean={}, variance={}'.format(mean, var)) | get_inception_score_with_dataloader.py | import os
import json
import argparse
import random
import numpy as np
from scoring import inception
def get_args():
parser = argparse.ArgumentParser()
# Oft-changed parameters
parser.add_argument('-d', '--data_set', type=str, default='cifar', help='Can be either cifar|imagenet')
parser.add_argument('-pd', '--preds_path', required=True, type=str, default=None, help='The filepath to our predictions, or where to save them')
# Defaulted parameters
parser.add_argument('-i', '--data_dir', type=str, default='../data', help='Location for the dataset')
parser.add_argument('-np', '--num_predictions_', type=int, default=None, help='Num predictions to generate')
parser.add_argument('-ns', '--num_splits_', type=int, default=1, help='Num splits for the inception score')
args = parser.parse_args()
print('input args:\n', json.dumps(vars(args), indent=4, separators=(',',':'))) # pretty print args
return args
if __name__ == "__main__":
    # Compute (or reuse) Inception-network predictions for a dataset, then
    # report the inception score.
    args = get_args()
    preds_path = args.preds_path
    if os.path.exists(preds_path):
        # Reuse predictions cached by a previous run.
        print('loading predictions from {}...'.format(preds_path))
        preds = np.load(preds_path)['preds']
    else:
        if args.data_set == 'imagenet':
            import data.imagenet_data as imagenet_data
            DataLoader = imagenet_data.DataLoader
        elif args.data_set == 'cifar':
            import data.cifar10_data as cifar10_data
            DataLoader = cifar10_data.DataLoader
        else:
            print('data_set (-d) must be either cifar|imagenet')
            raise SystemExit(1)
        print('loading samples from {}|{}...'.format(args.data_dir, args.data_set))
        train_data = DataLoader(args.data_dir, 'train', 100, shuffle=False, return_labels=False)
        # Materialize and shuffle so any truncation below is a random subset.
        samples = list(train_data.data)
        random.shuffle(samples)
        if args.num_predictions_:
            samples = samples[:args.num_predictions_]
        # Sanity check: print the value range of one sample (inception
        # preprocessing expects a particular pixel scale).
        print(np.min(samples[0]))
        print(np.max(samples[0]))
        print('getting predictions on {} samples...'.format(len(samples)))
        preds = inception.get_inception_preds(samples)
        print('saving predictions to {} ...'.format(preds_path))
        np.savez(preds_path, preds=preds)
    if args.num_predictions_:
        # Also truncate cached predictions so both paths score the same count.
        preds = preds[:args.num_predictions_]
    print('getting inception score on {} predictions with {} splits...'.format(len(preds), args.num_splits_))
    mean, var = inception.get_inception_score_from_preds(preds, splits=args.num_splits_)
    print('inception score: mean={}, variance={}'.format(mean, var))
import pickle
import numpy as np
import numpy.linalg as la
from numpy.random import default_rng
# Create the random number generator
rng = default_rng()
class Organization(object):
    """Defines a class Organization which contains an organization network
    structure (a.k.a. an organizational form) populated with agents."""

    def __init__(self, struct="tree"):
        """Creates an instance of class Organization with a specified structure
        and corresponding parameters for that structure. The default is a
        standard tree organizational form.

        Parameters
        ----------
        struct : STRING, optional
            Defines the form or structure of the organization. Only "tree"
            is currently supported; it is also the default.

        Returns
        -------
        None.
        """
        # Set org structure
        self.struct = struct
        # Create network graph of organization
        if self.struct == "tree":
            # Load organization, parents, and siblings from file.
            # NOTE(review): the three pickle files must exist in the current
            # working directory, and the file handles opened here are never
            # explicitly closed.
            self.org = pickle.load(open("cliquetree_org.pickle","rb"))
            self.A_pars = pickle.load(open("cliquetree_parents.pickle","rb"))
            self.A_sibs = pickle.load(open("cliquetree_siblings.pickle","rb"))
            # Define other relationships: grandparents are parents-of-parents,
            # kids are the transpose of parents, grandkids are kids-of-kids.
            self.A_gpars = np.matmul(self.A_pars,self.A_pars)
            self.A_kids = np.transpose(self.A_pars)
            self.A_gkids = np.matmul(self.A_kids,self.A_kids)
            # Correct grandparent relationship for those without grandparents.
            # NOTE(review): the hard-coded 0:6 range assumes the specific
            # pickled clique-tree layout — confirm if the pickles change.
            self.A_gpars[0:6,0] = np.ones((6))
        else:
            # NOTE(review): only warns; the adjacency attributes stay unset,
            # so the statements below will raise AttributeError.
            print("Input 'struct' for 'Organization' is not valid.")
        """Population Variables"""
        self.pops = [] # List of populations for the org
        self.from_pop = [] # Array of populations that current employees are from
        """Network Count Parameters"""
        # For nodes, parents, grandparents, siblings, kids, and grandkids. No
        # values are allowed to be zero because they're mostly used as
        # divisors and the matrices will be zero in those cases (the boolean
        # array adds 1 wherever a row sum is 0).
        self.n_nodes = len(self.org.nodes())
        self.id = np.identity(self.n_nodes)
        self.norm_pars = np.divide(self.id,np.sum(self.A_pars,axis=1) \
                                   + np.array(np.sum(self.A_pars,axis=1) == 0))
        self.norm_gpars = np.divide(self.id,np.sum(self.A_gpars,axis=1) \
                                    + np.array(np.sum(self.A_gpars,axis=1) == 0))
        self.norm_sibs = np.divide(self.id,np.sum(self.A_sibs,axis=1) \
                                   + np.array(np.sum(self.A_sibs,axis=1) == 0))
        self.norm_kids = np.divide(self.id,np.sum(self.A_kids,axis=1) \
                                   + np.array(np.sum(self.A_kids,axis=1) == 0))
        self.norm_gkids = np.divide(self.id,np.sum(self.A_gkids,axis=1) \
                                    + np.array(np.sum(self.A_gkids,axis=1) == 0))
        """Unit Vectors"""
        # Basis vectors for selecting single culture attributes
        self.unit_x = np.array([1,0,0])
        self.unit_y = np.array([0,1,0])
        self.unit_z = np.array([0,0,1])
        """Normalizing Parameters"""
        # Normalization divisors for socialization, branch, and promotion calcs
        self.norm_soc = np.divide(self.id,np.ones([self.n_nodes,1]) \
                                  + np.array(np.sum(self.A_pars,axis=1) > 0) \
                                  + np.array(np.sum(self.A_sibs,axis=1) > 0))
        self.norm_branch = np.divide(self.id,np.ones([self.n_nodes,1]) \
                                     + np.array(np.sum(self.A_kids,axis=1) > 0))
        self.norm_prom = np.divide(self.id,np.ones([self.n_nodes,1]) \
                                   + np.array(np.sum(self.A_gpars,axis=1) > 0))
        """Culture Parameters & Variables"""
        self.n_cultatt = 3 # Number of culture attributes
        self.index_sim = 0 # Similarity index
        self.index_perf = 1 # Performance index
        self.index_inc = 2 # Inclusiveness index
        self.culture = np.empty([self.n_nodes,self.n_cultatt])
        """Performance Parameters & Variables"""
        self.n_perfatt = 2 # Number of performance attributes for beta fns
        self.index_mean = 0 # Performance mean index
        self.index_disp = 1 # Performance dispersion (like variance) index
        self.perf_params = np.zeros([self.n_nodes,self.n_perfatt])
        self.perf_indiv = np.zeros([self.n_nodes,])
        self.perf_branch = np.zeros([self.n_nodes,])
        """Promotion Parameters & Variables"""
        self.prom_fit = np.zeros([self.n_nodes,self.n_cultatt])
        self.prom_score = np.zeros([self.n_nodes,])
        """Retirement Parameters & Variables"""
        self.n_retire_opts = 2 # Binary outcome: stay (0) or retire (1)
        self.retire_prob = 0.2 # Per-step retirement probability
        self.empty_positions = np.zeros([self.n_nodes,])

    def __repr__(self):
        """Returns a representation of the organization"""
        return self.__class__.__name__

    def fill_org(self, pops):
        """
        Populates the culture and performance parameters for each member of the
        organization given a set of populations.

        Parameters
        ----------
        pops : Population Array
            An array of one or more populations for use in organization
            initialization and hiring.

        Returns
        -------
        None.
        """
        # Add populations to the organization for hiring
        self.pops = pops
        self.add_employee()
        # Initialize structures for populating culture
        self.add_culture()
        # NOTE(review): this aliases social and culture to the same array;
        # they only become independent objects once socialize() rebinds
        # self.social. If an independent initial copy is intended, use
        # self.culture.copy() — TODO confirm.
        self.social = self.culture
        # Initialize structures for populating performance
        self.add_performance()

    def add_employee(self, loc=-1):
        """Adds one or more employees to the organization by sampling from
        population probabilities. Either creates one employee at a location
        (loc) or all employees (-1)."""
        if loc > -1:
            # Single replacement hire: sample from the hiring probabilities
            self.from_pop[loc] = rng.choice(a=len(self.pops),
                p=[self.pops[ii].rep_gen for ii in np.arange(len(self.pops))])
        else: # assume all nodes
            # Founding cohort: sample every seat from starting probabilities
            self.from_pop = rng.choice(a=len(self.pops),size=self.n_nodes,
                p=[self.pops[ii].rep_start for ii \
                   in np.arange(len(self.pops))])

    def add_culture(self,loc=-1):
        """Creates culture matrix for all the nodes from populations (-1), or
        adds culture for a single specified node (loc).

        CULTURE DETERMINATION RULES:
            x = similarity [0,1]
            y = performance [0,1]
            z = inclusiveness [0,1]
        FOR MCC SIM:
            x + y + z = 1
            x = y
            x = (1 - z)/2
        Therefore, sample z, calculate x & y from z
        """
        # Generate range of nodes to update
        if loc > -1: # Just get the one node
            node_range = np.arange(loc,loc+1)
        else: # Get all nodes
            node_range = np.arange(self.n_nodes)
        # Generate culture values by first cycling through the nodes
        for ii in node_range:
            # CASE 1: uniform_2var
            if self.pops[self.from_pop[ii]].aff_dist == "uniform_2var":
                # Sample z from a LINEAR UNIFORM distribution
                self.culture[ii,:] = np.array([linear_uniform()])
            # CASE 2: beta_2var
            elif self.pops[self.from_pop[ii]].aff_dist == "beta_2var":
                # Sample z from a LINEAR BETA distribution with mean at
                # aff_inc and var at aff_var.
                self.culture[ii,:] = np.array([linear_beta(
                    self.pops[self.from_pop[ii]].aff_inc,
                    self.pops[self.from_pop[ii]].aff_var,
                    )])
            # CASE 3: beta_3var
            elif self.pops[self.from_pop[ii]].aff_dist == "beta_3var":
                # Sample x from a TRIANGULAR BETA distribution with means at
                # aff_sim & aff_perf, and both vars at aff_var.
                self.culture[ii,:] = np.array([triangle_beta(
                    self.pops[self.from_pop[ii]].aff_sim,
                    self.pops[self.from_pop[ii]].aff_var,
                    self.pops[self.from_pop[ii]].aff_perf,
                    self.pops[self.from_pop[ii]].aff_var,
                    )])
            # CASE 4: "uniform_3var"
            else:
                # Sample z from a TRIANGULAR UNIFORM distribution
                self.culture[ii,:] = np.array([triangle_uniform()])

    def add_performance(self,loc=-1):
        """Adds performance matrix for either one (loc) or all (-1) nodes
        from the populations."""
        # Generate range of nodes to update
        if loc > -1: # Just get the one node
            node_range = np.arange(loc,loc+1)
        else: # Get all nodes
            node_range = np.arange(self.n_nodes)
        # Generate performance values by cycling through the nodes
        for ii in node_range:
            # Draw a performance distribution mean for each employee
            beta_a, beta_b = beta(self.pops[self.from_pop[ii]].perf_mean,
                self.pops[self.from_pop[ii]].perf_var)
            self.perf_params[ii,self.index_mean] = rng.beta(beta_a, beta_b)
            # Set performance dispersion for each employee
            self.perf_params[ii,self.index_disp] = \
                self.pops[self.from_pop[ii]].perf_var

    def org_step(self,n_steps = 1):
        """Steps the organization forward in time a specified number of steps,
        and otherwise defaults to one step. Assumes that the organization has
        already been filled."""
        # Create history structure for the number of nodes and steps.
        # NOTE(review): each call replaces any previously recorded history.
        self.history = History(n_steps,self.n_nodes,self.n_cultatt,
            self.n_perfatt)
        for st in np.arange(n_steps):
            # Socialize agents
            self.socialize()
            # Update individual performances
            self.perform_individuals()
            # Calculate branch performances by reverse iteration
            self.perform_branches()
            # Calculate promotion fitnesses & scores
            self.calc_promotion_fitness()
            self.calc_promotion_scores()
            # Record History from turn (promotion/hiring reflect in next step)
            self.history.record_history(st, self.from_pop, self.culture,
                self.social, self.perf_params, self.perf_indiv,
                self.perf_branch, self.perf_org, self.prom_fit,
                self.prom_score)
            # Perform retirement
            self.gen_retire()
            # Perform promotion & hiring
            self.emp_fill()

    def socialize(self):
        """Socialization function: blends each agent's intrinsic culture with
        the mean social values of its parents and siblings."""
        term_pars = np.matmul(self.norm_pars,
            np.matmul(self.A_pars,self.social))
        term_sibs = np.matmul(self.norm_sibs,
            np.matmul(self.A_sibs,self.social))
        # Average of own culture plus whichever neighbor terms exist
        self.social = np.matmul(self.norm_soc,
            self.culture + term_pars + term_sibs)

    def perform_individuals(self):
        """Generate performance of individuals"""
        # Generate performance values by first cycling through the nodes
        for ii in np.arange(self.n_nodes):
            # Next, check its distribution type.
            # NOTE(review): this branches on aff_dist (the culture scheme),
            # not perf_dist; aff_dist never equals the plain string
            # "uniform" for the documented schemes, so the uniform branch
            # looks unreachable — confirm whether perf_dist was intended.
            if self.pops[self.from_pop[ii]].aff_dist == "uniform":
                # Sample perf_indiv from a UNIFORM distribution
                self.perf_indiv[ii] = rng.uniform()
            else: # Otherwise defaults to beta distribution
                # Else sample perf_indiv from a BETA distribution
                beta_a, beta_b = beta(self.perf_params[ii,self.index_mean],
                    self.perf_params[ii,self.index_disp])
                self.perf_indiv[ii] = rng.beta(beta_a, beta_b)

    def perform_branches(self):
        """Generate performance for branches in reverse. NOTE: Currently
        calculated in reverse from last created node to first node to ensure
        that parent nodes include branch performances of children."""
        # Calculate branch performance values by first cycling through nodes
        for ii in np.arange(self.n_nodes-1,-1,-1):
            # Calculate branch performance for each node
            term_kids = self.norm_kids[ii,ii] \
                * np.matmul(self.A_kids[ii,:],self.perf_branch)
            self.perf_branch[ii] = self.norm_branch[ii,ii] \
                * (self.perf_indiv[ii] + term_kids)
        # Calculate org performance by taking root node's branch performance.
        # Value comes from term_kids because the loop above is reversed: the
        # final iteration (ii == 0) is the root node.
        self.perf_org = term_kids

    def calc_promotion_fitness(self):
        """Calculates the promotion fitness for each node. NOTE: Currently
        calculates similarity term as an average of the culture of all parents,
        which may not be appropriate for all promotion algorithms."""
        # Calculate vectors for populating promotion fitness matrix:
        # similarity = 1 - distance to mean grandparent social vector
        term_sim = np.ones(self.n_nodes) \
            - la.norm(x = np.matmul(self.norm_gpars,
                                    np.matmul(self.A_gpars,self.social)) \
                      - self.social,axis = 1)
        term_perf = self.perf_branch
        # Inclusiveness is the z-component of each node's culture
        term_inc = np.matmul(self.culture,self.unit_z)
        # Compile promotion fitness matrix
        self.prom_fit = np.stack((term_sim,term_perf,term_inc),axis=-1)

    def calc_promotion_scores(self):
        """Calculates the promotion score for each node. Make sure to use the
        copy() method if using np.diag or np.diagonal, which returns a read/
        write view starting with NumPy 1.10."""
        self.prom_score = np.diag(np.matmul(np.matmul(
            self.A_gpars,self.social),np.transpose(self.prom_fit))).copy()

    def gen_retire(self):
        """Generates the members of the population to retire with a specified
        probability."""
        # 1 marks a position emptied by retirement, 0 a staying employee
        self.empty_positions = rng.choice(a=self.n_retire_opts,
            size=self.n_nodes, p=[1-self.retire_prob,self.retire_prob])

    def emp_fill(self):
        """Promote non-retiring member into openings from grandchildren."""
        # Loop through nodes from top down to find the ones that are empty
        for ii in np.arange(self.n_nodes):
            # Only perform actions for empty positions
            if self.empty_positions[ii] == 1:
                # Reset potentially promotable options
                filled = False
                A_prom = self.A_kids
                # Loop through until the empty position has been filled
                while not(filled):
                    # If employees exist in the selected generation
                    if np.sum(A_prom[ii,:])>0:
                        # If at least one employee is promotable (occupied)
                        if np.dot(A_prom[ii,:],1-self.empty_positions)>0:
                            # Get the location of the most qualified employee
                            emp_to_prom = np.argmax(A_prom[ii,:] \
                                * self.prom_score * (1 - self.empty_positions))
                            # Promote that employee
                            self.emp_prom(emp_to_prom,ii)
                            filled = True
                        # Otherwise, no employees in generation are promotable
                        else:
                            # So go to the next generation (get children)
                            A_prom = self.A_kids @ A_prom
                    # No employees exist in generation (no children)
                    else:
                        # So hire a new employee to the position
                        self.emp_hire(ii)
                        filled = True

    def emp_prom(self,loc_from,loc_to):
        """Promote an employee from one location to another."""
        # Populate new location
        self.culture[loc_to,:] = self.culture[loc_from,:]
        self.social[loc_to,:] = self.social[loc_from,:]
        self.from_pop[loc_to] = self.from_pop[loc_from]
        self.perf_params[loc_to,:] = self.perf_params[loc_from,:]
        # Clear original location (-1 marks "no population")
        self.culture[loc_from,:] = np.zeros(self.n_cultatt)
        self.from_pop[loc_from] = -1
        self.perf_branch[loc_from] = 0
        self.perf_indiv[loc_from] = 0
        self.perf_params[loc_from,:] = np.zeros(self.n_perfatt)
        self.prom_fit[loc_from,:] = np.zeros(self.n_cultatt)
        self.prom_score[loc_from] = 0
        self.social[loc_from,:] = np.zeros(self.n_cultatt)
        # Set location as needing to be filled
        self.empty_positions[loc_from] = 1

    def emp_hire(self,loc_to):
        """Hire new employees into opening by population sampling."""
        # Pick a new employee from possible populations
        self.add_employee(loc_to)
        # Generate initial culture for that employee
        self.add_culture(loc_to)
        # A new hire starts socialized exactly at their own culture
        self.social[loc_to,:] = self.culture[loc_to,:]
        # Generate performance parameters for that employee
        self.add_performance(loc_to)
        # Set all performance values to zero for now
        self.perf_branch[loc_to] = 0
        self.perf_indiv[loc_to] = 0
        self.prom_fit[loc_to,:] = np.zeros(self.n_cultatt)
        self.prom_score[loc_to] = 0

    def return_results(self):
        """Return the history of the organization."""
        return self.history
class Population(object):
    """A labor pool the organization samples from, both for its founding
    members and for later hires."""

    def __init__(self,starting=1,hires=1,aff_dist="beta_2var",aff_sim=0.25,
                 aff_perf=0.25,aff_inc=0.5,aff_var=15,perf_dist="beta",
                 perf_mean=0.5,perf_var=15):
        """
        Initializes an instance of class population.

        Parameters
        ----------
        starting : [0,1], optional
            Probability that a founding member of the organization comes
            from this population. Probabilities across all populations must
            sum to 1. The default is 1.
        hires : [0,1], optional
            Probability that a new hire comes from this population.
            Probabilities across all populations must sum to 1. The default
            is 1.
        aff_dist : STRING, optional
            The culture sampling scheme of the population, e.g. "beta_2var",
            "uniform_2var", "beta_3var", or "uniform_3var". The default is
            "beta_2var".
        aff_sim : [0.1,0.9], optional
            Mean of the sampling distribution for an agent's affinity for
            cultural similarity (beta schemes only). The default is 0.25.
        aff_perf : [0.1,0.9], optional
            Mean of the sampling distribution for an agent's affinity for
            performance (beta schemes only). The default is 0.25.
        aff_inc : [0.1,0.9], optional
            Mean of the sampling distribution for an agent's affinity for
            inclusiveness (beta schemes only). The default is 0.5.
        aff_var : (0,inf), optional
            Dispersion parameter of the culture beta distribution (beta
            schemes only). The default is 15.
        perf_dist : STRING, optional
            The performance sampling scheme, either "beta" or "uniform".
            The default is "beta".
        perf_mean : [0.1,0.9], optional
            Mean of the sampling distribution for an agent's performance
            (beta only). The default is 0.5.
        perf_var : (0,inf), optional
            Dispersion parameter of the performance beta distribution (beta
            only). The default is 15.

        Returns
        -------
        None.
        """
        # Performance sampling parameters
        self.perf_dist = perf_dist
        self.perf_mean = perf_mean
        self.perf_var = perf_var
        # Culture (affinity) sampling parameters
        self.aff_dist = aff_dist
        self.aff_sim = aff_sim
        self.aff_perf = aff_perf
        self.aff_inc = aff_inc
        self.aff_var = aff_var
        # Representation probabilities: founding cohort vs. later hires
        self.rep_start = starting
        self.rep_gen = hires
class History(object):
    """Per-step archive of simulation results.

    Stores trajectories for demographics, culture, socialization (each with
    similarity/performance/inclusiveness attributes), performance (beta
    parameters, individual, branch, and organization level), and promotion
    (fitness and score)."""

    def __init__(self,n_steps,n_nodes,n_cultatt,n_perfatt):
        # Common shapes: one row per step, one entry (or slice) per node
        shape_node = (n_steps, n_nodes)
        shape_cult = (n_steps, n_nodes, n_cultatt)
        # Allocate all history arrays zero-filled
        self.demographics = np.zeros(shape_node)
        self.culture = np.zeros(shape_cult)
        self.socialization = np.zeros(shape_cult)
        self.performance_params = np.zeros((n_steps, n_nodes, n_perfatt))
        self.performance_indiv = np.zeros(shape_node)
        self.performance_branch = np.zeros(shape_node)
        self.performance_org = np.zeros((n_steps,))
        self.promotion_fitness = np.zeros(shape_cult)
        self.promotion_score = np.zeros(shape_node)

    def record_history(self,step,demo,cult,soc,perf_par,perf_ind,perf_bra,
                       perf_org,prom_fit,prom_sco):
        """Snapshot the supplied state arrays into row `step`.

        Copies are taken so later in-place mutation by the caller cannot
        alter what was recorded."""
        # Demographic and cultural state
        self.demographics[step,:] = demo.copy()
        self.culture[step,:,:] = cult.copy()
        self.socialization[step,:,:] = soc.copy()
        # Promotion state
        self.promotion_fitness[step,:,:] = prom_fit.copy()
        self.promotion_score[step,:] = prom_sco.copy()
        # Performance state at every aggregation level
        self.performance_params[step,:,:] = perf_par.copy()
        self.performance_indiv[step,:] = perf_ind.copy()
        self.performance_branch[step,:] = perf_bra.copy()
        self.performance_org[step] = perf_org.copy()
def beta(mu,phi):
    """Convert a (mean, dispersion) parameterization of the beta
    distribution into the standard (alpha, beta) shape parameters."""
    alpha = mu*phi
    beta_shape = (1-mu)*phi
    return alpha, beta_shape
def linear_uniform():
    """Draw z uniformly on [0,1) and return (x, y, z) with x = y = (1-z)/2,
    so the three components sum to one (2x + z = 1)."""
    z = rng.uniform()
    x = (1 - z)/2
    return x, x, z
def linear_beta(mu,phi):
    """Draw z from a beta distribution with mean mu and dispersion phi
    (converted to alpha/beta shapes via beta()) and return (x, y, z) with
    x = y = (1-z)/2, so the three components sum to one (2x + z = 1)."""
    a, b = beta(mu,phi)
    z = rng.beta(a, b)
    x = (1 - z)/2
    return x, x, z
def triangle_uniform():
    """Draw three uniformly random values summing to one via triangle point
    picking (https://mathworld.wolfram.com/TrianglePointPicking.html):
    sample x and y on [0,1) and reflect any pair with x + y > 1 back into
    the x + y < 1 triangle, then set z = 1 - x - y."""
    x = rng.uniform()
    y = rng.uniform()
    if x + y > 1:
        # Reflect into the lower-left triangle
        x, y = 1 - x, 1 - y
    return x, y, 1 - x - y
def triangle_beta(mu1,phi1,mu2,phi2):
    """Draw three beta-distributed values summing to one via triangle point
    picking (https://mathworld.wolfram.com/TrianglePointPicking.html):
    rejection-sample x ~ Beta(mu1, phi1) and y ~ Beta(mu2, phi2) until
    x + y <= 1, then set z = 1 - x - y."""
    a1, b1 = beta(mu1,phi1)
    a2, b2 = beta(mu2,phi2)
    while True:
        x = rng.beta(a1,b1)
        y = rng.beta(a2,b2)
        if x + y <= 1:
            break
    return x, y, 1 - x - y
if __name__ == '__main__':
    # Smoke test: constructing an Organization exercises the pickle loading
    # and all of the derived network/normalization setup.
    org_test = Organization()
import numpy as np
import numpy.linalg as la
from numpy.random import default_rng
# Create the random number generator
rng = default_rng()
class Organization(object):
"""Defines a class Organization which contains an organization network
structure (a.k.a. an organizational form) populated with agents."""
def __init__(self, struct="tree"):
"""Creates an instance of class Organization with a specified structure
and corresponding parameters for that structure. The default is a
standard tree organizational form.
Parameters
----------
struct : STRING, optional
Defines the form or structure of the organization. The
default is "tree".
pops : Population, required
One or more populations provided to the organization in an array of
populations.
Returns
-------
None.
"""
# Set org structure
self.struct = struct
# Create network graph of organization
if self.struct == "tree":
# Load organization, parents, and siblings from file
self.org = pickle.load(open("cliquetree_org.pickle","rb"))
self.A_pars = pickle.load(open("cliquetree_parents.pickle","rb"))
self.A_sibs = pickle.load(open("cliquetree_siblings.pickle","rb"))
# Define other relationships
self.A_gpars = np.matmul(self.A_pars,self.A_pars)
self.A_kids = np.transpose(self.A_pars)
self.A_gkids = np.matmul(self.A_kids,self.A_kids)
# Correct grandparent relationship for those without grandparents
self.A_gpars[0:6,0] = np.ones((6))
else:
print("Input 'struct' for 'Organization' is not valid.")
"""Population Variables"""
self.pops = [] # List of populations for the org
self.from_pop = [] # Array of populations that current employees are from
"""Network Count Parameters"""
# For nodes, parents, grandparents, siblings,kids, and grandkids. No
# values are allowed to be zero because they're mostly used as
# divisors and the matrices will be zero in those cases.
self.n_nodes = len(self.org.nodes())
self.id = np.identity(self.n_nodes)
self.norm_pars = np.divide(self.id,np.sum(self.A_pars,axis=1) \
+ np.array(np.sum(self.A_pars,axis=1) == 0))
self.norm_gpars = np.divide(self.id,np.sum(self.A_gpars,axis=1) \
+ np.array(np.sum(self.A_gpars,axis=1) == 0))
self.norm_sibs = np.divide(self.id,np.sum(self.A_sibs,axis=1) \
+ np.array(np.sum(self.A_sibs,axis=1) == 0))
self.norm_kids = np.divide(self.id,np.sum(self.A_kids,axis=1) \
+ np.array(np.sum(self.A_kids,axis=1) == 0))
self.norm_gkids = np.divide(self.id,np.sum(self.A_gkids,axis=1) \
+ np.array(np.sum(self.A_gkids,axis=1) == 0))
"""Unit Vectors"""
self.unit_x = np.array([1,0,0])
self.unit_y = np.array([0,1,0])
self.unit_z = np.array([0,0,1])
"""Normalizing Parameters"""
# Normalization divisors for socialization, branch, and promotion calcs
self.norm_soc = np.divide(self.id,np.ones([self.n_nodes,1]) \
+ np.array(np.sum(self.A_pars,axis=1) > 0) \
+ np.array(np.sum(self.A_sibs,axis=1) > 0))
self.norm_branch = np.divide(self.id,np.ones([self.n_nodes,1]) \
+ np.array(np.sum(self.A_kids,axis=1) > 0))
self.norm_prom = np.divide(self.id,np.ones([self.n_nodes,1]) \
+ np.array(np.sum(self.A_gpars,axis=1) > 0))
"""Culture Parameters & Variables"""
self.n_cultatt = 3 # Number of culture attributes
self.index_sim = 0 # Similarity index
self.index_perf = 1 # Performance index
self.index_inc = 2 # Inclusiveness index
self.culture = np.empty([self.n_nodes,self.n_cultatt])
"""Performance Parameters & Variables"""
self.n_perfatt = 2 # Number of performance attributes for beta fns
self.index_mean = 0 # Performance mean index
self.index_disp = 1 # Performance dispersion (like variance) index
self.perf_params = np.zeros([self.n_nodes,self.n_perfatt])
self.perf_indiv = np.zeros([self.n_nodes,])
self.perf_branch = np.zeros([self.n_nodes,])
"""Promotion Parameters & Variables"""
self.prom_fit = np.zeros([self.n_nodes,self.n_cultatt])
self.prom_score = np.zeros([self.n_nodes,])
"""Retirement Parameters & Variables"""
self.n_retire_opts = 2
self.retire_prob = 0.2
self.empty_positions = np.zeros([self.n_nodes,])
def __repr__(self):
"""Returns a representation of the organization"""
return self.__class__.__name__
def fill_org(self, pops):
"""
Populates the culture and performance parameters for each member of the
organization given a set of populations.
Parameters
----------
pops : Population Array
An array of one or more populations for use in organization
initialization and hiring.
Returns
-------
None.
"""
# Add populations to the organization for hiring
self.pops = pops
self.add_employee()
# Initialize structures for populating culture
self.add_culture()
self.social = self.culture
# Initialize sutructures for populating performance
self.add_performance()
def add_employee(self, loc=-1):
"""Adds one or more employees to the organization by sampling from
population probabilities. Either creates one employee at a location
(loc) or all employees (-1)."""
if loc > -1:
self.from_pop[loc] = rng.choice(a=len(self.pops),
p=[self.pops[ii].rep_gen for ii in np.arange(len(self.pops))])
else: # assume all nodes
self.from_pop = rng.choice(a=len(self.pops),size=self.n_nodes,
p=[self.pops[ii].rep_start for ii \
in np.arange(len(self.pops))])
def add_culture(self,loc=-1):
"""Creates culture matrix for all the nodes from populations (-1), or
adds culture for a single specified node (loc).
CULTURE DETERMINATION RULES:
x = similarity [0,1]
y = performance [0,1]
z = inclusiveness [0,1]
FOR MCC SIM:
x + y + z = 1
x = y
x = (1 - z)/2
Therefore, sample z, calculate x & y from z
"""
# Generate range of nodes to update
if loc > -1: # Just get the one node
node_range = np.arange(loc,loc+1)
else: # Get all nodes
node_range = np.arange(self.n_nodes)
# Generate culture values by first cycling through the nodes
for ii in node_range:
# CASE 1: uniform_2var
if self.pops[self.from_pop[ii]].aff_dist == "uniform_2var":
# Sample z from a LINEAR UNIFORM distribution
self.culture[ii,:] = np.array([linear_uniform()])
# CASE 2: beta_2var
elif self.pops[self.from_pop[ii]].aff_dist == "beta_2var":
# Sample z form a LINEAR BETA distribution with mean at
# aff_inc and var at aff_var.
self.culture[ii,:] = np.array([linear_beta(
self.pops[self.from_pop[ii]].aff_inc,
self.pops[self.from_pop[ii]].aff_var,
)])
# CASE 3: beta_3var
elif self.pops[self.from_pop[ii]].aff_dist == "beta_3var":
# Sample x from a TRIANGULAR BETA distribution with means at
# aff_sim & aff_perf, and both vars at aff_var.
self.culture[ii,:] = np.array([triangle_beta(
self.pops[self.from_pop[ii]].aff_sim,
self.pops[self.from_pop[ii]].aff_var,
self.pops[self.from_pop[ii]].aff_perf,
self.pops[self.from_pop[ii]].aff_var,
)])
# CASE 4: "uniform_3var"
else:
# Sample z from a TRIANGULAR UNIFORM distribution
self.culture[ii,:] = np.array([triangle_uniform()])
def add_performance(self,loc=-1):
"""Adds performance matrix for either one (loc) or all (-1) nodes
from the populations."""
# Generate range of nodes to update
if loc > -1: # Just get the one node
node_range = np.arange(loc,loc+1)
else: # Get all nodes
node_range = np.arange(self.n_nodes)
# Generate performance values by cycling through the nodes
for ii in node_range:
# Draw a performance distribution mean for each employee
beta_a, beta_b = beta(self.pops[self.from_pop[ii]].perf_mean,
self.pops[self.from_pop[ii]].perf_var)
self.perf_params[ii,self.index_mean] = rng.beta(beta_a, beta_b)
# Set performance dispersion for each employee
self.perf_params[ii,self.index_disp] = \
self.pops[self.from_pop[ii]].perf_var
def org_step(self,n_steps = 1):
"""Steps the organization forward in time a specified number of steps,
and otherwise defaults to one step. Assumes that the organization has
already been filled."""
# Create history structure for the number of nodes and steps
self.history = History(n_steps,self.n_nodes,self.n_cultatt,
self.n_perfatt)
for st in np.arange(n_steps):
# Socialize agents
self.socialize()
# Update individual performances
self.perform_individuals()
# Calculate branch performances by reverse iteration
self.perform_branches()
# Calculate promotion fitnesses & scores
self.calc_promotion_fitness()
self.calc_promotion_scores()
# Record History from turn (promotion/hiring reflect in next step)
self.history.record_history(st, self.from_pop, self.culture,
self.social, self.perf_params, self.perf_indiv,
self.perf_branch, self.perf_org, self.prom_fit,
self.prom_score)
# Perform retirement
self.gen_retire()
# Perform promotion & hiring
self.emp_fill()
def socialize(self):
"""Socialization function."""
term_pars = np.matmul(self.norm_pars,
np.matmul(self.A_pars,self.social))
term_sibs = np.matmul(self.norm_sibs,
np.matmul(self.A_sibs,self.social))
self.social = np.matmul(self.norm_soc,
self.culture + term_pars + term_sibs)
def perform_individuals(self):
"""Generate performance of individuals"""
# Generate performance values by first cycling through the nodes
for ii in np.arange(self.n_nodes):
# Next, check its distribution type
if self.pops[self.from_pop[ii]].aff_dist == "uniform":
# Sample perf_indiv from a UNIFORM distribution
self.perf_indiv[ii] = rng.uniform()
else: # Otherwise defaults to beta distribution
# Else sample perf_indiv from a BETA distribution
beta_a, beta_b = beta(self.perf_params[ii,self.index_mean],
self.perf_params[ii,self.index_disp])
self.perf_indiv[ii] = rng.beta(beta_a, beta_b)
def perform_branches(self):
"""Generate performance for branches in reverse. NOTE: Currently
calculated in reverse from last created node to first node to ensure
that parent nodes include branch performances of children."""
# Calculate branch performance values by first cycling through nodes
for ii in np.arange(self.n_nodes-1,-1,-1):
# Calculate branch performance for each node
term_kids = self.norm_kids[ii,ii] \
* np.matmul(self.A_kids[ii,:],self.perf_branch)
self.perf_branch[ii] = self.norm_branch[ii,ii] \
* (self.perf_indiv[ii] + term_kids)
# Calculate org performance by taking root node's branch performance.
# Value comes from term_kids because loops above is reversed.
self.perf_org = term_kids
def calc_promotion_fitness(self):
"""Calculates the promotion fitness for each node. NOTE: Currently
calculates similarity term as an average of the culture of all parents,
which may not be appropriate for all promotion algorithms."""
# Calculate vectors for populating promotion fitness matrix
term_sim = np.ones(self.n_nodes) \
- la.norm(x = np.matmul(self.norm_gpars,
np.matmul(self.A_gpars,self.social)) \
- self.social,axis = 1)
term_perf = self.perf_branch
term_inc = np.matmul(self.culture,self.unit_z)
# Compile promotion fitness matrix
self.prom_fit = np.stack((term_sim,term_perf,term_inc),axis=-1)
def calc_promotion_scores(self):
"""Calculates the promotion score for each node. Make sure to use the
copy() method if using np.diag or np.diagonal, which returns a read/
write view starting with NumPy 1.10."""
self.prom_score = np.diag(np.matmul(np.matmul(
self.A_gpars,self.social),np.transpose(self.prom_fit))).copy()
def gen_retire(self):
"""Generates the members of the population to retire with a specified
probability."""
self.empty_positions = rng.choice(a=self.n_retire_opts,
size=self.n_nodes, p=[1-self.retire_prob,self.retire_prob])
def emp_fill(self):
"""Promote non-retiring member into openings from grandchildren."""
# Loop through nodes from top down to find the ones that are empty
for ii in np.arange(self.n_nodes):
# Only perform actions for empty positions
if self.empty_positions[ii] == 1:
# Reset potentially promotable options
filled = False
A_prom = self.A_kids
# Loop through until the empty position has been filled
while not(filled):
# If employees exist in the selected generation
if np.sum(A_prom[ii,:])>0:
# If at least one employee is promotable
if np.dot(A_prom[ii,:],1-self.empty_positions)>0:
# Get the location of the most qualified employee
emp_to_prom = np.argmax(A_prom[ii,:] \
* self.prom_score * (1 - self.empty_positions))
# Promote that employee
self.emp_prom(emp_to_prom,ii)
filled = True
# Otherwise, no employees in generation are promotable
else:
# So go to the next generation (get children)
A_prom = self.A_kids @ A_prom
# No employees exist in generation (no children)
else:
# So hire a new employee to the position
self.emp_hire(ii)
filled = True
    def emp_prom(self,loc_from,loc_to):
        """Promote an employee from one location to another.

        Copies the member's culture, socialization, population of origin,
        and performance parameters from ``loc_from`` into ``loc_to``, then
        resets the vacated slot and marks it as empty so ``emp_fill`` can
        process it in turn. Performance and promotion scores of the vacated
        slot are zeroed rather than moved (the promoted member's scores are
        recomputed elsewhere).
        """
        # Populate new location
        self.culture[loc_to,:] = self.culture[loc_from,:]
        self.social[loc_to,:] = self.social[loc_from,:]
        self.from_pop[loc_to] = self.from_pop[loc_from]
        self.perf_params[loc_to,:] = self.perf_params[loc_from,:]
        # Clear original location
        self.culture[loc_from,:] = np.zeros(self.n_cultatt)
        self.from_pop[loc_from] = -1
        self.perf_branch[loc_from] = 0
        self.perf_indiv[loc_from] = 0
        self.perf_params[loc_from,:] = np.zeros(self.n_perfatt)
        # NOTE(review): prom_fit was built with 3 columns (similarity,
        # performance, inclusiveness); zeroing it with n_cultatt assumes
        # n_cultatt == 3 -- confirm, otherwise this raises or misfills.
        self.prom_fit[loc_from,:] = np.zeros(self.n_cultatt)
        self.prom_score[loc_from] = 0
        self.social[loc_from,:] = np.zeros(self.n_cultatt)
        # Set location as needing to be filled
        self.empty_positions[loc_from] = 1
    def emp_hire(self,loc_to):
        """Hire new employees into opening by population sampling.

        Samples the new member's population of origin, initial culture, and
        performance parameters via the ``add_*`` helpers (defined elsewhere
        in this class), seeds socialization with the member's own culture,
        and zeroes all performance/promotion values until the next update
        step computes them.
        """
        # Pick a new employee from possible populations
        self.add_employee(loc_to)
        # Generate initial culture for that employee
        self.add_culture(loc_to)
        self.social[loc_to,:] = self.culture[loc_to,:]
        # Generate performance parameters for that employee
        self.add_performance(loc_to)
        # Set all performance values to zero for now
        self.perf_branch[loc_to] = 0
        self.perf_indiv[loc_to] = 0
        # NOTE(review): as in emp_prom, zeroing prom_fit with n_cultatt
        # assumes n_cultatt == 3 (prom_fit has 3 columns) -- confirm.
        self.prom_fit[loc_to,:] = np.zeros(self.n_cultatt)
        self.prom_score[loc_to] = 0
def return_results(self):
"""Return the history of the organization."""
return self.history
class Population(object):
    """A source population that the organization samples members from, both
    for its starting roster and for later hires.

    Attributes mirror the constructor arguments (see ``__init__``). Note the
    original docstring listed the default of ``aff_inc`` as 0.25; the actual
    default in the signature is 0.5, which is what is documented here.
    """

    def __init__(self, starting=1, hires=1, aff_dist="beta_2var",
                 aff_sim=0.25, aff_perf=0.25, aff_inc=0.5, aff_var=15,
                 perf_dist="beta", perf_mean=0.5, perf_var=15):
        """
        Initializes an instance of class population.

        Parameters
        ----------
        starting : [0,1], optional
            Probability that a member of the starting organization comes
            from this population. Probabilities across all populations must
            sum to 1. The default is 1.
        hires : [0,1], optional
            Probability that a new hire comes from this population. All
            probabilities must sum to 1. The default is 1.
        aff_dist : STRING, optional
            The culture distribution type of the population. The default is
            "beta_2var".
        aff_sim : [0.1,0.9], optional
            Mean of the sampling distribution for an agent's affinity for
            cultural similarity (beta distributions only). Default is 0.25.
        aff_perf : [0.1,0.9], optional
            Mean of the sampling distribution for an agent's affinity for
            performance (beta distributions only). Default is 0.25.
        aff_inc : [0.1,0.9], optional
            Mean of the sampling distribution for an agent's affinity for
            inclusiveness (beta distributions only). Default is 0.5.
        aff_var : (0,inf), optional
            Variance (concentration) of the culture beta distribution.
            Default is 15.
        perf_dist : STRING, optional
            The performance distribution type, either "beta" or "uniform".
            Default is "beta".
        perf_mean : [0.1,0.9], optional
            Mean of the sampling distribution for an agent's performance
            (beta distributions only). Default is 0.5.
        perf_var : (0,inf), optional
            Variance (concentration) of the performance beta distribution.
            Default is 15.

        Returns
        -------
        None.
        """
        # Representation weights (starting roster vs. subsequent hires).
        self.rep_start = starting
        self.rep_gen = hires
        # Culture / affinity sampling settings.
        self.aff_dist = aff_dist
        self.aff_sim = aff_sim
        self.aff_perf = aff_perf
        self.aff_inc = aff_inc
        self.aff_var = aff_var
        # Performance sampling settings.
        self.perf_dist = perf_dist
        self.perf_mean = perf_mean
        self.perf_var = perf_var
class History(object):
    """Per-step record of simulation results.

    Holds one array per tracked quantity, each indexed by time step along
    axis 0: demographics, culture, socialization, performance (parameters,
    individual, branch, and organization-wide), and promotion fitness and
    scores.
    """

    def __init__(self, n_steps, n_nodes, n_cultatt, n_perfatt):
        # Common shapes: one row per step, one column per node; cultural
        # quantities carry an extra attribute axis.
        per_node = (n_steps, n_nodes)
        per_cult = (n_steps, n_nodes, n_cultatt)
        self.demographics = np.zeros(per_node)
        self.culture = np.zeros(per_cult)
        self.socialization = np.zeros(per_cult)
        self.performance_params = np.zeros((n_steps, n_nodes, n_perfatt))
        self.performance_indiv = np.zeros(per_node)
        self.performance_branch = np.zeros(per_node)
        self.performance_org = np.zeros((n_steps,))
        self.promotion_fitness = np.zeros(per_cult)
        self.promotion_score = np.zeros(per_node)

    def record_history(self, step, demo, cult, soc, perf_par, perf_ind,
                       perf_bra, perf_org, prom_fit, prom_sco):
        """Snapshot one simulation step at row ``step``.

        Inputs are copied so that later in-place mutation of the
        organization's working arrays cannot alter the stored history.
        """
        self.demographics[step, :] = demo.copy()
        self.culture[step, :, :] = cult.copy()
        self.socialization[step, :, :] = soc.copy()
        self.performance_params[step, :, :] = perf_par.copy()
        self.performance_indiv[step, :] = perf_ind.copy()
        self.performance_branch[step, :] = perf_bra.copy()
        self.performance_org[step] = perf_org.copy()
        self.promotion_fitness[step, :, :] = prom_fit.copy()
        self.promotion_score[step, :] = prom_sco.copy()
def beta(mu, phi):
    """Convert a beta distribution's (mean, concentration) parametrization
    ``(mu, phi)`` into the standard shape parameters ``(alpha, beta)``."""
    return mu * phi, (1 - mu) * phi
def linear_uniform():
    """Generates one uniformly distributed random value ``z`` and two equal
    values ``x`` and ``y``, the three of which sum to one (2x + z = 1).

    (The previous docstring's final sentence about transforming ``mu`` and
    ``phi`` was a copy-paste error from ``linear_beta``; this function has
    no parameters and uses no beta transform.)
    """
    z = rng.uniform()
    x = (1 - z) / 2
    return x, x, z
def linear_beta(mu, phi):
    """Draw ``z`` from a beta distribution with mean ``mu`` and
    concentration ``phi`` (converted to shape parameters via ``beta``),
    and return ``(x, x, z)`` where the three values sum to one
    (2x + z = 1)."""
    a, b = beta(mu, phi)
    z = rng.beta(a, b)
    half = (1 - z) / 2
    return half, half, z
def triangle_uniform():
    """Generate three uniformly random values summing to one via triangle
    point picking (https://mathworld.wolfram.com/TrianglePointPicking.html):
    draw ``x`` and ``y`` on [0,1] and reflect the pair back into the
    simplex whenever ``x + y > 1``."""
    x = rng.uniform()
    y = rng.uniform()
    if x + y > 1:
        x, y = 1 - x, 1 - y
    return x, y, 1 - x - y
def triangle_beta(mu1, phi1, mu2, phi2):
    """Generate three beta-distributed values summing to one via triangle
    point picking (https://mathworld.wolfram.com/TrianglePointPicking.html).

    Rejection-samples ``x ~ Beta(mu1, phi1)`` and ``y ~ Beta(mu2, phi2)``
    (means/concentrations converted to shape parameters) until
    ``x + y <= 1``, then returns ``(x, y, 1 - x - y)``."""
    a1, b1 = beta(mu1, phi1)
    a2, b2 = beta(mu2, phi2)
    while True:
        x = rng.beta(a1, b1)
        y = rng.beta(a2, b2)
        if x + y <= 1:
            break
    return x, y, 1 - x - y
if __name__ == '__main__':
    # Smoke test: build an organization with default parameters.
    # (Stray dataset metadata fused onto this line — "| 0.833392 | 0.492127"
    # — made the file unparsable and has been removed.)
    org_test = Organization()
import random
import warnings
from extra.trees.bst import BSTNode, BST
class TreapNode(BSTNode):
    """
    A treap node is the basic unit for building Treap instances. A treap node
    must contain a number. Each treap node has either zero, one or two children
    treap nodes. The node that has no children is called a **leaf node**.
    """

    __name__ = "extra.TreapNode()"

    def __init__(self, data, priority=None):
        """
        Creates a `TreapNode()` object which is the basic unit for building
        `Treap()` objects!!

        Parameters
        ----------
        data: int or float
            The value to be saved within the `TreapNode()` instance
        priority: int or float (default: None)
            A numeric value indicating the priority of the `TreapNode()`.
            When omitted, a pseudo-random integer in [0, 100] is drawn.

        Raises
        ------
        ValueError:
            If the given data is `None`.
        TypeError:
            It can be raised in the following two cases:
                1. If the given data isn't a number.
                2. If the given priority isn't a number.
        """
        if priority is not None and type(priority) not in {int, float}:
            raise TypeError("Given priority has to be a number!!")
        super().__init__(data)
        # Random priorities are what keep the treap balanced in expectation.
        self._priority = (
            random.randint(0, 100)
            if priority is None
            else priority
        )

    def get_priority(self):
        """
        Returns the priority of the current `TreapNode()` instance.

        Returns
        -------
        int or float:
            The priority of the current `TreapNode()`.
        """
        return self._priority

    def set_priority(self, new_priority):
        """
        Sets the given priority as the priority of the current `TreapNode()`.

        Parameters
        ----------
        new_priority: int or float
            The new priority of the current `TreapNode()`.

        Raises
        ------
        TypeError:
            If the given priority is not a number.
        """
        if type(new_priority) not in {int, float}:
            raise TypeError("Given priority has to be a number!!")
        self._priority = new_priority

    def __repr__(self):
        """
        Represents `TreapNode()` object as a string.

        Returns
        -------
        str:
            A string representing the `TreapNode()` instance.

        Example
        -------
        >>> x = TreapNode(10, priority=0)
        >>> x
        TreapNode(data: 10, priority: 0)
        """
        # Lower-case "priority" so the output matches the documented example
        # above (it previously printed "Priority:", contradicting the doc).
        return f"TreapNode(data: {self._data}, priority: {self._priority})"

    def _represent(self):
        """
        A helpful function used to represent the node when printing!!

        Returns
        -------
        str:
            A string representing the `TreapNode()` in a very simple way.

        Example
        -------
        >>> x = TreapNode(10, priority=2)
        >>> x
        TreapNode(data: 10, priority: 2)
        >>> x._represent()
        10
        >>> type(x._represent())
        <class 'str'>

        And if we set the `SHOW_PRIORITY` static variable to `True`, it will
        look like this:

        >>> Treap.SHOW_PRIORITY = True
        >>> x._represent()
        10|P:2
        """
        if Treap.SHOW_PRIORITY:
            return f"{self._data}|P:{self._priority}"
        else:
            return f"{self._data}"
class Treap(BST):
"""
A Treap is a binary tree that stores a collection of nodes. Each node in
the treap contains two main values: "data" and "priority" and must satisfy
two additional properties:
1. node's data must follow the rules of binary search tree.
2. node's priority must follow the rules of max heap where the node with \
the heighest priority must always be at the root without breaking the \
rules of BST.
"""
SHOW_PRIORITY = False
_basic_node = TreapNode
__name__ = "extra.Treap()"
def __init__(self, iterable=None, seed=None):
"""
Initializes a `Treap()` instance using an optional iterable object in
time-complexity of O(n) where **n** is the number of elements inside
the given `iterable`.
Parameters
----------
iterable: iterable (default: None)
An iterable python object that implements the `__iter__` method.
For example, `list` and `tuple` are both iterables.
seed: int or float (default: None)
A seed to generate consistent random numbers.
Raises
------
TypeError:
It can be raised in three cases
1. In case the given object isn't iterable.
2. If one of the elements in the iterable is an `Extra` object.
3. If one of the elements in the iterable is NOT a number.
ValueError:
If one of the iterable elements is `None`.
Examples
--------
>>> treap = Treap([0, 2, 1, 4, 9, 7, 3], seed=123)
>>> treap
__4__
/ \\
2 9
/ \\ /
1 3 7
/
0
Without setting the seed, each time we run the code, we will get a
different structure.
>>> Treap([0, 2, 1, 4, 9, 7, 3])
____4
/ \\
1__ 7
/ \\ \\
0 3 9
/
2
>>> Treap([0, 2, 1, 4, 9, 7, 3])
____7
/ \\
2__ 9
/ \\
1 4
/ /
0 3
Using an iterable object with `None` as one of its elements will raise
`ValueError`
>>> Treap([2, None])
ValueError: Can't use `None` as an element within `extra.Treap()`!!
Using a non-iterable object will raise `TypeError`
>>> Treap(2)
TypeError: The given object isn't iterable!!
Using nested `Treap()` objects will raise `TypeError` as well
>>> treap_1 = Treap([1])
>>> treap_2 = Treap([1, treap_1])
TypeError: Can't create `extra.Treap()` using `extra.Treap()`!!
"""
random.seed(seed)
super().__init__(iterable)
# ============================= LENGTH ==============================
def __len__(self):
"""
Gets the length of the `Treap()` instance in time-complexity of O(1).
Returns
-------
int:
The length of the `Treap()` instance. Length is the number of tree
nodes in the instance.
Example
-------
>>> treap = Treap([0, 2, 1, 4, 9, 7, 3], seed=123)
>>> treap
__4__
/ \\
2 9
/ \\ /
1 3 7
/
0
>>> len(treap)
7
"""
return self._length
def is_empty(self):
"""
Checks if the `Treap()` instance is empty or not in constant time.
Returns
-------
bool:
A boolean flag showing if the `Treap()` instance is empty or not.
`True` shows that this instance is empty and `False` shows it's
not empty.
Example
--------
>>> treap = Treap()
>>> treap.is_empty()
True
>>> treap.insert(10)
>>> treap.is_empty()
False
"""
return super().is_empty()
# ============================= MIN/MAX ==============================
def get_max(self):
"""
Gets the maximum value in the `Treap()` isntance. The maximum value can
be found at the right-most tree node in the `Treap()` instance.
Returns
-------
int or float:
The maximum numeric value in the `Treap()` instance.
Raises
------
IndexError:
In case the `Treap()` instance is empty.
Example
-------
>>> treap = Treap([0, 2, 1, 4, 9, 7, 3], seed=123)
>>> treap
__4__
/ \\
2 9
/ \\ /
1 3 7
/
0
>>> treap.get_max()
9
"""
return super().get_max()
def get_min(self):
"""
Gets the minimum value in the `Treap()` isntance. The minimum value can
be found at the left-most tree node in the `Treap()` instance.
Returns
-------
int or float:
The minimum numeric value in the `Treap()` instance.
Raises
------
IndexError:
In case the `Treap()` instance is empty.
Example
-------
>>> treap = Treap([0, 2, 1, 4, 9, 7, 3], seed=123)
>>> treap
__4__
/ \\
2 9
/ \\ /
1 3 7
/
0
>>> treap.get_min()
0
"""
return super().get_min()
# ============================= SEARCH ==============================
def __contains__(self, find_val):
"""
Searches the `Treap()` for the given value and returns `True` if the
value exists and `False` if not.
Parameters
----------
find_val: int or float
The value to be searched for in the `Treap()` instance.
Returns
-------
bool:
Returns `True` if the value exists in the `Treap()` instance and
`False` if not.
Example
-------
>>> treap = Treap([0, 2, 1, 4, 9, 7, 3], seed=123)
>>> treap
__4__
/ \\
2 9
/ \\ /
1 3 7
/
0
>>> 3 in treap
True
>> 50 in treap
False
"""
return super().__contains__(find_val)
# ============================= INSERT ==============================
def __validate_priority(self, new_priority):
"""
Makes sure the priority is a valid value.
Parameters
----------
new_priority: int or flaot
The priority's new value.
Raises
-------
TypeError:
If the given new priority is not a numeric value.
"""
if new_priority is not None and type(new_priority) not in {int, float}:
raise TypeError("Given priority has to be a number!!")
    def insert(self, value, priority=None):
        """
        Inserts a numeric value in the `Treap()` instance according to the
        rules of binary search trees and max heap.

        Parameters
        ----------
        value: int or float
            The new numeric value that will be inserted.
        priority: int or float (default: None)
            The priority of the newly inserted node. When `None`, a random
            priority is drawn inside the node constructor.

        Raises
        ------
        ValueError:
            If the given `value` is `None`.
        TypeError:
            If either the given `value` or the given `priority` is not a
            numeric value.

        Example
        -------
        >>> treap = Treap()
        >>> treap.insert(10)
        >>> treap.insert(5)
        >>> treap.insert(15)
        >>> treap
          ___15
         /
        5
         \\
          10

        If we ran the same code again, we probably will get a different
        structure because the priority of the nodes are assigned randomly
        which changes the `Treap()` structure. Let's, now, set the priority
        of the inserted node:

        >>> treap = Treap()
        >>> treap.insert(10, priority=10)
        >>> treap.insert(5, priority=2)
        >>> treap.insert(15, priority=7)
        >>> treap
          10
         /  \\
        5    15
        >>> Treap.SHOW_PRIORITY = True
        >>> treap
          __10|P:10__
         /           \\
        5|P:2       15|P:7
        >>> treap.insert("2")
        TypeError: `extra.Treap()` accepts only numbers!!
        """
        # validate inserted value
        super()._validate_item(value)
        self.__validate_priority(priority)
        if self.is_empty():
            self._root = self._basic_node(value, priority)
            self._length += 1
        else:
            # perform standard BST-insert
            new_node = super()._insert_node(
                self._root, self._basic_node(value, priority)
            )
            # using rotations when necessary
            # Bubble the new node upward while its priority exceeds its
            # parent's, restoring the max-heap property over priorities
            # without breaking BST order on the data.
            parent = new_node.get_parent()
            while parent is not None:
                grandparent = parent.get_parent()
                if parent.get_priority() > new_node.get_priority():
                    break
                else:
                    # Rotate the parent down on the side the new node
                    # occupies, then re-attach the rotated subtree.
                    if new_node.is_left_child():
                        parent = super()._rotate_right(parent)
                    else:
                        parent = super()._rotate_left(parent)
                    super()._attach(grandparent, parent)
                    new_node = parent
                    parent = grandparent
# ============================= REMOVE ==============================
def remove(self, del_value):
"""
Removes the `del_value` from the `Treap()` instance.
Parameters
----------
del_value: int or float
The value to be deleted from the `Treap()`.
Raises
------
UserWarning:
If the `Treap()` instance is empty of if the value wasn't found in
the instance.
Example
-------
>>> treap = Treap([0, 2, 1, 4, 9, 7, 3], seed=123)
>>> treap
__4__
/ \\
2 9
/ \\ /
1 3 7
/
0
>>> treap.remove(9)
>>> treap.remove(0)
>>> treap
__4
/ \\
2 7
/ \\
1 3
>>> treap.remove(50)
UserWarning: Couldn't find `50` in `extra.Treap()`!!
"""
if self.is_empty():
warnings.warn(f"`{self.__name__}` is empty!!", UserWarning)
return
elif type(del_value) not in {int, float}:
warnings.warn(
f"Couldn't find `{del_value}` in `{self.__name__}`!!",
UserWarning
)
return
# check if it's the only value
elif self._root.is_leaf() and del_value == self._root.get_data():
self._root = None
self._length -= 1
else:
# search for the del_value node
removed_node = super()._search(del_value, self._root)
# couldn't find the node
if removed_node.get_data() != del_value:
warnings.warn(
f"Couldn't find `{del_value}` in `{self.__name__}`",
UserWarning
)
return
# rotate till removed_node is leaf
parent = removed_node.get_parent()
while not removed_node.is_leaf():
# get children's priority
left_child = removed_node.get_left()
right_child = removed_node.get_right()
left_priority = left_child.get_priority() if left_child else -1
right_priority = (
right_child.get_priority()
if right_child
else -1
)
# perform rotation
if left_priority > right_priority:
removed_node = super()._rotate_right(removed_node)
super()._attach(parent, removed_node)
parent = removed_node
removed_node = parent.get_right()
else:
removed_node = super()._rotate_left(removed_node)
super()._attach(parent, removed_node)
parent = removed_node
removed_node = parent.get_left()
# perform the removal
if removed_node.is_left_child():
parent.set_left(None)
else:
parent.set_right(None)
# decrement treap length
self._length -= 1
def clear(self):
"""
Removes all nodes within the `Treap()` instance in constant time.
Example
-------
>>> treap = Treap([0, 2, 1, 4, 9, 7, 3], seed=123)
>>> treap
__4__
/ \\
2 9
/ \\ /
1 3 7
/
0
>>> treap.clear()
>>> treap
/ \\
>>> treap.is_empty()
True
"""
super().clear()
# ============================= HEIGHT/DEPTH ==============================
def get_height(self):
"""
Gets the height of the `Treap()` instance. The Treap's height is the
number of edges between the root and the furthest leaf node.
Returns
-------
int:
A positive integer representing the height of the instance.
Example
-------
>>> treap = Treap([0, 2, 1, 4, 9, 7, 3], seed=123)
>>> treap
__4__
/ \\
2 9
/ \\ /
1 3 7
/
0
>>> treap.get_height()
3
"""
return super().get_height()
def get_depth(self):
"""
Gets the depth of the `Treap()` instance.
Returns
-------
int:
A positive integer representing the depth of the instance.
Example
-------
>>> treap = Treap([0, 2, 1, 4, 9, 7, 3], seed=123)
>>> treap
__4__
/ \\
2 9
/ \\ /
1 3 7
/
0
>>> treap.get_depth()
0
"""
return super().get_depth()
# ============================= LEAF NODES ==============================
def count_leaf_nodes(self):
"""
Counts the number of leaf nodes in the `Treap()` instance. Leaf nodes
are the tree nodes that have no children.
Returns
-------
int:
A positive integer representing the number of leaf nodes in the
`Treap()`.
Example
-------
>>> treap = Treap([0, 2, 1, 4, 9, 7, 3], seed=123)
>>> treap
__4__
/ \\
2 9
/ \\ /
1 3 7
/
0
>>> treap.count_leaf_nodes()
3
"""
return super().count_leaf_nodes()
# ============================= BALANCE ==============================
def is_balanced(self):
"""
Checks if the `Treap()` instance is balanced. A Treap is balanced if
the difference between the depth of any two leaf nodes is less than or
equal to one.
Returns
-------
bool:
`True` if the `Treap()` instance is balanced and `False` if it is
not balanced.
Raises
------
UserWarning:
If the `Treap()` is empty.
Example
-------
>>> treap = Treap([0, 2, 1, 4, 9, 7, 3], seed=123)
>>> treap
__4__
/ \\
2 9
/ \\ /
1 3 7
/
0
>>> treap.is_balanced()
True
Notice that, by changing the seed, you can change the balance of the
`Treap()`:
>>> treap = Treap([0, 2, 1, 4, 9, 7, 3], seed=2)
>>> treap
__7
/ \\
3 9
/ \\
2 4
/
1
/
0
>>> treap.is_balanced()
False
"""
return super().is_balanced()
# ============================= PERFECT ==============================
def is_perfect(self):
"""
Checks if the `Treap()` instance is perfect. A Treap is perfect if all
its levels are completely filled.
Returns
-------
bool:
`True` if the `Treap()` instance is perfect and `False` if it is
not perfect.
Raises
------
UserWarning: If the `Treap()` is empty.
Example
-------
>>> treap = Treap([0, 2, 1, 4, 9, 7, 3], seed=123)
>>> treap
__4__
/ \\
2 9
/ \\ /
1 3 7
/
0
>>> treap.is_perfect()
False
Note
----
By changing the seed, you can change the perfectness of the`Treap()`,
"""
return super().is_perfect()
# ============================= STRICT ==============================
def is_strict(self):
"""
Checks if the `Treap()` instance is strict. A Treap is strict if all
its non-leaf nodes have two children (left and right).
Returns
-------
bool:
`True` if the `Treap()` instance is strict and `False` if it is not
strict.
Raises
------
UserWarning: If the `Treap()` is empty.
Example
-------
>>> treap = Treap([0, 2, 1, 4, 9, 7, 3], seed=123)
>>> treap
__4__
/ \\
2 9
/ \\ /
1 3 7
/
0
>>> treap.is_strict()
False
Note
----
By changing the seed, you can change the strictness of the`Treap()`,
"""
return super().is_strict()
# ============================= ITER ==============================
def __iter__(self):
"""
Iterates over the `Treap()` instance and returns a generator of the
`BSTNode()` values in breadth-first manner.
Yields
------
int or float:
The number stored ateach node in the instance.
Example
-------
>>> treap = Treap([0, 2, 1, 4, 9, 7, 3], seed=123)
>>> treap
__4__
/ \\
2 9
/ \\ /
1 3 7
/
0
>>> for value in treap:
... print(value, end=',')
4,2,9,1,3,7,0,
"""
return super().__iter__()
def to_list(self):
"""
Converts the `Treap()` instance to a `list` where values will be
inserted in breadth-first manner.
Returns
-------
list:
A `list` object containing the same elements as the `Treap()`
instance.
Example
-------
>>> treap = Treap([0, 2, 1, 4, 9, 7, 3], seed=123)
>>> treap
__4__
/ \\
2 9
/ \\ /
1 3 7
/
0
>>> treap.to_list()
[4, 2, 9, 1, 3, 7, 0]
"""
return super().to_list()
# ============================= NODES ==============================
def get_nodes_per_level(self):
"""
Retrieves all tree nodes within the `Treap()` instance so that all
tree nodes in a certain level will be concatenated into a separate
list.
Returns
-------
list:
A nested list where the first inner-list has all the tree nodes in
the first level, the second inner-list has all the tree nodes in
the second level, ... so on.
Example
-------
>>> treap = Treap([0, 2, 1, 4, 9, 7, 3], seed=123)
>>> treap
__4__
/ \\
2 9
/ \\ /
1 3 7
/
0
>>> treap.get_nodes_per_level()
[[4], [2, 9], [1, 3, 7], [0]]
"""
return super().get_nodes_per_level()
# ============================= PRE-ORDER ==============================
def preorder_traverse(self):
"""
Traverses the `Treap()` instance in pre-order manner. Which means that
the **parent** is visited first. Then, the **left subtree** (if found),
then the **right subtree** (if found).
Note
-----
It's the same as `depth_first_traverse()` method.
Returns
--------
list:
A list of all values of the pre-order visited nodes.
Example
-------
>>> treap = Treap([0, 2, 1, 4, 9, 7, 3], seed=123)
>>> treap
__4__
/ \\
2 9
/ \\ /
1 3 7
/
0
>>> treap.preorder_traverse()
[4, 2, 1, 0, 3, 9, 7]
"""
return super().preorder_traverse()
def depth_first_traverse(self):
"""
Traverses the `Treap()` instance in depth-first manner. Which means
that the **parent** is visited first. Then, the **left subtree**
(if found), then the **right subtree** (if found).
Note
-----
It's the same as `preorder_traverse()` method.
Returns
--------
list:
A list of all values of the pre-order visited nodes.
Example
-------
>>> treap = Treap([0, 2, 1, 4, 9, 7, 3], seed=123)
>>> treap
__4__
/ \\
2 9
/ \\ /
1 3 7
/
0
>>> treap.depth_first_traverse()
[4, 2, 1, 0, 3, 9, 7]
"""
return super().depth_first_traverse()
# ============================= POST-ORDER ==============================
def postorder_traverse(self):
"""
Traverses the `Treap()` instance in post-order manner. Which means that
the **left subtree** (if found) is visited first. Then, the **right
subtree** (if found) then the **parent**.
Returns
--------
list:
A list of all values of the pre-order visited nodes.
Example
-------
>>> treap = Treap([0, 2, 1, 4, 9, 7, 3], seed=123)
>>> treap
__4__
/ \\
2 9
/ \\ /
1 3 7
/
0
>>> treap.postorder_traverse()
[0, 1, 3, 2, 7, 9, 4]
"""
return super().postorder_traverse()
# ============================= IN-ORDER ==============================
def inorder_traverse(self):
"""
Traverses the `Treap()` instance in in-order manner. Which means that
the **left subtree** (if found) is visited first. Then, the **parent**
then the **right subtree** (if found).
Returns
--------
list:
A list of all values of the in-order visited nodes.
Example
-------
>>> treap = Treap([0, 2, 1, 4, 9, 7, 3], seed=123)
>>> treap
__4__
/ \\
2 9
/ \\ /
1 3 7
/
0
>>> treap.inrder_traverse()
[0, 1, 2, 3, 4, 7, 9]
"""
return super().inorder_traverse()
# ============================= BREADTH-FIRST==============================
def breadth_first_traverse(self):
"""
Traverses the `Treap()` instance in breadth-first manner. Which means
that the tree nodes will be visited level by level.
Returns
--------
list:
A list of all values of the pre-order visited nodes.
Example
-------
>>> treap = Treap([0, 2, 1, 4, 9, 7, 3], seed=123)
>>> treap
__4__
/ \\
2 9
/ \\ /
1 3 7
/
0
>>> treap.breadth_first_traverse()
[4, 2, 9, 1, 3, 7, 0]
"""
return super().breadth_first_traverse()
# ============================= TRAVERSE ==============================
def traverse(self, method="inorder"):
"""
Traversal is the process to visit all nodes of a Treap starting from
the root as we cannot randomly access any node in a binary tree. There
are four ways which we use to traverse a Treap:
1. preorder - depth-first
2. inorder
3. posteorder
4. breadth-first
Parameters
----------
method: str (default="inorder")
A lower-cased string describing the type of traversal that will be
used. It could be one of these values: ["inorder", "postorder",
"preorder", "depth-first", "breadth-first"]
Returns
--------
list:
A list of all values of the visited nodes according to the
specified traversal method.
Raises
------
ValueError:
If the given method isn't known.
TypeError:
If the given method isn't a string.
Example
-------
>>> treap = Treap([0, 2, 1, 4, 9, 7, 3], seed=123)
>>> treap
__4__
/ \\
2 9
/ \\ /
1 3 7
/
0
>>> treap.traverse("preorder")
[4, 2, 1, 0, 3, 9, 7]
>>> treap.traverse("inorder")
[0, 1, 2, 3, 4, 7, 9]
>>> treap.traverse("postorder")
[0, 1, 3, 2, 7, 9, 4]
>>> treap.traverse("breadth-first")
[4, 2, 9, 1, 3, 7, 0]
>>> treap.traverse("extra")
ValueError: Given traverse method has to be one of these:
{'breadth-first', 'postorder', 'inorder', 'depth-first', 'preorder'}
"""
return super().traverse(method) | extra/trees/treap.py | import random
import warnings
from extra.trees.bst import BSTNode, BST
class TreapNode(BSTNode):
    """
    A treap node is the basic unit for building Treap instances. A treap node
    must contain a number. Each treap node has either zero, one or two children
    treap nodes. The node that has no children is called a **leaf node**.
    """

    __name__ = "extra.TreapNode()"

    def __init__(self, data, priority=None):
        """
        Creates a `TreapNode()` object which is the basic unit for building
        `Treap()` objects!!

        Parameters
        ----------
        data: int or float
            The value to be saved within the `TreapNode()` instance
        priority: int or float (default: None)
            A numeric value indicating the priority of the `TreapNode()`.
            When omitted, a pseudo-random integer in [0, 100] is drawn.

        Raises
        ------
        ValueError:
            If the given data is `None`.
        TypeError:
            It can be raised in the following two cases:
                1. If the given data isn't a number.
                2. If the given priority isn't a number.
        """
        if priority is not None and type(priority) not in {int, float}:
            raise TypeError("Given priority has to be a number!!")
        super().__init__(data)
        # Random priorities are what keep the treap balanced in expectation.
        self._priority = (
            random.randint(0, 100)
            if priority is None
            else priority
        )

    def get_priority(self):
        """
        Returns the priority of the current `TreapNode()` instance.

        Returns
        -------
        int or float:
            The priority of the current `TreapNode()`.
        """
        return self._priority

    def set_priority(self, new_priority):
        """
        Sets the given priority as the priority of the current `TreapNode()`.

        Parameters
        ----------
        new_priority: int or float
            The new priority of the current `TreapNode()`.

        Raises
        ------
        TypeError:
            If the given priority is not a number.
        """
        if type(new_priority) not in {int, float}:
            raise TypeError("Given priority has to be a number!!")
        self._priority = new_priority

    def __repr__(self):
        """
        Represents `TreapNode()` object as a string.

        Returns
        -------
        str:
            A string representing the `TreapNode()` instance.

        Example
        -------
        >>> x = TreapNode(10, priority=0)
        >>> x
        TreapNode(data: 10, priority: 0)
        """
        # Lower-case "priority" so the output matches the documented example
        # above (it previously printed "Priority:", contradicting the doc).
        return f"TreapNode(data: {self._data}, priority: {self._priority})"

    def _represent(self):
        """
        A helpful function used to represent the node when printing!!

        Returns
        -------
        str:
            A string representing the `TreapNode()` in a very simple way.

        Example
        -------
        >>> x = TreapNode(10, priority=2)
        >>> x
        TreapNode(data: 10, priority: 2)
        >>> x._represent()
        10
        >>> type(x._represent())
        <class 'str'>

        And if we set the `SHOW_PRIORITY` static variable to `True`, it will
        look like this:

        >>> Treap.SHOW_PRIORITY = True
        >>> x._represent()
        10|P:2
        """
        if Treap.SHOW_PRIORITY:
            return f"{self._data}|P:{self._priority}"
        else:
            return f"{self._data}"
class Treap(BST):
"""
A Treap is a binary tree that stores a collection of nodes. Each node in
the treap contains two main values: "data" and "priority" and must satisfy
two additional properties:
1. node's data must follow the rules of binary search tree.
2. node's priority must follow the rules of max heap where the node with \
the heighest priority must always be at the root without breaking the \
rules of BST.
"""
SHOW_PRIORITY = False
_basic_node = TreapNode
__name__ = "extra.Treap()"
def __init__(self, iterable=None, seed=None):
"""
Initializes a `Treap()` instance using an optional iterable object in
time-complexity of O(n) where **n** is the number of elements inside
the given `iterable`.
Parameters
----------
iterable: iterable (default: None)
An iterable python object that implements the `__iter__` method.
For example, `list` and `tuple` are both iterables.
seed: int or float (default: None)
A seed to generate consistent random numbers.
Raises
------
TypeError:
It can be raised in three cases
1. In case the given object isn't iterable.
2. If one of the elements in the iterable is an `Extra` object.
3. If one of the elements in the iterable is NOT a number.
ValueError:
If one of the iterable elements is `None`.
Examples
--------
>>> treap = Treap([0, 2, 1, 4, 9, 7, 3], seed=123)
>>> treap
__4__
/ \\
2 9
/ \\ /
1 3 7
/
0
Without setting the seed, each time we run the code, we will get a
different structure.
>>> Treap([0, 2, 1, 4, 9, 7, 3])
____4
/ \\
1__ 7
/ \\ \\
0 3 9
/
2
>>> Treap([0, 2, 1, 4, 9, 7, 3])
____7
/ \\
2__ 9
/ \\
1 4
/ /
0 3
Using an iterable object with `None` as one of its elements will raise
`ValueError`
>>> Treap([2, None])
ValueError: Can't use `None` as an element within `extra.Treap()`!!
Using a non-iterable object will raise `TypeError`
>>> Treap(2)
TypeError: The given object isn't iterable!!
Using nested `Treap()` objects will raise `TypeError` as well
>>> treap_1 = Treap([1])
>>> treap_2 = Treap([1, treap_1])
TypeError: Can't create `extra.Treap()` using `extra.Treap()`!!
"""
random.seed(seed)
super().__init__(iterable)
# ============================= LENGTH ==============================
def __len__(self):
"""
Gets the length of the `Treap()` instance in time-complexity of O(1).
Returns
-------
int:
The length of the `Treap()` instance. Length is the number of tree
nodes in the instance.
Example
-------
>>> treap = Treap([0, 2, 1, 4, 9, 7, 3], seed=123)
>>> treap
__4__
/ \\
2 9
/ \\ /
1 3 7
/
0
>>> len(treap)
7
"""
return self._length
def is_empty(self):
"""
Checks if the `Treap()` instance is empty or not in constant time.
Returns
-------
bool:
A boolean flag showing if the `Treap()` instance is empty or not.
`True` shows that this instance is empty and `False` shows it's
not empty.
Example
--------
>>> treap = Treap()
>>> treap.is_empty()
True
>>> treap.insert(10)
>>> treap.is_empty()
False
"""
return super().is_empty()
# ============================= MIN/MAX ==============================
def get_max(self):
"""
        Gets the maximum value in the `Treap()` instance. The maximum value can
be found at the right-most tree node in the `Treap()` instance.
Returns
-------
int or float:
The maximum numeric value in the `Treap()` instance.
Raises
------
IndexError:
In case the `Treap()` instance is empty.
Example
-------
>>> treap = Treap([0, 2, 1, 4, 9, 7, 3], seed=123)
>>> treap
__4__
/ \\
2 9
/ \\ /
1 3 7
/
0
>>> treap.get_max()
9
"""
return super().get_max()
def get_min(self):
"""
        Gets the minimum value in the `Treap()` instance. The minimum value can
be found at the left-most tree node in the `Treap()` instance.
Returns
-------
int or float:
The minimum numeric value in the `Treap()` instance.
Raises
------
IndexError:
In case the `Treap()` instance is empty.
Example
-------
>>> treap = Treap([0, 2, 1, 4, 9, 7, 3], seed=123)
>>> treap
__4__
/ \\
2 9
/ \\ /
1 3 7
/
0
>>> treap.get_min()
0
"""
return super().get_min()
# ============================= SEARCH ==============================
def __contains__(self, find_val):
"""
Searches the `Treap()` for the given value and returns `True` if the
value exists and `False` if not.
Parameters
----------
find_val: int or float
The value to be searched for in the `Treap()` instance.
Returns
-------
bool:
Returns `True` if the value exists in the `Treap()` instance and
`False` if not.
Example
-------
>>> treap = Treap([0, 2, 1, 4, 9, 7, 3], seed=123)
>>> treap
__4__
/ \\
2 9
/ \\ /
1 3 7
/
0
>>> 3 in treap
True
        >>> 50 in treap
False
"""
return super().__contains__(find_val)
# ============================= INSERT ==============================
def __validate_priority(self, new_priority):
"""
Makes sure the priority is a valid value.
Parameters
----------
new_priority: int or flaot
The priority's new value.
Raises
-------
TypeError:
If the given new priority is not a numeric value.
"""
if new_priority is not None and type(new_priority) not in {int, float}:
raise TypeError("Given priority has to be a number!!")
    def insert(self, value, priority=None):
        """
        Inserts a numeric value in the `Treap()` instance according to the
        rules of binary search trees and max heap.
        Parameters
        ----------
        value: int or float
            The new numeric value that will be inserted.
        priority: int or float (default: None)
            The priority of the newly inserted node. When `None`, the node is
            given a random priority by the node constructor.
        Raises
        ------
        ValueError:
            If the given `value` is `None`.
        TypeError:
            If either the given `value` or the given `priority` is not a
            numeric value.
        Example
        -------
        >>> treap = Treap()
        >>> treap.insert(10)
        >>> treap.insert(5)
        >>> treap.insert(15)
        >>> treap
            ___15
           /
          5
           \\
            10
        If we ran the same code again, we probably will get a different
        structure because the priority of the nodes are assigned randomly which
        changes the `Treap()` structure. Let's, now, set the priority of the
        inserted node:
        >>> treap = Treap()
        >>> treap.insert(10, priority=10)
        >>> treap.insert(5, priority=2)
        >>> treap.insert(15, priority=7)
        >>> treap
          10
         /  \\
        5    15
        >>> Treap.SHOW_PRIORITY = True
          __10|P:10__
         /           \\
        5|P:2         15|P:7
        >>> treap.insert("2")
        TypeError: `extra.Treap()` accepts only numbers!!
        """
        # validate inserted value
        super()._validate_item(value)
        self.__validate_priority(priority)
        if self.is_empty():
            self._root = self._basic_node(value, priority)
            self._length += 1
        else:
            # perform standard BST-insert
            new_node = super()._insert_node(
                self._root, self._basic_node(value, priority)
            )
            # using rotations when necessary
            # Bubble the new node up until its parent has a strictly higher
            # priority (max-heap property) or it becomes the new root.
            parent = new_node.get_parent()
            while parent is not None:
                grandparent = parent.get_parent()
                if parent.get_priority() > new_node.get_priority():
                    # Heap property already satisfied; stop bubbling.
                    break
                else:
                    # Rotate the new node above its parent; direction depends
                    # on which side of the parent the new node sits.
                    if new_node.is_left_child():
                        parent = super()._rotate_right(parent)
                    else:
                        parent = super()._rotate_left(parent)
                    # Re-attach the rotated subtree to the grandparent
                    # (or make it the new root when grandparent is None).
                    super()._attach(grandparent, parent)
                    new_node = parent
                    parent = grandparent
# ============================= REMOVE ==============================
def remove(self, del_value):
"""
Removes the `del_value` from the `Treap()` instance.
Parameters
----------
del_value: int or float
The value to be deleted from the `Treap()`.
Raises
------
UserWarning:
If the `Treap()` instance is empty of if the value wasn't found in
the instance.
Example
-------
>>> treap = Treap([0, 2, 1, 4, 9, 7, 3], seed=123)
>>> treap
__4__
/ \\
2 9
/ \\ /
1 3 7
/
0
>>> treap.remove(9)
>>> treap.remove(0)
>>> treap
__4
/ \\
2 7
/ \\
1 3
>>> treap.remove(50)
UserWarning: Couldn't find `50` in `extra.Treap()`!!
"""
if self.is_empty():
warnings.warn(f"`{self.__name__}` is empty!!", UserWarning)
return
elif type(del_value) not in {int, float}:
warnings.warn(
f"Couldn't find `{del_value}` in `{self.__name__}`!!",
UserWarning
)
return
# check if it's the only value
elif self._root.is_leaf() and del_value == self._root.get_data():
self._root = None
self._length -= 1
else:
# search for the del_value node
removed_node = super()._search(del_value, self._root)
# couldn't find the node
if removed_node.get_data() != del_value:
warnings.warn(
f"Couldn't find `{del_value}` in `{self.__name__}`",
UserWarning
)
return
# rotate till removed_node is leaf
parent = removed_node.get_parent()
while not removed_node.is_leaf():
# get children's priority
left_child = removed_node.get_left()
right_child = removed_node.get_right()
left_priority = left_child.get_priority() if left_child else -1
right_priority = (
right_child.get_priority()
if right_child
else -1
)
# perform rotation
if left_priority > right_priority:
removed_node = super()._rotate_right(removed_node)
super()._attach(parent, removed_node)
parent = removed_node
removed_node = parent.get_right()
else:
removed_node = super()._rotate_left(removed_node)
super()._attach(parent, removed_node)
parent = removed_node
removed_node = parent.get_left()
# perform the removal
if removed_node.is_left_child():
parent.set_left(None)
else:
parent.set_right(None)
# decrement treap length
self._length -= 1
def clear(self):
"""
Removes all nodes within the `Treap()` instance in constant time.
Example
-------
>>> treap = Treap([0, 2, 1, 4, 9, 7, 3], seed=123)
>>> treap
__4__
/ \\
2 9
/ \\ /
1 3 7
/
0
>>> treap.clear()
>>> treap
/ \\
>>> treap.is_empty()
True
"""
super().clear()
# ============================= HEIGHT/DEPTH ==============================
def get_height(self):
"""
Gets the height of the `Treap()` instance. The Treap's height is the
number of edges between the root and the furthest leaf node.
Returns
-------
int:
A positive integer representing the height of the instance.
Example
-------
>>> treap = Treap([0, 2, 1, 4, 9, 7, 3], seed=123)
>>> treap
__4__
/ \\
2 9
/ \\ /
1 3 7
/
0
>>> treap.get_height()
3
"""
return super().get_height()
def get_depth(self):
"""
Gets the depth of the `Treap()` instance.
Returns
-------
int:
A positive integer representing the depth of the instance.
Example
-------
>>> treap = Treap([0, 2, 1, 4, 9, 7, 3], seed=123)
>>> treap
__4__
/ \\
2 9
/ \\ /
1 3 7
/
0
>>> treap.get_depth()
0
"""
return super().get_depth()
# ============================= LEAF NODES ==============================
def count_leaf_nodes(self):
"""
Counts the number of leaf nodes in the `Treap()` instance. Leaf nodes
are the tree nodes that have no children.
Returns
-------
int:
A positive integer representing the number of leaf nodes in the
`Treap()`.
Example
-------
>>> treap = Treap([0, 2, 1, 4, 9, 7, 3], seed=123)
>>> treap
__4__
/ \\
2 9
/ \\ /
1 3 7
/
0
>>> treap.count_leaf_nodes()
3
"""
return super().count_leaf_nodes()
# ============================= BALANCE ==============================
def is_balanced(self):
"""
Checks if the `Treap()` instance is balanced. A Treap is balanced if
the difference between the depth of any two leaf nodes is less than or
equal to one.
Returns
-------
bool:
`True` if the `Treap()` instance is balanced and `False` if it is
not balanced.
Raises
------
UserWarning:
If the `Treap()` is empty.
Example
-------
>>> treap = Treap([0, 2, 1, 4, 9, 7, 3], seed=123)
>>> treap
__4__
/ \\
2 9
/ \\ /
1 3 7
/
0
>>> treap.is_balanced()
True
Notice that, by changing the seed, you can change the balance of the
`Treap()`:
>>> treap = Treap([0, 2, 1, 4, 9, 7, 3], seed=2)
>>> treap
__7
/ \\
3 9
/ \\
2 4
/
1
/
0
>>> treap.is_balanced()
False
"""
return super().is_balanced()
# ============================= PERFECT ==============================
def is_perfect(self):
"""
Checks if the `Treap()` instance is perfect. A Treap is perfect if all
its levels are completely filled.
Returns
-------
bool:
`True` if the `Treap()` instance is perfect and `False` if it is
not perfect.
Raises
------
UserWarning: If the `Treap()` is empty.
Example
-------
>>> treap = Treap([0, 2, 1, 4, 9, 7, 3], seed=123)
>>> treap
__4__
/ \\
2 9
/ \\ /
1 3 7
/
0
>>> treap.is_perfect()
False
Note
----
        By changing the seed, you can change the perfectness of the `Treap()`.
"""
return super().is_perfect()
# ============================= STRICT ==============================
def is_strict(self):
"""
Checks if the `Treap()` instance is strict. A Treap is strict if all
its non-leaf nodes have two children (left and right).
Returns
-------
bool:
`True` if the `Treap()` instance is strict and `False` if it is not
strict.
Raises
------
UserWarning: If the `Treap()` is empty.
Example
-------
>>> treap = Treap([0, 2, 1, 4, 9, 7, 3], seed=123)
>>> treap
__4__
/ \\
2 9
/ \\ /
1 3 7
/
0
>>> treap.is_strict()
False
Note
----
        By changing the seed, you can change the strictness of the `Treap()`.
"""
return super().is_strict()
# ============================= ITER ==============================
def __iter__(self):
"""
Iterates over the `Treap()` instance and returns a generator of the
`BSTNode()` values in breadth-first manner.
Yields
------
int or float:
            The number stored at each node in the instance.
Example
-------
>>> treap = Treap([0, 2, 1, 4, 9, 7, 3], seed=123)
>>> treap
__4__
/ \\
2 9
/ \\ /
1 3 7
/
0
>>> for value in treap:
... print(value, end=',')
4,2,9,1,3,7,0,
"""
return super().__iter__()
def to_list(self):
"""
Converts the `Treap()` instance to a `list` where values will be
inserted in breadth-first manner.
Returns
-------
list:
A `list` object containing the same elements as the `Treap()`
instance.
Example
-------
>>> treap = Treap([0, 2, 1, 4, 9, 7, 3], seed=123)
>>> treap
__4__
/ \\
2 9
/ \\ /
1 3 7
/
0
>>> treap.to_list()
[4, 2, 9, 1, 3, 7, 0]
"""
return super().to_list()
# ============================= NODES ==============================
def get_nodes_per_level(self):
"""
Retrieves all tree nodes within the `Treap()` instance so that all
tree nodes in a certain level will be concatenated into a separate
list.
Returns
-------
list:
A nested list where the first inner-list has all the tree nodes in
the first level, the second inner-list has all the tree nodes in
the second level, ... so on.
Example
-------
>>> treap = Treap([0, 2, 1, 4, 9, 7, 3], seed=123)
>>> treap
__4__
/ \\
2 9
/ \\ /
1 3 7
/
0
>>> treap.get_nodes_per_level()
[[4], [2, 9], [1, 3, 7], [0]]
"""
return super().get_nodes_per_level()
# ============================= PRE-ORDER ==============================
def preorder_traverse(self):
"""
Traverses the `Treap()` instance in pre-order manner. Which means that
the **parent** is visited first. Then, the **left subtree** (if found),
then the **right subtree** (if found).
Note
-----
It's the same as `depth_first_traverse()` method.
Returns
--------
list:
A list of all values of the pre-order visited nodes.
Example
-------
>>> treap = Treap([0, 2, 1, 4, 9, 7, 3], seed=123)
>>> treap
__4__
/ \\
2 9
/ \\ /
1 3 7
/
0
>>> treap.preorder_traverse()
[4, 2, 1, 0, 3, 9, 7]
"""
return super().preorder_traverse()
def depth_first_traverse(self):
"""
Traverses the `Treap()` instance in depth-first manner. Which means
that the **parent** is visited first. Then, the **left subtree**
(if found), then the **right subtree** (if found).
Note
-----
It's the same as `preorder_traverse()` method.
Returns
--------
list:
A list of all values of the pre-order visited nodes.
Example
-------
>>> treap = Treap([0, 2, 1, 4, 9, 7, 3], seed=123)
>>> treap
__4__
/ \\
2 9
/ \\ /
1 3 7
/
0
>>> treap.depth_first_traverse()
[4, 2, 1, 0, 3, 9, 7]
"""
return super().depth_first_traverse()
# ============================= POST-ORDER ==============================
def postorder_traverse(self):
"""
Traverses the `Treap()` instance in post-order manner. Which means that
the **left subtree** (if found) is visited first. Then, the **right
subtree** (if found) then the **parent**.
Returns
--------
list:
A list of all values of the pre-order visited nodes.
Example
-------
>>> treap = Treap([0, 2, 1, 4, 9, 7, 3], seed=123)
>>> treap
__4__
/ \\
2 9
/ \\ /
1 3 7
/
0
>>> treap.postorder_traverse()
[0, 1, 3, 2, 7, 9, 4]
"""
return super().postorder_traverse()
# ============================= IN-ORDER ==============================
def inorder_traverse(self):
"""
Traverses the `Treap()` instance in in-order manner. Which means that
the **left subtree** (if found) is visited first. Then, the **parent**
then the **right subtree** (if found).
Returns
--------
list:
A list of all values of the in-order visited nodes.
Example
-------
>>> treap = Treap([0, 2, 1, 4, 9, 7, 3], seed=123)
>>> treap
__4__
/ \\
2 9
/ \\ /
1 3 7
/
0
        >>> treap.inorder_traverse()
[0, 1, 2, 3, 4, 7, 9]
"""
return super().inorder_traverse()
# ============================= BREADTH-FIRST==============================
def breadth_first_traverse(self):
"""
Traverses the `Treap()` instance in breadth-first manner. Which means
that the tree nodes will be visited level by level.
Returns
--------
list:
A list of all values of the pre-order visited nodes.
Example
-------
>>> treap = Treap([0, 2, 1, 4, 9, 7, 3], seed=123)
>>> treap
__4__
/ \\
2 9
/ \\ /
1 3 7
/
0
>>> treap.breadth_first_traverse()
[4, 2, 9, 1, 3, 7, 0]
"""
return super().breadth_first_traverse()
# ============================= TRAVERSE ==============================
def traverse(self, method="inorder"):
"""
Traversal is the process to visit all nodes of a Treap starting from
the root as we cannot randomly access any node in a binary tree. There
are four ways which we use to traverse a Treap:
1. preorder - depth-first
2. inorder
        3. postorder
4. breadth-first
Parameters
----------
method: str (default="inorder")
A lower-cased string describing the type of traversal that will be
used. It could be one of these values: ["inorder", "postorder",
"preorder", "depth-first", "breadth-first"]
Returns
--------
list:
A list of all values of the visited nodes according to the
specified traversal method.
Raises
------
ValueError:
If the given method isn't known.
TypeError:
If the given method isn't a string.
Example
-------
>>> treap = Treap([0, 2, 1, 4, 9, 7, 3], seed=123)
>>> treap
__4__
/ \\
2 9
/ \\ /
1 3 7
/
0
>>> treap.traverse("preorder")
[4, 2, 1, 0, 3, 9, 7]
>>> treap.traverse("inorder")
[0, 1, 2, 3, 4, 7, 9]
>>> treap.traverse("postorder")
[0, 1, 3, 2, 7, 9, 4]
>>> treap.traverse("breadth-first")
[4, 2, 9, 1, 3, 7, 0]
>>> treap.traverse("extra")
ValueError: Given traverse method has to be one of these:
{'breadth-first', 'postorder', 'inorder', 'depth-first', 'preorder'}
"""
return super().traverse(method) | 0.926275 | 0.687361 |
from splinter import Browser
from bs4 import BeautifulSoup as bs
import time
import pandas as pd
import requests
import os
# https://splinter.readthedocs.io/en/latest/drivers/chrome.html
# get_ipython().system('which chromedriver')
def init_browser():
    """Launch and return a Chrome-backed splinter Browser.

    Assumes `chromedriver.exe` is on the current working directory / PATH.
    """
    driver_config = {'executable_path': 'chromedriver.exe'}
    return Browser("chrome", **driver_config)
def scrape():
    """
    Scrape NASA/JPL/USGS sites for Mars news, the featured image, the latest
    weather tweet, a facts table and hemisphere images.

    Returns
    -------
    dict:
        Keys: "news_title", "news_p", "featured_image_url", "mars_weather",
        "mars_facts" (HTML table string), "hemisphere_image_urls"
        (list of {"Title": ..., "Img_url": ...} dicts).
    """
    browser = init_browser()

    # ### NASA Mars News
    url = 'https://mars.nasa.gov/news/?page=0&per_page=40&order=publish_date+desc%2Ccreated_at+desc&search=&category=19%2C165%2C184%2C204&blank_scope=Latest'
    browser.visit(url)
    soup = bs(browser.html, "html.parser")
    news_title = soup.find('div', class_='content_title').text
    news_p = soup.find('div', class_='article_teaser_body').text

    # ### JPL Mars Space Images - Featured Image
    img_url = 'https://www.jpl.nasa.gov/spaceimages/?search%3D%26category%3DMars'
    browser.visit(img_url)
    browser.find_by_id("full_image").click()
    browser.find_link_by_partial_text("more info").click()
    soup2 = bs(browser.html, 'html.parser')
    partial_url = soup2.select_one('figure.lede a img').get("src")
    full_url = f'https://www.jpl.nasa.gov{partial_url}'

    # ### Mars Weather
    twitter_url = "https://twitter.com/marswxreport?lang=en"
    browser.visit(twitter_url)
    soup3 = bs(browser.html, 'html.parser')
    tweeter = soup3.find("div", class_="js-tweet-text-container")
    mars_weather = tweeter.find("p", "tweet-text").get_text()

    # ### Mars Facts (pandas fetches the page itself)
    data_url = 'https://space-facts.com/mars/'
    mars_data = pd.read_html(data_url)
    mars_data_df = mars_data[0]
    # Using Pandas to convert the data to a HTML table string.
    html_table = mars_data_df.to_html()
    mars_data_df.to_html('mars_table.html')

    # ### Mars Hemispheres
    hemispheres_url = "https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars"
    browser.visit(hemispheres_url)
    soup4 = bs(browser.html, 'html.parser')
    hemispheres = soup4.find_all('div', class_='item')
    base_url = "https://astrogeology.usgs.gov"
    title_img_url = []
    for hemisphere in hemispheres:
        # BUG FIX: search within each item (was page-level soup4, which
        # repeated the first hemisphere's title/link on every iteration).
        title = hemisphere.find("h3").text
        img_url = hemisphere.find('a', class_='itemLink product-item')["href"]
        browser.visit(base_url + img_url)
        # BUG FIX: parse the freshly-loaded detail page (was re-parsing the
        # search-results html and clobbering soup4).
        img_url_soup = bs(browser.html, 'html.parser')
        full_image_url = base_url + img_url_soup.find("img", class_="thumb")["src"]
        title_img_url.append({"Title": title, "Img_url": full_image_url})

    # Store data in a dictionary
    mars_data1 = {
        "news_title": news_title,
        "news_p": news_p,
        "featured_image_url": full_url,
        "mars_weather": mars_weather,
        # BUG FIX: was the raw list of DataFrames; the HTML string is what a
        # template can render directly.
        "mars_facts": html_table,
        # BUG FIX: was the raw soup <div> tags, not the collected results.
        "hemisphere_image_urls": title_img_url,
    }

    # Close the browser after scraping
    browser.quit()
    # Return results
    return mars_data1
from bs4 import BeautifulSoup as bs
import time
import pandas as pd
import requests
import os
# https://splinter.readthedocs.io/en/latest/drivers/chrome.html
# get_ipython().system('which chromedriver')
def init_browser():
executable_path = {'executable_path': 'chromedriver.exe'}
return Browser("chrome", **executable_path)
def scrape():
browser = init_browser()
### NASA Mars News
url = 'https://mars.nasa.gov/news/?page=0&per_page=40&order=publish_date+desc%2Ccreated_at+desc&search=&category=19%2C165%2C184%2C204&blank_scope=Latest'
browser.visit(url)
html = browser.html
soup = bs(html,"html.parser")
html
news_title = soup.find('div', class_='content_title').text
print(news_title)
news_p=soup.find('div', class_='article_teaser_body').text
print(news_p)
### JPL Mars Space Images - Featured Image
img_url = 'https://www.jpl.nasa.gov/spaceimages/?search%3D%26category%3DMars'
browser.visit(img_url)
secondclick = browser.find_by_id("full_image")
secondclick.click()
thirdclick = browser.find_link_by_partial_text("more info")
thirdclick.click()
html2=browser.html
soup2=bs(html2,'html.parser')
soup2
partial_url = soup2.select_one('figure.lede a img').get("src")
full_url = f'https://www.jpl.nasa.gov{partial_url}'
full_url
### Mars Weather
twitter_url = "https://twitter.com/marswxreport?lang=en"
browser.visit(twitter_url)
html3=browser.html
soup3=bs(html3,'html.parser')
tweeter= soup3.find("div", class_="js-tweet-text-container")
tweeter
tweeter.find("p", "tweet-text").get_text()
mars_weather = tweeter.find("p", "tweet-text").get_text()
mars_weather
### Mars Facts
data_url = 'https://space-facts.com/mars/'
browser.visit(data_url)
html4=browser.html
soup4=bs(html4,'html.parser')
### Mars table
mars_data = pd.read_html(data_url)
mars_data[0]
mars_data_df=mars_data[0]
#Using Pandas to convert the data to a HTML table string.
html_table=mars_data_df.to_html()
html_table
mars_data_df.to_html('mars_table.html')
# Mars Hemispheres
hemispheres_url = "https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars"
browser.visit(hemispheres_url)
html5=browser.html
soup4=bs(html5,'html.parser')
hemispheres = soup4.find_all('div', class_='item')
print(hemispheres)
title_img_url = []
for hemisphere in hemispheres:
title = soup4.find("h3").text
img_url = soup4.find('a', class_='itemLink product-item')["href"]
base_url = "https://astrogeology.usgs.gov"
browser.visit(base_url + img_url)
img_url_html = browser.html
img_url_soup = soup4=bs(html5,'html.parser')
full_image_url = base_url + img_url_soup.find("img",class_="thumb")["src"]
title_img_url.append({"Title":title,"Img_url":full_image_url})
# Store data in a dictionary
mars_data1 = {
"news_title": news_title,
"news_p": news_p,
"featured_image_url": full_url,
"mars_weather": mars_weather,
"mars_facts": mars_data,
"hemisphere_image_urls": hemispheres
}
# Close the browser after scraping
browser.quit()
# Return results
return mars_data1 | 0.322099 | 0.109064 |
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='v3/Trash.proto',
package='v3.trash',
syntax='proto3',
serialized_pb=_b('\n\x0ev3/Trash.proto\x12\x08v3.trash\"\xf3\x01\n\x06Level0\x12\'\n\x06level1\x18\x01 \x01(\x0b\x32\x17.v3.trash.Level0.Level1\x12\r\n\x05\x66ield\x18\x02 \x01(\x05\x1a\xb0\x01\n\x06Level1\x12.\n\x06level2\x18\x01 \x01(\x0b\x32\x1e.v3.trash.Level0.Level1.Level2\x12\r\n\x05\x66ield\x18\x02 \x01(\x05\x1ag\n\x06Level2\x12\x35\n\x06level3\x18\x01 \x01(\x0b\x32%.v3.trash.Level0.Level1.Level2.Level3\x12\r\n\x05\x66ield\x18\x02 \x01(\x05\x1a\x17\n\x06Level3\x12\r\n\x05\x66ield\x18\x01 \x01(\x05\x62\x06proto3')
)
_LEVEL0_LEVEL1_LEVEL2_LEVEL3 = _descriptor.Descriptor(
name='Level3',
full_name='v3.trash.Level0.Level1.Level2.Level3',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='field', full_name='v3.trash.Level0.Level1.Level2.Level3.field', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=249,
serialized_end=272,
)
_LEVEL0_LEVEL1_LEVEL2 = _descriptor.Descriptor(
name='Level2',
full_name='v3.trash.Level0.Level1.Level2',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='level3', full_name='v3.trash.Level0.Level1.Level2.level3', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='field', full_name='v3.trash.Level0.Level1.Level2.field', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_LEVEL0_LEVEL1_LEVEL2_LEVEL3, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=169,
serialized_end=272,
)
_LEVEL0_LEVEL1 = _descriptor.Descriptor(
name='Level1',
full_name='v3.trash.Level0.Level1',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='level2', full_name='v3.trash.Level0.Level1.level2', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='field', full_name='v3.trash.Level0.Level1.field', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_LEVEL0_LEVEL1_LEVEL2, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=96,
serialized_end=272,
)
_LEVEL0 = _descriptor.Descriptor(
name='Level0',
full_name='v3.trash.Level0',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='level1', full_name='v3.trash.Level0.level1', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='field', full_name='v3.trash.Level0.field', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_LEVEL0_LEVEL1, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=29,
serialized_end=272,
)
_LEVEL0_LEVEL1_LEVEL2_LEVEL3.containing_type = _LEVEL0_LEVEL1_LEVEL2
_LEVEL0_LEVEL1_LEVEL2.fields_by_name['level3'].message_type = _LEVEL0_LEVEL1_LEVEL2_LEVEL3
_LEVEL0_LEVEL1_LEVEL2.containing_type = _LEVEL0_LEVEL1
_LEVEL0_LEVEL1.fields_by_name['level2'].message_type = _LEVEL0_LEVEL1_LEVEL2
_LEVEL0_LEVEL1.containing_type = _LEVEL0
_LEVEL0.fields_by_name['level1'].message_type = _LEVEL0_LEVEL1
DESCRIPTOR.message_types_by_name['Level0'] = _LEVEL0
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Level0 = _reflection.GeneratedProtocolMessageType('Level0', (_message.Message,), dict(
Level1 = _reflection.GeneratedProtocolMessageType('Level1', (_message.Message,), dict(
Level2 = _reflection.GeneratedProtocolMessageType('Level2', (_message.Message,), dict(
Level3 = _reflection.GeneratedProtocolMessageType('Level3', (_message.Message,), dict(
DESCRIPTOR = _LEVEL0_LEVEL1_LEVEL2_LEVEL3,
__module__ = 'v3.Trash_pb2'
# @@protoc_insertion_point(class_scope:v3.trash.Level0.Level1.Level2.Level3)
))
,
DESCRIPTOR = _LEVEL0_LEVEL1_LEVEL2,
__module__ = 'v3.Trash_pb2'
# @@protoc_insertion_point(class_scope:v3.trash.Level0.Level1.Level2)
))
,
DESCRIPTOR = _LEVEL0_LEVEL1,
__module__ = 'v3.Trash_pb2'
# @@protoc_insertion_point(class_scope:v3.trash.Level0.Level1)
))
,
DESCRIPTOR = _LEVEL0,
__module__ = 'v3.Trash_pb2'
# @@protoc_insertion_point(class_scope:v3.trash.Level0)
))
_sym_db.RegisterMessage(Level0)
_sym_db.RegisterMessage(Level0.Level1)
_sym_db.RegisterMessage(Level0.Level1.Level2)
_sym_db.RegisterMessage(Level0.Level1.Level2.Level3)
# @@protoc_insertion_point(module_scope) | src/py/proto/v3/Trash_pb2.py |
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='v3/Trash.proto',
package='v3.trash',
syntax='proto3',
serialized_pb=_b('\n\x0ev3/Trash.proto\x12\x08v3.trash\"\xf3\x01\n\x06Level0\x12\'\n\x06level1\x18\x01 \x01(\x0b\x32\x17.v3.trash.Level0.Level1\x12\r\n\x05\x66ield\x18\x02 \x01(\x05\x1a\xb0\x01\n\x06Level1\x12.\n\x06level2\x18\x01 \x01(\x0b\x32\x1e.v3.trash.Level0.Level1.Level2\x12\r\n\x05\x66ield\x18\x02 \x01(\x05\x1ag\n\x06Level2\x12\x35\n\x06level3\x18\x01 \x01(\x0b\x32%.v3.trash.Level0.Level1.Level2.Level3\x12\r\n\x05\x66ield\x18\x02 \x01(\x05\x1a\x17\n\x06Level3\x12\r\n\x05\x66ield\x18\x01 \x01(\x05\x62\x06proto3')
)
_LEVEL0_LEVEL1_LEVEL2_LEVEL3 = _descriptor.Descriptor(
name='Level3',
full_name='v3.trash.Level0.Level1.Level2.Level3',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='field', full_name='v3.trash.Level0.Level1.Level2.Level3.field', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=249,
serialized_end=272,
)
_LEVEL0_LEVEL1_LEVEL2 = _descriptor.Descriptor(
name='Level2',
full_name='v3.trash.Level0.Level1.Level2',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='level3', full_name='v3.trash.Level0.Level1.Level2.level3', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='field', full_name='v3.trash.Level0.Level1.Level2.field', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_LEVEL0_LEVEL1_LEVEL2_LEVEL3, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=169,
serialized_end=272,
)
_LEVEL0_LEVEL1 = _descriptor.Descriptor(
name='Level1',
full_name='v3.trash.Level0.Level1',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='level2', full_name='v3.trash.Level0.Level1.level2', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='field', full_name='v3.trash.Level0.Level1.field', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_LEVEL0_LEVEL1_LEVEL2, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=96,
serialized_end=272,
)
_LEVEL0 = _descriptor.Descriptor(
name='Level0',
full_name='v3.trash.Level0',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='level1', full_name='v3.trash.Level0.level1', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='field', full_name='v3.trash.Level0.field', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_LEVEL0_LEVEL1, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=29,
serialized_end=272,
)
_LEVEL0_LEVEL1_LEVEL2_LEVEL3.containing_type = _LEVEL0_LEVEL1_LEVEL2
_LEVEL0_LEVEL1_LEVEL2.fields_by_name['level3'].message_type = _LEVEL0_LEVEL1_LEVEL2_LEVEL3
_LEVEL0_LEVEL1_LEVEL2.containing_type = _LEVEL0_LEVEL1
_LEVEL0_LEVEL1.fields_by_name['level2'].message_type = _LEVEL0_LEVEL1_LEVEL2
_LEVEL0_LEVEL1.containing_type = _LEVEL0
_LEVEL0.fields_by_name['level1'].message_type = _LEVEL0_LEVEL1
DESCRIPTOR.message_types_by_name['Level0'] = _LEVEL0
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Level0 = _reflection.GeneratedProtocolMessageType('Level0', (_message.Message,), dict(
Level1 = _reflection.GeneratedProtocolMessageType('Level1', (_message.Message,), dict(
Level2 = _reflection.GeneratedProtocolMessageType('Level2', (_message.Message,), dict(
Level3 = _reflection.GeneratedProtocolMessageType('Level3', (_message.Message,), dict(
DESCRIPTOR = _LEVEL0_LEVEL1_LEVEL2_LEVEL3,
__module__ = 'v3.Trash_pb2'
# @@protoc_insertion_point(class_scope:v3.trash.Level0.Level1.Level2.Level3)
))
,
DESCRIPTOR = _LEVEL0_LEVEL1_LEVEL2,
__module__ = 'v3.Trash_pb2'
# @@protoc_insertion_point(class_scope:v3.trash.Level0.Level1.Level2)
))
,
DESCRIPTOR = _LEVEL0_LEVEL1,
__module__ = 'v3.Trash_pb2'
# @@protoc_insertion_point(class_scope:v3.trash.Level0.Level1)
))
,
DESCRIPTOR = _LEVEL0,
__module__ = 'v3.Trash_pb2'
# @@protoc_insertion_point(class_scope:v3.trash.Level0)
))
_sym_db.RegisterMessage(Level0)
_sym_db.RegisterMessage(Level0.Level1)
_sym_db.RegisterMessage(Level0.Level1.Level2)
_sym_db.RegisterMessage(Level0.Level1.Level2.Level3)
# @@protoc_insertion_point(module_scope) | 0.273089 | 0.112844 |
import tensorflow as tf
import numpy as np
import functools
def lazy_property(function):
    """Memoizing ``@property`` decorator used to structure TF graphs.

    The wrapped method runs at most once per instance; its result is stored
    on the instance under ``_cache_<name>`` and returned on every later
    access.  Pattern from
    https://danijar.com/structuring-your-tensorflow-models/
    """
    cache_name = '_cache_' + function.__name__

    @property
    @functools.wraps(function)
    def wrapper(self):
        # First access computes and caches; later accesses hit the cache.
        try:
            return getattr(self, cache_name)
        except AttributeError:
            setattr(self, cache_name, function(self))
            return getattr(self, cache_name)
    return wrapper
class Network:
    """Neural network for multi-class classification.

    TF1 graph-mode model: a single dense layer producing per-class logits.
    The ``lazy_property`` accessors build each op once on first access; they
    are touched in ``__init__`` so the whole graph exists before the Saver
    is created.
    """

    def __init__(self, in_dims, num_classes):
        """Build the computation graph.

        Args:
            in_dims: length of each input feature vector.
            num_classes: number of output classes.
        """
        tf.reset_default_graph()
        tf.set_random_seed(1234)
        # Data
        self.num_classes = num_classes
        self.input = tf.placeholder(tf.float32, shape=(None, in_dims))
        self.labels = tf.placeholder(tf.int32, shape=None)
        # Hyperparameters
        self.learning_rate = tf.placeholder(tf.float32)
        # Graph. In __init__ method to force execution when Network
        # object is instantiated.
        self.logits
        self.prediction
        self.loss
        self.opt
        # Saver is created last so it captures all variables defined above.
        self.saver = tf.train.Saver()

    @lazy_property
    def logits(self):
        # One fully-connected layer: one logit per class.
        return tf.layers.dense(self.input, self.num_classes)

    @lazy_property
    def prediction(self):
        # Index of the largest logit for each example.
        return tf.argmax(self.logits, axis=1)

    @lazy_property
    def loss(self):
        # NOTE(review): sparse_softmax_cross_entropy_with_logits yields a
        # per-example loss vector (train() prints loss[0], i.e. only the
        # first example's loss) — confirm a reduce_mean was not intended.
        return tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=self.labels,
            logits=self.logits)

    @lazy_property
    def opt(self):
        return tf.train.AdamOptimizer(learning_rate=self.learning_rate)\
            .minimize(self.loss)

    def train(self, train_data, valid_data, params,
              save_path="./tmp/model.ckpt"):
        """
        Train the neural network and save the model.
        If both validation input and labels are provided then the model's
        accuracy is evaluated on the validation data at the end of every epoch.
        Args:
            train_data: Dictionary of training input and labels. Must have
                        form:
                        {'input': (2D numpy array of floats),
                         'labels': (1D numpy array of ints)}
                        The numpy array of inputs must have shape (
                        data_points, feature_vector_length) that is the
                        training input.
                        The numpy array of labels must have the
                        same length as the number of rows of the
                        inputs.
            valid_data: Dictionary of validation input and labels. Must
                        have same form as train_data.
            params: Dictionary of hyperparameters for the neural network
                    training. Must have the following form:
                    {'num_epochs': (int),
                     'learning_rate': (float),
                     'batch_size': (int)}
                    These values have their usual meaning in the
                    context of training a neural network.
            save_path: Filepath to save the model checkpoint to.
        Returns:
            Nothing.
        """
        np.random.seed(42)
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            for epoch in range(params['num_epochs']):
                print('Training epoch {}'.format(epoch))
                # Shuffle indices not data.
                shuffle_idx = np.arange(train_data['input'].shape[0])
                np.random.shuffle(shuffle_idx)
                for idx in range(0, len(shuffle_idx), params['batch_size']):
                    # Fancy-index one shuffled mini-batch.
                    i = shuffle_idx[idx:idx+params['batch_size']]
                    feed = {self.input: train_data['input'][i, :],
                            self.labels: train_data['labels'][i],
                            self.learning_rate: params['learning_rate']}
                    _, loss = sess.run([self.opt, self.loss], feed_dict=feed)
                # Loss printed once per epoch, from the last batch only.
                print('Loss: {:.2f}'.format(loss[0]))
                # Validation test
                percent_correct = self._validate(sess, valid_data, params)
                print('Validation accuracy: {:.2f}%'.format(percent_correct))
            self.saver.save(sess, save_path)
            print("Model saved in path: %s" % save_path)

    def _validate(self, sess, valid_data, params):
        """Return validation accuracy (percent) using the open session."""
        total_results = 0
        total_correct = 0
        for i in range(0, valid_data['input'].shape[0],
                       params['batch_size']):
            feed = {self.input: valid_data['input'][i:i + params[
                'batch_size'], :]}
            out = sess.run(self.prediction, feed_dict=feed)
            correct = np.equal(out,
                               valid_data['labels'][i:i+params['batch_size']])
            total_results += correct.size
            total_correct += np.sum(correct)
        percent_correct = 100 * total_correct / total_results
        return percent_correct

    def predict(self, feature_vectors, restore_path="./tmp/model.ckpt"):
        """
        Predict the label of an input.
        Args:
            feature_vectors: 2D numpy array of feature vectors. One row per
                             input. Feature vector length must be the same
                             as the length used in the neural network's
                             training.
            restore_path: Path to model to restore.
        Returns: 1D numpy array of predicted class indices (ints), one per
                 input row.
        """
        with tf.Session() as sess:
            self.saver.restore(sess, restore_path)
            print("Model restored from path: %s" % restore_path)
            feed = {self.input: feature_vectors}
            pred = sess.run(self.prediction, feed_dict=feed)
        return pred

    def evaluate(self, test_input, test_labels, batch_size=2,
                 restore_path="./tmp/model.ckpt"):
        """
        Evaluate the performance of the model on test data.
        Args:
            test_input: 2D numpy array of floats giving the training input.
                        Shape of array must be (data_points,
                        feature_vector_length)
            test_labels: 1D numpy array of ints giving the (enumerated)
                         labels. Length must match the number of rows of
                         train_input.
            batch_size: Batch size for testing. Does not affect results,
                        only speed.
            restore_path: Filepath of checkpoint file from which to restore
                          the model.
        Returns:
            Nothing.
        """
        total_results = 0
        total_correct = 0
        with tf.Session() as sess:
            self.saver.restore(sess, restore_path)
            print("Model restored from path: %s" % restore_path)
            for i in range(0, test_input.shape[0], batch_size):
                feed = {self.input: test_input[i:i + batch_size, :]}
                out = sess.run(self.prediction, feed_dict=feed)
                correct = np.equal(out, test_labels[i:i+batch_size])
                total_results += correct.size
                total_correct += np.sum(correct)
            print('Test accuracy: {:.2f}%'.format(100 * total_correct /
                                                  total_results))
import tensorflow as tf
import numpy as np
import functools
def lazy_property(function):
    """Memoizing ``@property`` decorator used to structure TF graphs.

    The wrapped method runs at most once per instance; its result is stored
    on the instance under ``_cache_<name>`` and returned on every later
    access.  Pattern from
    https://danijar.com/structuring-your-tensorflow-models/
    """
    cache_name = '_cache_' + function.__name__

    @property
    @functools.wraps(function)
    def wrapper(self):
        # First access computes and caches; later accesses hit the cache.
        try:
            return getattr(self, cache_name)
        except AttributeError:
            setattr(self, cache_name, function(self))
            return getattr(self, cache_name)
    return wrapper
class Network:
    """Neural network for multi-class classification.

    TF1 graph-mode model: a single dense layer producing per-class logits.
    The ``lazy_property`` accessors build each op once on first access; they
    are touched in ``__init__`` so the whole graph exists before the Saver
    is created.
    """

    def __init__(self, in_dims, num_classes):
        """Build the computation graph.

        Args:
            in_dims: length of each input feature vector.
            num_classes: number of output classes.
        """
        tf.reset_default_graph()
        tf.set_random_seed(1234)
        # Data
        self.num_classes = num_classes
        self.input = tf.placeholder(tf.float32, shape=(None, in_dims))
        self.labels = tf.placeholder(tf.int32, shape=None)
        # Hyperparameters
        self.learning_rate = tf.placeholder(tf.float32)
        # Graph. In __init__ method to force execution when Network
        # object is instantiated.
        self.logits
        self.prediction
        self.loss
        self.opt
        # Saver is created last so it captures all variables defined above.
        self.saver = tf.train.Saver()

    @lazy_property
    def logits(self):
        # One fully-connected layer: one logit per class.
        return tf.layers.dense(self.input, self.num_classes)

    @lazy_property
    def prediction(self):
        # Index of the largest logit for each example.
        return tf.argmax(self.logits, axis=1)

    @lazy_property
    def loss(self):
        # NOTE(review): sparse_softmax_cross_entropy_with_logits yields a
        # per-example loss vector (train() prints loss[0], i.e. only the
        # first example's loss) — confirm a reduce_mean was not intended.
        return tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=self.labels,
            logits=self.logits)

    @lazy_property
    def opt(self):
        return tf.train.AdamOptimizer(learning_rate=self.learning_rate)\
            .minimize(self.loss)

    def train(self, train_data, valid_data, params,
              save_path="./tmp/model.ckpt"):
        """
        Train the neural network and save the model.
        If both validation input and labels are provided then the model's
        accuracy is evaluated on the validation data at the end of every epoch.
        Args:
            train_data: Dictionary of training input and labels. Must have
                        form:
                        {'input': (2D numpy array of floats),
                         'labels': (1D numpy array of ints)}
                        The numpy array of inputs must have shape (
                        data_points, feature_vector_length) that is the
                        training input.
                        The numpy array of labels must have the
                        same length as the number of rows of the
                        inputs.
            valid_data: Dictionary of validation input and labels. Must
                        have same form as train_data.
            params: Dictionary of hyperparameters for the neural network
                    training. Must have the following form:
                    {'num_epochs': (int),
                     'learning_rate': (float),
                     'batch_size': (int)}
                    These values have their usual meaning in the
                    context of training a neural network.
            save_path: Filepath to save the model checkpoint to.
        Returns:
            Nothing.
        """
        np.random.seed(42)
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            for epoch in range(params['num_epochs']):
                print('Training epoch {}'.format(epoch))
                # Shuffle indices not data.
                shuffle_idx = np.arange(train_data['input'].shape[0])
                np.random.shuffle(shuffle_idx)
                for idx in range(0, len(shuffle_idx), params['batch_size']):
                    # Fancy-index one shuffled mini-batch.
                    i = shuffle_idx[idx:idx+params['batch_size']]
                    feed = {self.input: train_data['input'][i, :],
                            self.labels: train_data['labels'][i],
                            self.learning_rate: params['learning_rate']}
                    _, loss = sess.run([self.opt, self.loss], feed_dict=feed)
                # Loss printed once per epoch, from the last batch only.
                print('Loss: {:.2f}'.format(loss[0]))
                # Validation test
                percent_correct = self._validate(sess, valid_data, params)
                print('Validation accuracy: {:.2f}%'.format(percent_correct))
            self.saver.save(sess, save_path)
            print("Model saved in path: %s" % save_path)

    def _validate(self, sess, valid_data, params):
        """Return validation accuracy (percent) using the open session."""
        total_results = 0
        total_correct = 0
        for i in range(0, valid_data['input'].shape[0],
                       params['batch_size']):
            feed = {self.input: valid_data['input'][i:i + params[
                'batch_size'], :]}
            out = sess.run(self.prediction, feed_dict=feed)
            correct = np.equal(out,
                               valid_data['labels'][i:i+params['batch_size']])
            total_results += correct.size
            total_correct += np.sum(correct)
        percent_correct = 100 * total_correct / total_results
        return percent_correct

    def predict(self, feature_vectors, restore_path="./tmp/model.ckpt"):
        """
        Predict the label of an input.
        Args:
            feature_vectors: 2D numpy array of feature vectors. One row per
                             input. Feature vector length must be the same
                             as the length used in the neural network's
                             training.
            restore_path: Path to model to restore.
        Returns: 1D numpy array of predicted class indices (ints), one per
                 input row.
        """
        with tf.Session() as sess:
            self.saver.restore(sess, restore_path)
            print("Model restored from path: %s" % restore_path)
            feed = {self.input: feature_vectors}
            pred = sess.run(self.prediction, feed_dict=feed)
        return pred

    def evaluate(self, test_input, test_labels, batch_size=2,
                 restore_path="./tmp/model.ckpt"):
        """
        Evaluate the performance of the model on test data.
        Args:
            test_input: 2D numpy array of floats giving the training input.
                        Shape of array must be (data_points,
                        feature_vector_length)
            test_labels: 1D numpy array of ints giving the (enumerated)
                         labels. Length must match the number of rows of
                         train_input.
            batch_size: Batch size for testing. Does not affect results,
                        only speed.
            restore_path: Filepath of checkpoint file from which to restore
                          the model.
        Returns:
            Nothing.
        """
        total_results = 0
        total_correct = 0
        with tf.Session() as sess:
            self.saver.restore(sess, restore_path)
            print("Model restored from path: %s" % restore_path)
            for i in range(0, test_input.shape[0], batch_size):
                feed = {self.input: test_input[i:i + batch_size, :]}
                out = sess.run(self.prediction, feed_dict=feed)
                correct = np.equal(out, test_labels[i:i+batch_size])
                total_results += correct.size
                total_correct += np.sum(correct)
            print('Test accuracy: {:.2f}%'.format(100 * total_correct /
                                                  total_results))
import numpy as np
import pandas as pd
import json
from pprint import pprint
import parser
def c_j(H, b, j, maximiser=1):
    """Partial concordance index for criterion ``j``.

    Returns 1 when candidate ``H`` is at least as good as profile ``b`` on
    criterion ``j`` (``>=`` when the criterion is maximised, ``<=`` when it
    is minimised), else 0.

    Args:
        H: candidate's scores, indexable by criterion.
        b: profile's scores, indexable by criterion.
        j: criterion index.
        maximiser: truthy when criterion ``j`` is to be maximised.
    """
    # Debug prints removed: they fired on every call inside the C() loop.
    if maximiser:
        return int(H[j] >= b[j])
    return int(H[j] <= b[j])
def C(H, b, maximiser_list, poids):
    """Global concordance index between candidate ``H`` and profile ``b``:
    the weighted mean of the partial concordance indices ``c_j``."""
    n_criteria = b.shape[0]
    partials = np.array(
        [c_j(H, b, k, maximiser=maximiser_list[k]) for k in range(n_criteria)]
    )
    weights = np.array(poids)
    return np.dot(weights, partials) / weights.sum()
def S(H, b, maximiser_list, poids, Lambda):
    """True when candidate ``H`` outranks profile ``b`` at cut level ``Lambda``."""
    concordance = C(H, b, maximiser_list, poids)
    return concordance >= Lambda
def pareto_dominance_ij(b_sup, b_inf):
    """True when ``b_sup`` is strictly better than ``b_inf`` on every criterion."""
    return np.all(np.greater(b_sup, b_inf))
def pareto_dominance(profiles):
    """True when every profile strictly Pareto-dominates its predecessor.

    Profiles are read in index order (assumed worst to best).
    """
    index = profiles.index
    for lower, upper in zip(index[:-1], index[1:]):
        if not pareto_dominance_ij(profiles.loc[upper], profiles.loc[lower]):
            return False
    return True
# pareto_dominance(profiles)
def EvalOptimiste(H, profiles, maximiser_list, poids, Lambda):
    """Optimistic ranking of candidate ``H`` (ELECTRE-Tri style).

    Scans the profiles in index order and assigns ``H`` the rank just
    below the first profile that outranks it; if no profile outranks
    ``H``, it receives the highest rank.

    Args:
        H: candidate's scores.
        profiles: DataFrame of limit profiles, one per rank (index = rank).
        maximiser_list: per-criterion maximise/minimise flags.
        poids: per-criterion weights.
        Lambda: concordance cut level.
    """
    rang = profiles.index
    for i in rang:
        # Retrieve the i-th profile.
        b = profiles.loc[i]
        if S(b, H, maximiser_list, poids, Lambda):
            return int(i) - 1
    # BUG FIX: 'rand' was an undefined name (NameError); the fallback is
    # the top rank of the profile index.
    return rang.max()
def EvalPessimiste(H, profiles, maximiser_list, poids, Lambda):
    """Pessimistic ranking of candidate ``H`` (ELECTRE-Tri style).

    Scans the profiles from best to worst and assigns ``H`` the rank of
    the first profile it outranks; if ``H`` outranks no profile, it
    receives the lowest rank.

    Args:
        H: candidate's scores.
        profiles: DataFrame of limit profiles, one per rank (index = rank).
        maximiser_list: per-criterion maximise/minimise flags.
        poids: per-criterion weights.
        Lambda: concordance cut level.
    """
    rang = profiles.index
    for i in rang[::-1]:
        # Retrieve the i-th profile.
        b = profiles.loc[i]
        if S(H, b, maximiser_list, poids, Lambda):
            return int(i)
    # BUG FIX: 'rand' was an undefined name (NameError); the fallback is
    # the bottom rank of the profile index.
    return rang.min()
import pandas as pd
import json
from pprint import pprint
import parser
def c_j(H, b, j, maximiser=1):
    """Partial concordance index for criterion ``j``.

    Returns 1 when candidate ``H`` is at least as good as profile ``b`` on
    criterion ``j`` (``>=`` when the criterion is maximised, ``<=`` when it
    is minimised), else 0.

    Args:
        H: candidate's scores, indexable by criterion.
        b: profile's scores, indexable by criterion.
        j: criterion index.
        maximiser: truthy when criterion ``j`` is to be maximised.
    """
    # Debug prints removed: they fired on every call inside the C() loop.
    if maximiser:
        return int(H[j] >= b[j])
    return int(H[j] <= b[j])
def C(H, b, maximiser_list, poids):
    """Global concordance index between candidate ``H`` and profile ``b``:
    the weighted mean of the partial concordance indices ``c_j``."""
    n_criteria = b.shape[0]
    partials = np.array(
        [c_j(H, b, k, maximiser=maximiser_list[k]) for k in range(n_criteria)]
    )
    weights = np.array(poids)
    return np.dot(weights, partials) / weights.sum()
def S(H, b, maximiser_list, poids, Lambda):
    """True when candidate ``H`` outranks profile ``b`` at cut level ``Lambda``."""
    concordance = C(H, b, maximiser_list, poids)
    return concordance >= Lambda
def pareto_dominance_ij(b_sup, b_inf):
    """True when ``b_sup`` is strictly better than ``b_inf`` on every criterion."""
    return np.all(np.greater(b_sup, b_inf))
def pareto_dominance(profiles):
    """True when every profile strictly Pareto-dominates its predecessor.

    Profiles are read in index order (assumed worst to best).
    """
    index = profiles.index
    for lower, upper in zip(index[:-1], index[1:]):
        if not pareto_dominance_ij(profiles.loc[upper], profiles.loc[lower]):
            return False
    return True
# pareto_dominance(profiles)
def EvalOptimiste(H, profiles, maximiser_list, poids, Lambda):
    """Optimistic ranking of candidate ``H`` (ELECTRE-Tri style).

    Scans the profiles in index order and assigns ``H`` the rank just
    below the first profile that outranks it; if no profile outranks
    ``H``, it receives the highest rank.

    Args:
        H: candidate's scores.
        profiles: DataFrame of limit profiles, one per rank (index = rank).
        maximiser_list: per-criterion maximise/minimise flags.
        poids: per-criterion weights.
        Lambda: concordance cut level.
    """
    rang = profiles.index
    for i in rang:
        # Retrieve the i-th profile.
        b = profiles.loc[i]
        if S(b, H, maximiser_list, poids, Lambda):
            return int(i) - 1
    # BUG FIX: 'rand' was an undefined name (NameError); the fallback is
    # the top rank of the profile index.
    return rang.max()
def EvalPessimiste(H, profiles, maximiser_list, poids, Lambda):
    """Pessimistic ranking of candidate ``H`` (ELECTRE-Tri style).

    Scans the profiles from best to worst and assigns ``H`` the rank of
    the first profile it outranks; if ``H`` outranks no profile, it
    receives the lowest rank.

    Args:
        H: candidate's scores.
        profiles: DataFrame of limit profiles, one per rank (index = rank).
        maximiser_list: per-criterion maximise/minimise flags.
        poids: per-criterion weights.
        Lambda: concordance cut level.
    """
    rang = profiles.index
    for i in rang[::-1]:
        # Retrieve the i-th profile.
        b = profiles.loc[i]
        if S(H, b, maximiser_list, poids, Lambda):
            return int(i)
    # BUG FIX: 'rand' was an undefined name (NameError); the fallback is
    # the bottom rank of the profile index.
    return rang.min()
from typing import Optional
import pyexlatex as pl
import pyexlatex.table as lt
import pyexlatex.presentation as lp
import pyexlatex.graphics as lg
import pyexlatex.layouts as ll
import more_itertools
class _LabBlock(lp.Block):
    """Presentation block with a colored header (violet by default)."""

    def __init__(self, content, color: str = 'violet', **kwargs):
        """Forward ``content`` to ``lp.Block``, mapping ``color`` to ``header_color``."""
        super().__init__(content, header_color=color, **kwargs)
class LabBlock(pl.Template):
    """Lab-exercise block: main content on top, a vertical fill, then
    optional "bottom content" laid out as centered rows of up to three
    items above a rule, all wrapped in a colored ``_LabBlock``.
    """

    def __init__(self, content, bottom_content: Optional = None, **kwargs):
        # NOTE(review): bare ``Optional`` annotation (no type argument) —
        # effectively Optional[Any]; confirm the intended element type.
        if not isinstance(content, (list, tuple)):
            content = [content]
        if bottom_content is None:
            bottom_content = []
        if not isinstance(bottom_content, (list, tuple)):
            bottom_content = [bottom_content]
        self.content = content
        self.bottom_content = bottom_content
        self.kwargs = kwargs
        # contents must be assembled before pl.Template.__init__ runs.
        self.contents = self._get_contents()
        super().__init__()

    def _get_contents(self):
        """Assemble the block body (content, VFill, optional bottom table)
        and return it wrapped in a single ``_LabBlock``."""
        contents = [
            *self.content,
            pl.VFill(),
        ]
        if self.bottom_content:
            # Lay the bottom items out three per row.
            bottom_contents = list(more_itertools.chunked(self.bottom_content, 3))
            if len(bottom_contents) > 1:
                # Multiple rows
                new_bottom_contents = []
                for content_row in bottom_contents:
                    # Deal with incomplete rows
                    if len(content_row) == 1:
                        # Single item, center it
                        value = content_row[0]
                        new_bottom_contents.append(['', value, ''])
                    elif len(content_row) == 2:
                        # Two items, put on edges
                        value1, value2 = content_row
                        new_bottom_contents.append([value1, '', value2])
                    else:
                        new_bottom_contents.append(content_row)
                bottom_contents = new_bottom_contents
            # Add padding
            new_bottom_contents = []
            for row in bottom_contents:
                new_bottom_contents.append([pl.HFill(), *row, pl.HFill()])
            bottom_contents = new_bottom_contents
            align = 'c' * len(bottom_contents[0])
            tab = lt.TabularStar(
                [
                    lt.TopRule(),
                    lt.ValuesTable.from_list_of_lists(bottom_contents)
                ],
                align=align
            )
            tab = self.format_contents(tab)
            contents.append(tab)
        lb = _LabBlock(
            contents,
            **self.kwargs
        )
        return lb

    @property
    def all_bottom_contents(self):
        """Bottom content padded with HFills, run through format_contents.

        NOTE(review): not referenced by ``_get_contents`` — appears to be
        an auxiliary/leftover accessor; verify before removing.
        """
        if not self.bottom_content:
            return []
        contents = [
            pl.HFill(),
            *self.bottom_content,
            pl.HFill()
        ]
        return self.format_contents(contents)
class InClassExampleBlock(lp.Block):
    """Block with a dark-green header, used for in-class examples."""

    def __init__(self, content, **kwargs):
        # Register the custom color with the document data so it can be
        # referenced by name in header_color below.
        green = pl.RGB(31, 156, 17, color_name='darkgreen')
        self.init_data()
        self.data.packages.append(green)
        super().__init__(content, header_color='darkgreen', **kwargs)
import pyexlatex as pl
import pyexlatex.table as lt
import pyexlatex.presentation as lp
import pyexlatex.graphics as lg
import pyexlatex.layouts as ll
import more_itertools
class _LabBlock(lp.Block):
    """Presentation block with a colored header (violet by default)."""

    def __init__(self, content, color: str = 'violet', **kwargs):
        """Forward ``content`` to ``lp.Block``, mapping ``color`` to ``header_color``."""
        super().__init__(content, header_color=color, **kwargs)
class LabBlock(pl.Template):
    """Lab-exercise block: main content on top, a vertical fill, then
    optional "bottom content" laid out as centered rows of up to three
    items above a rule, all wrapped in a colored ``_LabBlock``.
    """

    def __init__(self, content, bottom_content: Optional = None, **kwargs):
        # NOTE(review): bare ``Optional`` annotation (no type argument) —
        # effectively Optional[Any]; confirm the intended element type.
        if not isinstance(content, (list, tuple)):
            content = [content]
        if bottom_content is None:
            bottom_content = []
        if not isinstance(bottom_content, (list, tuple)):
            bottom_content = [bottom_content]
        self.content = content
        self.bottom_content = bottom_content
        self.kwargs = kwargs
        # contents must be assembled before pl.Template.__init__ runs.
        self.contents = self._get_contents()
        super().__init__()

    def _get_contents(self):
        """Assemble the block body (content, VFill, optional bottom table)
        and return it wrapped in a single ``_LabBlock``."""
        contents = [
            *self.content,
            pl.VFill(),
        ]
        if self.bottom_content:
            # Lay the bottom items out three per row.
            bottom_contents = list(more_itertools.chunked(self.bottom_content, 3))
            if len(bottom_contents) > 1:
                # Multiple rows
                new_bottom_contents = []
                for content_row in bottom_contents:
                    # Deal with incomplete rows
                    if len(content_row) == 1:
                        # Single item, center it
                        value = content_row[0]
                        new_bottom_contents.append(['', value, ''])
                    elif len(content_row) == 2:
                        # Two items, put on edges
                        value1, value2 = content_row
                        new_bottom_contents.append([value1, '', value2])
                    else:
                        new_bottom_contents.append(content_row)
                bottom_contents = new_bottom_contents
            # Add padding
            new_bottom_contents = []
            for row in bottom_contents:
                new_bottom_contents.append([pl.HFill(), *row, pl.HFill()])
            bottom_contents = new_bottom_contents
            align = 'c' * len(bottom_contents[0])
            tab = lt.TabularStar(
                [
                    lt.TopRule(),
                    lt.ValuesTable.from_list_of_lists(bottom_contents)
                ],
                align=align
            )
            tab = self.format_contents(tab)
            contents.append(tab)
        lb = _LabBlock(
            contents,
            **self.kwargs
        )
        return lb

    @property
    def all_bottom_contents(self):
        """Bottom content padded with HFills, run through format_contents.

        NOTE(review): not referenced by ``_get_contents`` — appears to be
        an auxiliary/leftover accessor; verify before removing.
        """
        if not self.bottom_content:
            return []
        contents = [
            pl.HFill(),
            *self.bottom_content,
            pl.HFill()
        ]
        return self.format_contents(contents)
class InClassExampleBlock(lp.Block):
    """Block with a dark-green header, used for in-class examples."""

    def __init__(self, content, **kwargs):
        # Register the custom color with the document data so it can be
        # referenced by name in header_color below.
        green = pl.RGB(31, 156, 17, color_name='darkgreen')
        self.init_data()
        self.data.packages.append(green)
        super().__init__(content, header_color='darkgreen', **kwargs)
import pandas as pd
def two_values_melt(df, first_value_vars, second_value_vars, var_name,
                    value_name):
    """Melt two parallel groups of value columns into one long DataFrame.

    Goes from wide form::

        | ... | <some1> | <some2> | <some1_evol> | <some2_evol> |
        | ... | <val1>  | <val2>  | <evol1>      | <evol2>      |

    to long form with two value columns::

        | ... | variable | value  | evolution
        | ... | <some1>  | <val1> | <evol1>
        | ... | <some2>  | <val2> | <evol2>

    Args:
        df (DataFrame): DataFrame to process.
        first_value_vars (list): melt value_vars for the first value group.
        second_value_vars (list): melt value_vars for the second value group
            (one per entry of ``first_value_vars``, same order).
        var_name (str): melt var_name.
        value_name (str): base name; output columns are ``<value_name>_first``
            and ``<value_name>_second``.

    Returns:
        DataFrame: molten DataFrame with two value columns.
    """
    first_col = value_name + '_first'
    second_col = value_name + '_second'

    def _melt(value_vars, out_name):
        # Everything that is not being molten stays as an identifier column.
        keep = [col for col in list(df) if col not in value_vars]
        return pd.melt(df, id_vars=keep, value_vars=value_vars,
                       var_name=var_name, value_name=out_name)

    # Melt on the first value columns, dropping the second group's columns.
    molten_first = _melt(first_value_vars, first_col).drop(
        second_value_vars, axis=1)

    # Melt on the second value columns. Rename the second group's variable
    # labels to the first group's so both halves merge on the same keys,
    # then drop the first group's columns.
    molten_second = _melt(second_value_vars, second_col)
    molten_second = molten_second.replace(
        dict(zip(second_value_vars, first_value_vars)))
    molten_second = molten_second.drop(first_value_vars, axis=1)

    # Merge on every shared column (all but the value columns themselves).
    join_cols = [col for col in list(molten_first) if col != first_col]
    return pd.merge(molten_first, molten_second, on=join_cols, how='outer')
def two_values_melt(df, first_value_vars, second_value_vars, var_name,
                    value_name):
    """Melt two parallel groups of value columns into one long DataFrame.

    Goes from wide form::

        | ... | <some1> | <some2> | <some1_evol> | <some2_evol> |
        | ... | <val1>  | <val2>  | <evol1>      | <evol2>      |

    to long form with two value columns::

        | ... | variable | value  | evolution
        | ... | <some1>  | <val1> | <evol1>
        | ... | <some2>  | <val2> | <evol2>

    Args:
        df (DataFrame): DataFrame to process.
        first_value_vars (list): melt value_vars for the first value group.
        second_value_vars (list): melt value_vars for the second value group
            (one per entry of ``first_value_vars``, same order).
        var_name (str): melt var_name.
        value_name (str): base name; output columns are ``<value_name>_first``
            and ``<value_name>_second``.

    Returns:
        DataFrame: molten DataFrame with two value columns.
    """
    first_col = value_name + '_first'
    second_col = value_name + '_second'

    def _melt(value_vars, out_name):
        # Everything that is not being molten stays as an identifier column.
        keep = [col for col in list(df) if col not in value_vars]
        return pd.melt(df, id_vars=keep, value_vars=value_vars,
                       var_name=var_name, value_name=out_name)

    # Melt on the first value columns, dropping the second group's columns.
    molten_first = _melt(first_value_vars, first_col).drop(
        second_value_vars, axis=1)

    # Melt on the second value columns. Rename the second group's variable
    # labels to the first group's so both halves merge on the same keys,
    # then drop the first group's columns.
    molten_second = _melt(second_value_vars, second_col)
    molten_second = molten_second.replace(
        dict(zip(second_value_vars, first_value_vars)))
    molten_second = molten_second.drop(first_value_vars, axis=1)

    # Merge on every shared column (all but the value columns themselves).
    join_cols = [col for col in list(molten_first) if col != first_col]
    return pd.merge(molten_first, molten_second, on=join_cols, how='outer')
# Imports
import re
import os
import datetime
# Functions
'''
Primary handle function
Get request header, check method and path, and create response accordingly
'''
def http_handle(request_string):
    """Handle one HTTP request string and return the full response text.

    Only GET requests for existing files under ``data/`` get a body;
    anything else yields a 403/404 header-only response (404 wins when
    both conditions fail, matching the original precedence).
    """
    assert not isinstance(request_string, bytes)
    req_header = http_get_header(request_string)
    # BUG FIX: req_header.get('GET')[0] raised TypeError for non-GET
    # requests (get() returns None) and IndexError for a bare 'GET' line;
    # fall back to None so the path check reports 404 instead of crashing.
    get_fields = req_header.get('GET') or []
    path = get_fields[0] if get_fields else None
    status = 200
    if not http_is_get(req_header):
        status = 403
    if not http_is_valid_path(path):
        status = 404
    res_header = http_create_res_header(status)
    if status > 200:
        return res_header
    res_body = http_create_res_body(path)
    response = res_header + '\n'
    response += res_body
    return response
'''
Parse request to create name/value header object
'''
def http_get_header(request):
    """Parse a raw request into ``{first token: [remaining tokens]}``.

    Each non-empty line is split on runs of whitespace or a colon followed
    by whitespace; the first token keys the rest of the line's tokens.
    """
    parsed = {}
    for raw_line in request.splitlines():
        if not raw_line:
            continue
        tokens = re.split('\s+|:\s', raw_line)
        parsed[tokens[0]] = tokens[1:]
    return parsed
'''
Check method of request
True only if GET request
'''
def http_is_get(req_headers):
    """True when the parsed request's method line was a GET."""
    return 'GET' in req_headers
'''
Confirm path is valid.
True only if path is to file and file exists in data directory
'''
def http_is_valid_path(path):
    """Return True iff ``path`` names an existing file under ``data/``.

    The path must contain a directory part ending in '/' and a file name
    of the form ``name.ext`` (extension may be empty).
    """
    if not path:
        return False
    dir = re.search('^(.*/)', path)
    file = re.search('^.*/(\w+\.\w*)$', path)
    if not file:
        return False
    # BUG FIX: os.scandir raised FileNotFoundError/NotADirectoryError for a
    # missing directory (instead of reporting an invalid path), and the
    # scandir iterator was never closed (resource leak). Use a context
    # manager and translate OS errors into "invalid path".
    try:
        with os.scandir('data/' + dir.group(1)) as entries:
            for item in entries:
                if item.is_file() and item.name == file.group(1):
                    return True
    except (FileNotFoundError, NotADirectoryError):
        return False
    return False
'''
Craft response header. Status is OK only if GET request. Otherwise 403/404
Additionally adds Date, Server, and Content-Type headers where applicable
i.e.
HTTP/1.1 200 OK
Content-Type: text/html
Date: Wed, 10 Aug 2019 12:00:00
Server: Python/3.7.4
'''
def http_create_res_header(status):
    """Build the HTTP/1.1 response header text for ``status``.

    403 and 404 get their reason phrases; every other status (including
    unknown codes) falls back to 200 OK. Content-Type, Date and Server
    lines are always appended.
    """
    if status == 403:
        status_line = 'HTTP/1.1 403 Forbidden\n'
    elif status == 404:
        status_line = 'HTTP/1.1 404 Not Found\n'
    else:
        status_line = 'HTTP/1.1 200 OK\n'
    now = datetime.datetime.now()
    return (status_line
            + 'Content-Type: text/html\n'
            + 'Date: ' + now.strftime("%a, %d %b %Y %H:%M:%S %Z") + '\n'
            + 'Server: Python/3.7.4\n')
'''
Fetch file data from within data directory and create response body (assumes valid path)
'''
def http_create_res_body(path):
    """Read and return the contents of ``data/<path>`` (path assumed valid)."""
    # BUG FIX: removed leftover debug print that polluted stdout on every
    # request.
    with open('data' + path, 'r') as file:
        return file.read()
# Imports
import re
import os
import datetime
# Functions
'''
Primary handle function
Get request header, check method and path, and create response accordingly
'''
def http_handle(request_string):
    """Handle one HTTP request string and return the full response text.

    Only GET requests for existing files under ``data/`` get a body;
    anything else yields a 403/404 header-only response (404 wins when
    both conditions fail, matching the original precedence).
    """
    assert not isinstance(request_string, bytes)
    req_header = http_get_header(request_string)
    # BUG FIX: req_header.get('GET')[0] raised TypeError for non-GET
    # requests (get() returns None) and IndexError for a bare 'GET' line;
    # fall back to None so the path check reports 404 instead of crashing.
    get_fields = req_header.get('GET') or []
    path = get_fields[0] if get_fields else None
    status = 200
    if not http_is_get(req_header):
        status = 403
    if not http_is_valid_path(path):
        status = 404
    res_header = http_create_res_header(status)
    if status > 200:
        return res_header
    res_body = http_create_res_body(path)
    response = res_header + '\n'
    response += res_body
    return response
'''
Parse request to create name/value header object
'''
def http_get_header(request):
    """Parse a raw request into ``{first token: [remaining tokens]}``.

    Each non-empty line is split on runs of whitespace or a colon followed
    by whitespace; the first token keys the rest of the line's tokens.
    """
    parsed = {}
    for raw_line in request.splitlines():
        if not raw_line:
            continue
        tokens = re.split('\s+|:\s', raw_line)
        parsed[tokens[0]] = tokens[1:]
    return parsed
'''
Check method of request
True only if GET request
'''
def http_is_get(req_headers):
    """True when the parsed request's method line was a GET."""
    return 'GET' in req_headers
'''
Confirm path is valid.
True only if path is to file and file exists in data directory
'''
def http_is_valid_path(path):
    """Return True iff ``path`` names an existing file under ``data/``.

    The path must contain a directory part ending in '/' and a file name
    of the form ``name.ext`` (extension may be empty).
    """
    if not path:
        return False
    dir = re.search('^(.*/)', path)
    file = re.search('^.*/(\w+\.\w*)$', path)
    if not file:
        return False
    # BUG FIX: os.scandir raised FileNotFoundError/NotADirectoryError for a
    # missing directory (instead of reporting an invalid path), and the
    # scandir iterator was never closed (resource leak). Use a context
    # manager and translate OS errors into "invalid path".
    try:
        with os.scandir('data/' + dir.group(1)) as entries:
            for item in entries:
                if item.is_file() and item.name == file.group(1):
                    return True
    except (FileNotFoundError, NotADirectoryError):
        return False
    return False
'''
Craft response header. Status is OK only if GET request. Otherwise 403/404
Additionally adds Date, Server, and Content-Type headers where applicable
i.e.
HTTP/1.1 200 OK
Content-Type: text/html
Date: Wed, 10 Aug 2019 12:00:00
Server: Python/3.7.4
'''
def http_create_res_header(status):
    """Build the HTTP/1.1 response header text for ``status``.

    403 and 404 get their reason phrases; every other status (including
    unknown codes) falls back to 200 OK. Content-Type, Date and Server
    lines are always appended.
    """
    if status == 403:
        status_line = 'HTTP/1.1 403 Forbidden\n'
    elif status == 404:
        status_line = 'HTTP/1.1 404 Not Found\n'
    else:
        status_line = 'HTTP/1.1 200 OK\n'
    now = datetime.datetime.now()
    return (status_line
            + 'Content-Type: text/html\n'
            + 'Date: ' + now.strftime("%a, %d %b %Y %H:%M:%S %Z") + '\n'
            + 'Server: Python/3.7.4\n')
'''
Fetch file data from within data directory and create response body (assumes valid path)
'''
def http_create_res_body(path):
    """Read and return the contents of ``data/<path>`` (path assumed valid)."""
    # BUG FIX: removed leftover debug print that polluted stdout on every
    # request.
    with open('data' + path, 'r') as file:
        return file.read()
from distutils.version import LooseVersion
import keras
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten, Dropout
if LooseVersion(keras.__version__) >= LooseVersion('2.0.0'):
from keras.layers import Conv2D
else:
from keras.layers import Convolution2D
import math
def conv_2d(filters, kernel_shape, strides, padding, input_shape=None):
    """
    Defines the right convolutional layer according to the
    version of Keras that is installed.
    :param filters: (required integer) the dimensionality of the output
                    space (i.e. the number output of filters in the
                    convolution)
    :param kernel_shape: (required tuple or list of 2 integers) specifies
                         the strides of the convolution along the width and
                         height.
    :param padding: (required string) can be either 'valid' (no padding around
                    input or feature map) or 'same' (pad to ensure that the
                    output feature map size is identical to the layer input)
    :param input_shape: (optional) give input shape if this is the first
                        layer of the model
    :return: the Keras layer
    """
    # BUG FIX: the function body was missing entirely, so every call
    # returned None and modelB's model.add(None) failed. Dispatch on the
    # installed Keras version like the module-level import above.
    if LooseVersion(keras.__version__) >= LooseVersion('2.0.0'):
        if input_shape is not None:
            return Conv2D(filters=filters, kernel_size=kernel_shape,
                          strides=strides, padding=padding,
                          input_shape=input_shape)
        return Conv2D(filters=filters, kernel_size=kernel_shape,
                      strides=strides, padding=padding)
    else:
        # Keras 1.x API: positional nb_filter/nb_row/nb_col,
        # subsample/border_mode keywords.
        if input_shape is not None:
            return Convolution2D(filters, kernel_shape[0], kernel_shape[1],
                                 subsample=strides, border_mode=padding,
                                 input_shape=input_shape)
        return Convolution2D(filters, kernel_shape[0], kernel_shape[1],
                             subsample=strides, border_mode=padding)
def modelB(logits=False, input_ph=None, img_rows=28, img_cols=28,
           channels=1, nb_filters=64, nb_classes=10):
    """
    Defines a CNN model using Keras sequential model
    :param logits: If set to False, returns a Keras model, otherwise will also
                  return logits tensor
    :param input_ph: The TensorFlow tensor for the input
                    (needed if returning logits)
                    ("ph" stands for placeholder but it need not actually be a
                    placeholder)
    :param img_rows: number of row in the image
    :param img_cols: number of columns in the image
    :param channels: number of color channels (e.g., 1 for MNIST)
    :param nb_filters: number of convolutional filters per layer
    :param nb_classes: the number of output classes
    :return: the Keras Sequential model (conv stack + dense + softmax)
    """
    # NOTE(review): unlike modelA, the `logits`/`input_ph` parameters are
    # never used in this function — confirm whether a logits branch was
    # intended here as well.
    model = Sequential()
    # Define the layers successively (convolution layers are version dependent)
    # 'th' (Theano) ordering puts channels first; TF ordering puts them last.
    if keras.backend.image_dim_ordering() == 'th':
        input_shape = (channels, img_rows, img_cols)
    else:
        input_shape = (img_rows, img_cols, channels)
    layers = [conv_2d(nb_filters, (8, 8), (2, 2), "same",
                      input_shape=input_shape),
              Activation('relu'),
              conv_2d((nb_filters * 2), (6, 6), (2, 2), "valid"),
              Activation('relu'),
              conv_2d((nb_filters * 2), (5, 5), (1, 1), "valid"),
              Activation('relu'),
              Flatten(),
              Dense(nb_classes)]
    for layer in layers:
        model.add(layer)
    model.add(Activation('softmax'))
    return model
def modelA(logits=False, input_ph=None, img_rows=28, img_cols=28,
           channels=1, nb_filters=64, nb_classes=10):
    """
    Defines a fully-connected model using Keras sequential model
    :param logits: If set to False, returns a Keras model, otherwise will also
                  return logits tensor
    :param input_ph: The TensorFlow tensor for the input
                    (needed if returning logits)
                    ("ph" stands for placeholder but it need not actually be a
                    placeholder)
    :param img_rows: number of row in the image
    :param img_cols: number of columns in the image
    :param channels: number of color channels (e.g., 1 for MNIST)
    :param nb_filters: number of convolutional filters per layer
    :param nb_classes: the number of output classes
    :return: the Keras Sequential model (MLP: 3 hidden dense layers + softmax)
    """
    model = Sequential()
    # Define the layers successively (convolution layers are version dependent)
    # 'th' (Theano) ordering puts channels first; TF ordering puts them last.
    if keras.backend.image_dim_ordering() == 'th':
        input_shape = (channels, img_rows, img_cols)
    else:
        input_shape = (img_rows, img_cols, channels)
    layers = [Flatten(input_shape=input_shape),
              Dense(nb_filters),
              Activation('relu'),
              Dense(nb_filters * 2),
              Activation('relu'),
              Dense(nb_filters * 4),
              Activation('relu'),
              Dropout(0.2),
              Dense(nb_classes)]
    for layer in layers:
        model.add(layer)
    if logits:
        # NOTE(review): logits_tensor is computed (adding ops to the graph)
        # but never returned, despite the docstring — verify the intended
        # return value for logits=True.
        logits_tensor = model(input_ph)
    model.add(Activation('softmax'))
    return model
def modelC(logits=False, input_ph=None, img_rows=28, img_cols=28,
channels=1, nb_filters=64, nb_classes=10):
"""
Defines a CNN model using Keras sequential model
:param logits: If set to False, returns a Keras model, otherwise will also
return logits tensor
:param input_ph: The TensorFlow tensor for the input
(needed if returning logits)
("ph" stands for placeholder but it need not actually be a
placeholder)
:param img_rows: number of row in the image
:param img_cols: number of columns in the image
:param channels: number of color channels (e.g., 1 for MNIST)
:param nb_filters: number of convolutional filters per layer
:param nb_classes: the number of output classes
:return:
"""
model = Sequential()
# Define the layers successively (convolution layers are version dependent)
if keras.backend.image_dim_ordering() == 'th':
input_shape = (channels, img_rows, img_cols)
else:
input_shape = (img_rows, img_cols, channels)
model = keras.Sequential([
keras.layers.Conv2D(input_shape=(28, 28, 1),
kernel_size=(3, 3), filters=32, activation='relu'),
keras.layers.Conv2D(kernel_size=(3, 3), filters=32, activation='relu'),
keras.layers.MaxPool2D(),
keras.layers.Conv2D(kernel_size=(3, 3), filters=32, activation='relu'),
keras.layers.Conv2D(kernel_size=(3, 3), filters=32, activation='relu'),
keras.layers.MaxPool2D(),
keras.layers.Flatten(),
keras.layers.Dense(200, activation='relu'),
keras.layers.Dropout(0.5),
keras.layers.Dense(200, activation='relu'),
keras.layers.Dense(10, activation='softmax')
])
return model | cleverhans_tutorials/mymodel.py | from distutils.version import LooseVersion
import keras
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten, Dropout
if LooseVersion(keras.__version__) >= LooseVersion('2.0.0'):
from keras.layers import Conv2D
else:
from keras.layers import Convolution2D
import math
def conv_2d(filters, kernel_shape, strides, padding, input_shape=None):
"""
Defines the right convolutional layer according to the
version of Keras that is installed.
:param filters: (required integer) the dimensionality of the output
space (i.e. the number output of filters in the
convolution)
:param kernel_shape: (required tuple or list of 2 integers) specifies
the strides of the convolution along the width and
height.
:param padding: (required string) can be either 'valid' (no padding around
input or feature map) or 'same' (pad to ensure that the
output feature map size is identical to the layer input)
:param input_shape: (optional) give input shape if this is the first
layer of the model
:return: the Keras layer
"""
def modelB(logits=False, input_ph=None, img_rows=28, img_cols=28,
channels=1, nb_filters=64, nb_classes=10):
"""
Defines a CNN model using Keras sequential model
:param logits: If set to False, returns a Keras model, otherwise will also
return logits tensor
:param input_ph: The TensorFlow tensor for the input
(needed if returning logits)
("ph" stands for placeholder but it need not actually be a
placeholder)
:param img_rows: number of row in the image
:param img_cols: number of columns in the image
:param channels: number of color channels (e.g., 1 for MNIST)
:param nb_filters: number of convolutional filters per layer
:param nb_classes: the number of output classes
:return:
"""
model = Sequential()
# Define the layers successively (convolution layers are version dependent)
if keras.backend.image_dim_ordering() == 'th':
input_shape = (channels, img_rows, img_cols)
else:
input_shape = (img_rows, img_cols, channels)
layers = [conv_2d(nb_filters, (8, 8), (2, 2), "same",
input_shape=input_shape),
Activation('relu'),
conv_2d((nb_filters * 2), (6, 6), (2, 2), "valid"),
Activation('relu'),
conv_2d((nb_filters * 2), (5, 5), (1, 1), "valid"),
Activation('relu'),
Flatten(),
Dense(nb_classes)]
for layer in layers:
model.add(layer)
model.add(Activation('softmax'))
return model
def modelA(logits=False, input_ph=None, img_rows=28, img_cols=28,
channels=1, nb_filters=64, nb_classes=10):
"""
Defines a CNN model using Keras sequential model
:param logits: If set to False, returns a Keras model, otherwise will also
return logits tensor
:param input_ph: The TensorFlow tensor for the input
(needed if returning logits)
("ph" stands for placeholder but it need not actually be a
placeholder)
:param img_rows: number of row in the image
:param img_cols: number of columns in the image
:param channels: number of color channels (e.g., 1 for MNIST)
:param nb_filters: number of convolutional filters per layer
:param nb_classes: the number of output classes
:return:
"""
model = Sequential()
# Define the layers successively (convolution layers are version dependent)
if keras.backend.image_dim_ordering() == 'th':
input_shape = (channels, img_rows, img_cols)
else:
input_shape = (img_rows, img_cols, channels)
layers = [Flatten(input_shape=input_shape),
Dense(nb_filters),
Activation('relu'),
Dense(nb_filters * 2),
Activation('relu'),
Dense(nb_filters * 4),
Activation('relu'),
Dropout(0.2),
Dense(nb_classes)]
for layer in layers:
model.add(layer)
if logits:
logits_tensor = model(input_ph)
model.add(Activation('softmax'))
return model
def modelC(logits=False, input_ph=None, img_rows=28, img_cols=28,
channels=1, nb_filters=64, nb_classes=10):
"""
Defines a CNN model using Keras sequential model
:param logits: If set to False, returns a Keras model, otherwise will also
return logits tensor
:param input_ph: The TensorFlow tensor for the input
(needed if returning logits)
("ph" stands for placeholder but it need not actually be a
placeholder)
:param img_rows: number of row in the image
:param img_cols: number of columns in the image
:param channels: number of color channels (e.g., 1 for MNIST)
:param nb_filters: number of convolutional filters per layer
:param nb_classes: the number of output classes
:return:
"""
model = Sequential()
# Define the layers successively (convolution layers are version dependent)
if keras.backend.image_dim_ordering() == 'th':
input_shape = (channels, img_rows, img_cols)
else:
input_shape = (img_rows, img_cols, channels)
model = keras.Sequential([
keras.layers.Conv2D(input_shape=(28, 28, 1),
kernel_size=(3, 3), filters=32, activation='relu'),
keras.layers.Conv2D(kernel_size=(3, 3), filters=32, activation='relu'),
keras.layers.MaxPool2D(),
keras.layers.Conv2D(kernel_size=(3, 3), filters=32, activation='relu'),
keras.layers.Conv2D(kernel_size=(3, 3), filters=32, activation='relu'),
keras.layers.MaxPool2D(),
keras.layers.Flatten(),
keras.layers.Dense(200, activation='relu'),
keras.layers.Dropout(0.5),
keras.layers.Dense(200, activation='relu'),
keras.layers.Dense(10, activation='softmax')
])
return model | 0.945876 | 0.558989 |
from layers import *
class DarkNet_Block(nn.Module):
"""
__version__ = 1.0
__date__ = Mar 7, 2022
paper : https://arxiv.org/abs/1804.02767
The structure is decribed in <Table 1.> of the paper.
"""
def __init__(self,
in_channels: int,
out_channels: int,
reduction: int = 2,
shortcut: bool = True,
Act: nn.Module = nn.LeakyReLU(negative_slope=0.1)):
channels = int(out_channels / reduction)
self.shortcut = shortcut and (in_channels == out_channels)
super(DarkNet_Block, self).__init__()
block = [Static_ConvLayer(in_channels, channels, 1, Act=Act),
Static_ConvLayer(channels, out_channels, 3, Act=Act)]
self.block = nn.Sequential(*block)
def forward(self, x):
input = x
x = self.block(x)
if self.shortcut:
x += input
return x
class CSP_DarkNet_Block(nn.Module):
"""
__version__ = 1.0
__date__ = Mar 7, 2022
paper : https://arxiv.org/abs/1911.11929
The structure is decribed in <Figure 3. (b)> of the paper.
"""
Block = DarkNet_Block
def __init__(self,
num_blocks: int,
in_channels: int,
out_channels: int,
half: bool = True,
block_reduction: int = 1,
Act: nn.Module = Mish()):
if half:
channels = int(out_channels / 2)
cat_channels = out_channels
else:
channels = out_channels
cat_channels = 2 * out_channels
super(CSP_DarkNet_Block, self).__init__()
self.part1 = Static_ConvLayer(in_channels, channels, 1, Act=Act)
self.part2 = Static_ConvLayer(in_channels, channels, 1, Act=Act)
dense = [self.Block(channels, channels, block_reduction, True, Act) for _ in range(num_blocks)]
self.dense = nn.Sequential(*dense)
self.trans1 = Static_ConvLayer(channels, channels, 1, Act=Act)
self.trans2 = Static_ConvLayer(cat_channels, out_channels, 1, Act=Act)
def forward(self, x):
x1 = self.part1(x)
x2 = self.part2(x)
x2 = self.dense(x2)
x2 = self.trans1(x2)
x = torch.cat((x2, x1), dim=1)
x = self.trans2(x)
return x
class CSP_DarkNet_Tiny_Block(nn.Module):
"""
__version__ = 1.0
__date__ = Mar 7, 2022
paper : https://arxiv.org/abs/2011.08036
The structure is decribed in <Figure 3.> of the paper.
"""
def __init__(self,
in_channels: int,
out_channels: int,
Act: nn.Module = nn.LeakyReLU(negative_slope=0.1)):
self.h_channels = int(out_channels / 2)
super(CSP_DarkNet_Tiny_Block, self).__init__()
self.part1 = Static_ConvLayer(in_channels, out_channels, 3, Act=Act)
self.part2_1 = Static_ConvLayer(self.h_channels, self.h_channels, 3, Act=Act)
self.part2_2 = Static_ConvLayer(self.h_channels, self.h_channels, 3, Act=Act)
self.trans = Static_ConvLayer(out_channels, out_channels, 1, Act=Act)
def forward(self, x):
x1 = self.part1(x)
x2 = torch.split(x1, self.h_channels, 1)[0]
x2_1 = self.part2_1(x2)
x2_2 = self.part2_2(x2_1)
x2 = torch.cat((x2_2, x2_1), 1)
x2 = self.trans(x2)
x = torch.cat((x1, x2), 1)
return x | backbone/block/dark.py | from layers import *
class DarkNet_Block(nn.Module):
"""
__version__ = 1.0
__date__ = Mar 7, 2022
paper : https://arxiv.org/abs/1804.02767
The structure is decribed in <Table 1.> of the paper.
"""
def __init__(self,
in_channels: int,
out_channels: int,
reduction: int = 2,
shortcut: bool = True,
Act: nn.Module = nn.LeakyReLU(negative_slope=0.1)):
channels = int(out_channels / reduction)
self.shortcut = shortcut and (in_channels == out_channels)
super(DarkNet_Block, self).__init__()
block = [Static_ConvLayer(in_channels, channels, 1, Act=Act),
Static_ConvLayer(channels, out_channels, 3, Act=Act)]
self.block = nn.Sequential(*block)
def forward(self, x):
input = x
x = self.block(x)
if self.shortcut:
x += input
return x
class CSP_DarkNet_Block(nn.Module):
"""
__version__ = 1.0
__date__ = Mar 7, 2022
paper : https://arxiv.org/abs/1911.11929
The structure is decribed in <Figure 3. (b)> of the paper.
"""
Block = DarkNet_Block
def __init__(self,
num_blocks: int,
in_channels: int,
out_channels: int,
half: bool = True,
block_reduction: int = 1,
Act: nn.Module = Mish()):
if half:
channels = int(out_channels / 2)
cat_channels = out_channels
else:
channels = out_channels
cat_channels = 2 * out_channels
super(CSP_DarkNet_Block, self).__init__()
self.part1 = Static_ConvLayer(in_channels, channels, 1, Act=Act)
self.part2 = Static_ConvLayer(in_channels, channels, 1, Act=Act)
dense = [self.Block(channels, channels, block_reduction, True, Act) for _ in range(num_blocks)]
self.dense = nn.Sequential(*dense)
self.trans1 = Static_ConvLayer(channels, channels, 1, Act=Act)
self.trans2 = Static_ConvLayer(cat_channels, out_channels, 1, Act=Act)
def forward(self, x):
x1 = self.part1(x)
x2 = self.part2(x)
x2 = self.dense(x2)
x2 = self.trans1(x2)
x = torch.cat((x2, x1), dim=1)
x = self.trans2(x)
return x
class CSP_DarkNet_Tiny_Block(nn.Module):
"""
__version__ = 1.0
__date__ = Mar 7, 2022
paper : https://arxiv.org/abs/2011.08036
The structure is decribed in <Figure 3.> of the paper.
"""
def __init__(self,
in_channels: int,
out_channels: int,
Act: nn.Module = nn.LeakyReLU(negative_slope=0.1)):
self.h_channels = int(out_channels / 2)
super(CSP_DarkNet_Tiny_Block, self).__init__()
self.part1 = Static_ConvLayer(in_channels, out_channels, 3, Act=Act)
self.part2_1 = Static_ConvLayer(self.h_channels, self.h_channels, 3, Act=Act)
self.part2_2 = Static_ConvLayer(self.h_channels, self.h_channels, 3, Act=Act)
self.trans = Static_ConvLayer(out_channels, out_channels, 1, Act=Act)
def forward(self, x):
x1 = self.part1(x)
x2 = torch.split(x1, self.h_channels, 1)[0]
x2_1 = self.part2_1(x2)
x2_2 = self.part2_2(x2_1)
x2 = torch.cat((x2_2, x2_1), 1)
x2 = self.trans(x2)
x = torch.cat((x1, x2), 1)
return x | 0.944389 | 0.477798 |
from unittest import mock
import pytest
from .cherry_picker import get_base_branch, get_current_branch, \
get_full_sha_from_short, is_cpython_repo, CherryPicker, \
normalize_commit_message
def test_get_base_branch():
cherry_pick_branch = 'backport-afc23f4-2.7'
result = get_base_branch(cherry_pick_branch)
assert result == '2.7'
def test_get_base_branch_without_dash():
cherry_pick_branch ='master'
result = get_base_branch(cherry_pick_branch)
assert result == 'master'
@mock.patch('subprocess.check_output')
def test_get_current_branch(subprocess_check_output):
subprocess_check_output.return_value = b'master'
assert get_current_branch() == 'master'
@mock.patch('subprocess.check_output')
def test_get_full_sha_from_short(subprocess_check_output):
mock_output = b"""commit 22a594a0047d7706537ff2ac676cdc0f1dcb329c
tree 14ab2ea85e7a28adb9d40f185006308d87a67f47
parent 5908300e4b0891fc5ab8bd24fba8fac72012eaa7
author <NAME> <<EMAIL>> 1492106895 +0200
committer Mariatta <<EMAIL>> 1492106895 -0700
bpo-29694: race condition in pathlib mkdir with flags parents=True (GH-1089)
diff --git a/Lib/pathlib.py b/Lib/pathlib.py
index fc7ce5e..1914229 100644
--- a/Lib/pathlib.py
+++ b/Lib/pathlib.py
"""
subprocess_check_output.return_value = mock_output
assert get_full_sha_from_short('22a594a') == '22a594a0047d7706537ff2ac676cdc0f1dcb329c'
@mock.patch('os.path.exists')
def test_sorted_branch(os_path_exists):
os_path_exists.return_value = True
branches = ["3.1", "2.7", "3.10", "3.6"]
cp = CherryPicker('origin', '22a594a0047d7706537ff2ac676cdc0f1dcb329c', branches)
assert cp.sorted_branches == ["3.10", "3.6", "3.1", "2.7"]
@mock.patch('os.path.exists')
def test_get_cherry_pick_branch(os_path_exists):
os_path_exists.return_value = True
branches = ["3.6"]
cp = CherryPicker('origin', '22a594a0047d7706537ff2ac676cdc0f1dcb329c', branches)
assert cp.get_cherry_pick_branch("3.6") == "backport-22a594a-3.6"
@mock.patch('os.path.exists')
@mock.patch('subprocess.check_output')
def test_get_pr_url(subprocess_check_output, os_path_exists):
os_path_exists.return_value = True
subprocess_check_output.return_value = b'https://github.com/mock_user/cpython.git'
branches = ["3.6"]
cp = CherryPicker('origin', '22a594a0047d7706537ff2ac676cdc0f1dcb329c',
branches)
assert cp.get_pr_url("3.6", cp.get_cherry_pick_branch("3.6")) \
== "https://github.com/python/cpython/compare/3.6...mock_user:backport-22a594a-3.6?expand=1"
@pytest.mark.parametrize('url', [
b'<EMAIL>:mock_user/cpython.git',
b'<EMAIL>:mock_user/cpython',
b'ssh://git@github.com/mock_user/cpython.git',
b'ssh://git@github.com/mock_user/cpython',
b'https://github.com/mock_user/cpython.git',
b'https://github.com/mock_user/cpython',
])
def test_username(url):
with mock.patch('subprocess.check_output', return_value=url):
branches = ["3.6"]
cp = CherryPicker('origin', '22a594a0047d7706537ff2ac676cdc0f1dcb329c',
branches)
assert cp.username == 'mock_user'
@mock.patch('os.path.exists')
@mock.patch('subprocess.check_output')
def test_get_updated_commit_message(subprocess_check_output, os_path_exists):
os_path_exists.return_value = True
subprocess_check_output.return_value = b'bpo-123: Fix Spam Module (#113)'
branches = ["3.6"]
cp = CherryPicker('origin', '22a594a0047d7706537ff2ac676cdc0f1dcb329c',
branches)
assert cp.get_commit_message('22a594a0047d7706537ff2ac676cdc0f1dcb329c') \
== 'bpo-123: Fix Spam Module (GH-113)'
@mock.patch('subprocess.check_output')
def test_is_cpython_repo(subprocess_check_output):
subprocess_check_output.return_value = """commit 7f777ed95a19224294949e1b4ce56bbffcb1fe9f
Author: <NAME> <<EMAIL>>
Date: Thu Aug 9 14:25:15 1990 +0000
Initial revision
"""
assert is_cpython_repo() == True
def test_is_not_cpython_repo():
assert is_cpython_repo() == False
def test_normalize_long_commit_message():
commit_message = """[3.6] Fix broken `Show Source` links on documentation pages (GH-3113)
The `Show Source` was broken because of a change made in sphinx 1.5.1
In Sphinx 1.4.9, the sourcename was "index.txt".
In Sphinx 1.5.1+, it is now "index.rst.txt".
(cherry picked from commit <PASSWORD>)"""
title, body = normalize_commit_message(commit_message)
assert title == "[3.6] Fix broken `Show Source` links on documentation pages (GH-3113)"
assert body == """The `Show Source` was broken because of a change made in sphinx 1.5.1
In Sphinx 1.4.9, the sourcename was "index.txt".
In Sphinx 1.5.1+, it is now "index.rst.txt".
(cherry picked from commit <PASSWORD>)"""
def test_normalize_short_commit_message():
commit_message = """[3.6] Fix broken `Show Source` links on documentation pages (GH-3113)
(cherry picked from commit <PASSWORD>)"""
title, body = normalize_commit_message(commit_message)
assert title == "[3.6] Fix broken `Show Source` links on documentation pages (GH-3113)"
assert body == """(cherry picked from commit <PASSWORD>)""" | cherry_picker/cherry_picker/test.py | from unittest import mock
import pytest
from .cherry_picker import get_base_branch, get_current_branch, \
get_full_sha_from_short, is_cpython_repo, CherryPicker, \
normalize_commit_message
def test_get_base_branch():
cherry_pick_branch = 'backport-afc23f4-2.7'
result = get_base_branch(cherry_pick_branch)
assert result == '2.7'
def test_get_base_branch_without_dash():
cherry_pick_branch ='master'
result = get_base_branch(cherry_pick_branch)
assert result == 'master'
@mock.patch('subprocess.check_output')
def test_get_current_branch(subprocess_check_output):
subprocess_check_output.return_value = b'master'
assert get_current_branch() == 'master'
@mock.patch('subprocess.check_output')
def test_get_full_sha_from_short(subprocess_check_output):
mock_output = b"""commit 22a594a0047d7706537ff2ac676cdc0f1dcb329c
tree 14ab2ea85e7a28adb9d40f185006308d87a67f47
parent 5908300e4b0891fc5ab8bd24fba8fac72012eaa7
author <NAME> <<EMAIL>> 1492106895 +0200
committer Mariatta <<EMAIL>> 1492106895 -0700
bpo-29694: race condition in pathlib mkdir with flags parents=True (GH-1089)
diff --git a/Lib/pathlib.py b/Lib/pathlib.py
index fc7ce5e..1914229 100644
--- a/Lib/pathlib.py
+++ b/Lib/pathlib.py
"""
subprocess_check_output.return_value = mock_output
assert get_full_sha_from_short('22a594a') == '22a594a0047d7706537ff2ac676cdc0f1dcb329c'
@mock.patch('os.path.exists')
def test_sorted_branch(os_path_exists):
os_path_exists.return_value = True
branches = ["3.1", "2.7", "3.10", "3.6"]
cp = CherryPicker('origin', '22a594a0047d7706537ff2ac676cdc0f1dcb329c', branches)
assert cp.sorted_branches == ["3.10", "3.6", "3.1", "2.7"]
@mock.patch('os.path.exists')
def test_get_cherry_pick_branch(os_path_exists):
os_path_exists.return_value = True
branches = ["3.6"]
cp = CherryPicker('origin', '22a594a0047d7706537ff2ac676cdc0f1dcb329c', branches)
assert cp.get_cherry_pick_branch("3.6") == "backport-22a594a-3.6"
@mock.patch('os.path.exists')
@mock.patch('subprocess.check_output')
def test_get_pr_url(subprocess_check_output, os_path_exists):
os_path_exists.return_value = True
subprocess_check_output.return_value = b'https://github.com/mock_user/cpython.git'
branches = ["3.6"]
cp = CherryPicker('origin', '22a594a0047d7706537ff2ac676cdc0f1dcb329c',
branches)
assert cp.get_pr_url("3.6", cp.get_cherry_pick_branch("3.6")) \
== "https://github.com/python/cpython/compare/3.6...mock_user:backport-22a594a-3.6?expand=1"
@pytest.mark.parametrize('url', [
b'<EMAIL>:mock_user/cpython.git',
b'<EMAIL>:mock_user/cpython',
b'ssh://git@github.com/mock_user/cpython.git',
b'ssh://git@github.com/mock_user/cpython',
b'https://github.com/mock_user/cpython.git',
b'https://github.com/mock_user/cpython',
])
def test_username(url):
with mock.patch('subprocess.check_output', return_value=url):
branches = ["3.6"]
cp = CherryPicker('origin', '22a594a0047d7706537ff2ac676cdc0f1dcb329c',
branches)
assert cp.username == 'mock_user'
@mock.patch('os.path.exists')
@mock.patch('subprocess.check_output')
def test_get_updated_commit_message(subprocess_check_output, os_path_exists):
os_path_exists.return_value = True
subprocess_check_output.return_value = b'bpo-123: Fix Spam Module (#113)'
branches = ["3.6"]
cp = CherryPicker('origin', '22a594a0047d7706537ff2ac676cdc0f1dcb329c',
branches)
assert cp.get_commit_message('22a594a0047d7706537ff2ac676cdc0f1dcb329c') \
== 'bpo-123: Fix Spam Module (GH-113)'
@mock.patch('subprocess.check_output')
def test_is_cpython_repo(subprocess_check_output):
subprocess_check_output.return_value = """commit 7f777ed95a19224294949e1b4ce56bbffcb1fe9f
Author: <NAME> <<EMAIL>>
Date: Thu Aug 9 14:25:15 1990 +0000
Initial revision
"""
assert is_cpython_repo() == True
def test_is_not_cpython_repo():
assert is_cpython_repo() == False
def test_normalize_long_commit_message():
commit_message = """[3.6] Fix broken `Show Source` links on documentation pages (GH-3113)
The `Show Source` was broken because of a change made in sphinx 1.5.1
In Sphinx 1.4.9, the sourcename was "index.txt".
In Sphinx 1.5.1+, it is now "index.rst.txt".
(cherry picked from commit <PASSWORD>)"""
title, body = normalize_commit_message(commit_message)
assert title == "[3.6] Fix broken `Show Source` links on documentation pages (GH-3113)"
assert body == """The `Show Source` was broken because of a change made in sphinx 1.5.1
In Sphinx 1.4.9, the sourcename was "index.txt".
In Sphinx 1.5.1+, it is now "index.rst.txt".
(cherry picked from commit <PASSWORD>)"""
def test_normalize_short_commit_message():
commit_message = """[3.6] Fix broken `Show Source` links on documentation pages (GH-3113)
(cherry picked from commit <PASSWORD>)"""
title, body = normalize_commit_message(commit_message)
assert title == "[3.6] Fix broken `Show Source` links on documentation pages (GH-3113)"
assert body == """(cherry picked from commit <PASSWORD>)""" | 0.628293 | 0.287818 |
import numpy as np
import csv
import matplotlib.pyplot as plt
n = 16 # number of input features.
m = 60 # number of training examples.
grad = np.zeros(shape = (n, 1))
theta = np.ones(shape=(n, 1), dtype = float)
hx = np.ones(shape=(m, 1), dtype = float)
file_handle = open("datasets/air-pollution/data.csv", "r")
reader = csv.reader(file_handle, delimiter = ',')
learning_rate = 1e-6
def h(X):
global theta
res = np.matmul(np.transpose(theta), X)
return res
cost_list = []
itr_list = []
def gradient_descent_algorithm():
global theta, grad
num_itrs = 10000
for itr in range(num_itrs):
file_handle.seek(0)
total_cost = 0.0
idx = 0
for row in reader:
X = [float(x) for x in row[0: -1]]
# list of floats
X = np.asarray(X)
np.reshape(X, [n, 1])
hx[idx][0] = h(X)
y_correct = float(row[0])
diff = (hx[idx][0] - y_correct)
total_cost += (diff * diff)
idx += 1
for j in range(n):
grad[j][0] = 0.0
i = 0
file_handle.seek(0)
for row in reader:
y_correct = float(row[-1])
xij = float(row[j + 1])
diff = hx[i][0] - y_correct
grad[j][0] += ((learning_rate * diff * xij) / m)
i += 1
theta = theta - grad
total_cost = total_cost /(2 * m)
cost_list.append(total_cost)
itr_list.append(itr + 1)
gradient_descent_algorithm()
plt.plot(itr_list, cost_list, label = "cost")
plt.xlabel("iterations")
# naming the y axis
plt.ylabel('Cost')
# giving a title to my graph
plt.title('Cost vs iterations')
# show a legend on the plot
plt.legend()
# function to show the plot
plt.show()
ypaxis = []
ycaxis = []
xaxis = []
index = 0
file_handle.seek(0)
for row in reader:
X = [float(x) for x in row[1:]]
# list of floats
X = np.asarray(X)
np.reshape(X, [n, 1])
pred = h(X)
y_correct = float(row[0])
index += 1
ypaxis.append(pred)
ycaxis.append(y_correct)
xaxis.append(index)
plt.plot(xaxis, ycaxis, label = "correct")
plt.plot(xaxis, ypaxis, label = "prediction")
plt.xlabel("examples")
# naming the y axis
plt.ylabel('h_theta')
plt.title('correct vs predicted')
# show a legend on the plot
plt.legend()
# function to show the plot
plt.show() | master/multivariate-linear-regression.py | import numpy as np
import csv
import matplotlib.pyplot as plt
n = 16 # number of input features.
m = 60 # number of training examples.
grad = np.zeros(shape = (n, 1))
theta = np.ones(shape=(n, 1), dtype = float)
hx = np.ones(shape=(m, 1), dtype = float)
file_handle = open("datasets/air-pollution/data.csv", "r")
reader = csv.reader(file_handle, delimiter = ',')
learning_rate = 1e-6
def h(X):
global theta
res = np.matmul(np.transpose(theta), X)
return res
cost_list = []
itr_list = []
def gradient_descent_algorithm():
global theta, grad
num_itrs = 10000
for itr in range(num_itrs):
file_handle.seek(0)
total_cost = 0.0
idx = 0
for row in reader:
X = [float(x) for x in row[0: -1]]
# list of floats
X = np.asarray(X)
np.reshape(X, [n, 1])
hx[idx][0] = h(X)
y_correct = float(row[0])
diff = (hx[idx][0] - y_correct)
total_cost += (diff * diff)
idx += 1
for j in range(n):
grad[j][0] = 0.0
i = 0
file_handle.seek(0)
for row in reader:
y_correct = float(row[-1])
xij = float(row[j + 1])
diff = hx[i][0] - y_correct
grad[j][0] += ((learning_rate * diff * xij) / m)
i += 1
theta = theta - grad
total_cost = total_cost /(2 * m)
cost_list.append(total_cost)
itr_list.append(itr + 1)
gradient_descent_algorithm()
plt.plot(itr_list, cost_list, label = "cost")
plt.xlabel("iterations")
# naming the y axis
plt.ylabel('Cost')
# giving a title to my graph
plt.title('Cost vs iterations')
# show a legend on the plot
plt.legend()
# function to show the plot
plt.show()
ypaxis = []
ycaxis = []
xaxis = []
index = 0
file_handle.seek(0)
for row in reader:
X = [float(x) for x in row[1:]]
# list of floats
X = np.asarray(X)
np.reshape(X, [n, 1])
pred = h(X)
y_correct = float(row[0])
index += 1
ypaxis.append(pred)
ycaxis.append(y_correct)
xaxis.append(index)
plt.plot(xaxis, ycaxis, label = "correct")
plt.plot(xaxis, ypaxis, label = "prediction")
plt.xlabel("examples")
# naming the y axis
plt.ylabel('h_theta')
plt.title('correct vs predicted')
# show a legend on the plot
plt.legend()
# function to show the plot
plt.show() | 0.360489 | 0.653956 |
import ethereum.tester
from ethereum.tester import TransactionFailed
import attr
import binascii
import unittest
import collections
from types import MethodType
from typing import List
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__version__ = '3.2.0'
__license__ = 'MIT'
__all__ = [
'ContractTest',
'default_accounts'
]
GLOBAL_STATE = ethereum.tester.state()
@attr.s
class Account:
raw_address = attr.ib(
validator=attr.validators.instance_of(bytes)
)
private_key = attr.ib(
validator=attr.validators.instance_of(bytes)
)
@property
def address(self) -> bytes:
return binascii.hexlify(self.raw_address)
default_accounts: List[Account] = list(
map(
Account,
ethereum.tester.accounts,
ethereum.tester.keys
)
)
class ContractTestMeta(type):
"""Metaclass for ContractTest which ensures tests are run in order."""
@classmethod
def __prepare__(mcs, name, bases, **kwds):
result = collections.OrderedDict()
if kwds.get('globalState', False):
result['state'] = GLOBAL_STATE
else:
result['state'] = ethereum.tester.state()
return result
def __new__(mcs, name, bases, cls_dict):
test_order = []
for name in cls_dict:
if name.startswith('test_') and callable(cls_dict[name]):
test_order.append(name)
cls_dict['__test_order__'] = test_order
return super().__new__(mcs, name, bases, cls_dict)
class ContractTestLoader(unittest.TestLoader):
def getTestCaseNames(self, test_case_class: 'ContractTest'):
try:
return test_case_class.__test_order__
except AttributeError:
return super().getTestCaseNames(test_case_class)
class ContractTest(unittest.TestCase, metaclass=ContractTestMeta):
__test_order__: List[str]
creator: Account = default_accounts[0]
source: str
state: ethereum.tester.state
contract: ethereum.tester.ABIContract
address: str
@classmethod
def setUpClass(cls):
cls.contract = cls.state.abi_contract(
cls.source,
sender=cls.creator.private_key
)
cls.address = binascii.hexlify(cls.contract.address).decode()
# renames functions so they make more sense in errors
for name, obj in vars(cls.contract).items():
if isinstance(obj, MethodType):
if obj.__func__.__name__ == 'kall':
obj.__func__.__name__ = name
def setUp(self):
# avoids hitting block gas limit
self.state.mine()
def assertTxFail(self):
return self.assertRaises(TransactionFailed)
@staticmethod
def run_tests(warnings='ignore'):
loader = ContractTestLoader()
unittest.main(testLoader=loader, warnings=warnings, verbosity=2) | serpent_tests/__init__.py | import ethereum.tester
from ethereum.tester import TransactionFailed
import attr
import binascii
import unittest
import collections
from types import MethodType
from typing import List
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__version__ = '3.2.0'
__license__ = 'MIT'
__all__ = [
'ContractTest',
'default_accounts'
]
GLOBAL_STATE = ethereum.tester.state()
@attr.s
class Account:
raw_address = attr.ib(
validator=attr.validators.instance_of(bytes)
)
private_key = attr.ib(
validator=attr.validators.instance_of(bytes)
)
@property
def address(self) -> bytes:
return binascii.hexlify(self.raw_address)
default_accounts: List[Account] = list(
map(
Account,
ethereum.tester.accounts,
ethereum.tester.keys
)
)
class ContractTestMeta(type):
"""Metaclass for ContractTest which ensures tests are run in order."""
@classmethod
def __prepare__(mcs, name, bases, **kwds):
result = collections.OrderedDict()
if kwds.get('globalState', False):
result['state'] = GLOBAL_STATE
else:
result['state'] = ethereum.tester.state()
return result
def __new__(mcs, name, bases, cls_dict):
test_order = []
for name in cls_dict:
if name.startswith('test_') and callable(cls_dict[name]):
test_order.append(name)
cls_dict['__test_order__'] = test_order
return super().__new__(mcs, name, bases, cls_dict)
class ContractTestLoader(unittest.TestLoader):
def getTestCaseNames(self, test_case_class: 'ContractTest'):
try:
return test_case_class.__test_order__
except AttributeError:
return super().getTestCaseNames(test_case_class)
class ContractTest(unittest.TestCase, metaclass=ContractTestMeta):
__test_order__: List[str]
creator: Account = default_accounts[0]
source: str
state: ethereum.tester.state
contract: ethereum.tester.ABIContract
address: str
@classmethod
def setUpClass(cls):
cls.contract = cls.state.abi_contract(
cls.source,
sender=cls.creator.private_key
)
cls.address = binascii.hexlify(cls.contract.address).decode()
# renames functions so they make more sense in errors
for name, obj in vars(cls.contract).items():
if isinstance(obj, MethodType):
if obj.__func__.__name__ == 'kall':
obj.__func__.__name__ = name
def setUp(self):
# avoids hitting block gas limit
self.state.mine()
def assertTxFail(self):
return self.assertRaises(TransactionFailed)
@staticmethod
def run_tests(warnings='ignore'):
loader = ContractTestLoader()
unittest.main(testLoader=loader, warnings=warnings, verbosity=2) | 0.665954 | 0.176672 |
import logging
import os
import platform
import pwd
import threading
import ipaddress
import requests
import yaml
from containercluster import ca, utils
__all__ = [
"Config",
"SSHKeyPair"
]
class Config(object):
dir_lock = threading.RLock()
log = logging.getLogger(__name__)
def __init__(self, home=None):
if home is None:
self.home = os.path.expanduser("~")
else:
self.home = home
self._clusters = {}
def add_cluster(self, name, channel, n_etcd, size_etcd, n_workers,
size_worker, provider, location, network, subnet_length,
subnet_min, subnet_max, services_ip_range, dns_service_ip,
kubernetes_service_ip):
cluster = {
"provider": provider,
"channel": channel,
"location": location,
"discovery_token": make_discovery_token(n_etcd),
"network": network,
"subnet_length": subnet_length,
"subnet_min": subnet_min,
"subnet_max": subnet_max,
"services_ip_range": services_ip_range,
"dns_service_ip": dns_service_ip,
"kubernetes_service_ip": kubernetes_service_ip,
"nodes": [],
}
for i in range(n_etcd):
cluster["nodes"].append({
"name": "%s-etcd%d" % (name, i),
"type": "etcd",
"size": size_etcd,
})
cluster["nodes"].append({
"name": "%s-master" % (name,),
"type": "master",
"size": size_worker,
})
for i in range(n_workers):
cluster["nodes"].append({
"name": "%s-worker%d" % (name, i),
"type": "worker",
"size": size_worker,
})
self._clusters[name] = cluster
def remove_cluster(self, name):
try:
del self._clusters[name]
except KeyError:
pass
def save(self):
fname = self.clusters_yaml_path
self.log.debug("Saving clusters definitions to %s", fname)
clusters = dict(self._clusters)
for c in clusters.values():
c = dict(c)
for k in ("network", "subnet_min", "subnet_max"):
c[k] = str(c[k])
with open(fname, "wt") as f:
yaml.dump(clusters, f)
def __repr__(self):
return "<Config %r>" % (self._clusters,)
@property
def clusters(self):
if not self._clusters:
fname = self.clusters_yaml_path
if os.access(fname, os.F_OK):
self.log.debug("Loading clusters definitions from %s", fname)
with open(fname, "rt") as f:
self._clusters = yaml.load(f)
for c in self._clusters.values():
for k in ("network", "subnet_min", "subnet_max"):
c[k] = ipaddress.ip_network(c[k])
else:
self._clusters = {}
return dict(self._clusters)
@property
def clusters_yaml_path(self):
return os.path.join(self.config_dir, "clusters.yaml")
@property
def config_dir(self):
return self._ensure_dir(os.path.join(self.home, ".container-cluster"))
@property
def bin_dir(self):
return self._ensure_dir(os.path.join(self.config_dir, "bin"))
@property
def ca_dir(self):
return self._ensure_dir(os.path.join(self.config_dir, "ca"))
@property
def ssh_dir(self):
return self._ensure_dir(os.path.join(self.config_dir, ".ssh"))
@property
def ca_cert_path(self):
return ca.CA(self.ca_dir).cert_path
@property
def admin_cert_path(self):
fname, _ = self._ensure_admin_tls()
return fname
@property
def admin_key_path(self):
_, fname = self._ensure_admin_tls()
return fname
def node_tls_paths(self, node_name, alt_names=None):
return ca.CA(self.ca_dir).generate_cert(node_name, alt_names)
def _ensure_admin_tls(self):
return self.node_tls_paths(u"admin")
@property
def ssh_key_pair(self):
return SSHKeyPair(self.ssh_dir)
def _ensure_dir(self, dname):
with self.dir_lock:
if not os.access(dname, os.F_OK):
self.log.debug("Creating directory %s", dname)
os.makedirs(dname)
return dname
def kubeconfig_path(self, cluster_name, master_ip):
kubeconfig = {
"apiVersion": "v1",
"kind": "Config",
"clusters": [
{
"name": cluster_name,
"cluster": {
"certificate-authority": self.ca_cert_path,
"server": "https://%s" % (master_ip,)
}
},
],
"users": [
{
"name": "admin",
"user": {
"client-certificate": self.admin_cert_path,
"client-key": self.admin_key_path,
},
},
],
"contexts": [
{
"name": cluster_name,
"context": {
"cluster": cluster_name,
"user": "admin",
}
},
],
"current-context": cluster_name,
}
fname = os.path.join(self.config_dir, "kubeconfig-%s" % (cluster_name,))
with open(fname, "wt") as f:
yaml.dump(kubeconfig, f)
return fname
class SSHKeyPair(object):
ssh_keygen_lock = threading.Lock()
log = logging.getLogger(__name__)
def __init__(self, dname):
self.dname = dname
@property
def name(self):
return ("container-cluster-%s-%s" %
(pwd.getpwuid(os.geteuid()).pw_name,
platform.node().split(".")[0]))
@property
def _key_file_name(self):
return os.path.join(self.dname, "id_rsa-%s" % (self.name,))
@property
def public_key(self):
fname = self._ensure_ssh_key() + ".pub"
with open(fname, "rt") as f:
return f.read()
@property
def private_key_path(self):
return self._ensure_ssh_key()
def _ensure_ssh_key(self):
fname = self._key_file_name
with self.ssh_keygen_lock:
if not os.access(fname, os.R_OK):
self.log.debug("Generating SSH key pair %s", fname)
utils.run("ssh-keygen -f %s -N ''" % (fname,))
return fname
LOG = logging.getLogger(__name__)
def make_discovery_token(size):
res = requests.get("https://discovery.etcd.io/new?size=%d" % (size,))
res.raise_for_status()
token = res.content[len("https://discovery.etcd.io/"):]
LOG.debug("New token for %d node(s): %s", size, token)
return token | containercluster/config.py | import logging
import os
import platform
import pwd
import threading
import ipaddress
import requests
import yaml
from containercluster import ca, utils
__all__ = [
"Config",
"SSHKeyPair"
]
class Config(object):
dir_lock = threading.RLock()
log = logging.getLogger(__name__)
def __init__(self, home=None):
if home is None:
self.home = os.path.expanduser("~")
else:
self.home = home
self._clusters = {}
def add_cluster(self, name, channel, n_etcd, size_etcd, n_workers,
size_worker, provider, location, network, subnet_length,
subnet_min, subnet_max, services_ip_range, dns_service_ip,
kubernetes_service_ip):
cluster = {
"provider": provider,
"channel": channel,
"location": location,
"discovery_token": make_discovery_token(n_etcd),
"network": network,
"subnet_length": subnet_length,
"subnet_min": subnet_min,
"subnet_max": subnet_max,
"services_ip_range": services_ip_range,
"dns_service_ip": dns_service_ip,
"kubernetes_service_ip": kubernetes_service_ip,
"nodes": [],
}
for i in range(n_etcd):
cluster["nodes"].append({
"name": "%s-etcd%d" % (name, i),
"type": "etcd",
"size": size_etcd,
})
cluster["nodes"].append({
"name": "%s-master" % (name,),
"type": "master",
"size": size_worker,
})
for i in range(n_workers):
cluster["nodes"].append({
"name": "%s-worker%d" % (name, i),
"type": "worker",
"size": size_worker,
})
self._clusters[name] = cluster
def remove_cluster(self, name):
try:
del self._clusters[name]
except KeyError:
pass
def save(self):
fname = self.clusters_yaml_path
self.log.debug("Saving clusters definitions to %s", fname)
clusters = dict(self._clusters)
for c in clusters.values():
c = dict(c)
for k in ("network", "subnet_min", "subnet_max"):
c[k] = str(c[k])
with open(fname, "wt") as f:
yaml.dump(clusters, f)
def __repr__(self):
return "<Config %r>" % (self._clusters,)
@property
def clusters(self):
if not self._clusters:
fname = self.clusters_yaml_path
if os.access(fname, os.F_OK):
self.log.debug("Loading clusters definitions from %s", fname)
with open(fname, "rt") as f:
self._clusters = yaml.load(f)
for c in self._clusters.values():
for k in ("network", "subnet_min", "subnet_max"):
c[k] = ipaddress.ip_network(c[k])
else:
self._clusters = {}
return dict(self._clusters)
@property
def clusters_yaml_path(self):
return os.path.join(self.config_dir, "clusters.yaml")
@property
def config_dir(self):
return self._ensure_dir(os.path.join(self.home, ".container-cluster"))
@property
def bin_dir(self):
return self._ensure_dir(os.path.join(self.config_dir, "bin"))
@property
def ca_dir(self):
return self._ensure_dir(os.path.join(self.config_dir, "ca"))
@property
def ssh_dir(self):
return self._ensure_dir(os.path.join(self.config_dir, ".ssh"))
@property
def ca_cert_path(self):
return ca.CA(self.ca_dir).cert_path
@property
def admin_cert_path(self):
fname, _ = self._ensure_admin_tls()
return fname
@property
def admin_key_path(self):
_, fname = self._ensure_admin_tls()
return fname
def node_tls_paths(self, node_name, alt_names=None):
return ca.CA(self.ca_dir).generate_cert(node_name, alt_names)
def _ensure_admin_tls(self):
return self.node_tls_paths(u"admin")
@property
def ssh_key_pair(self):
return SSHKeyPair(self.ssh_dir)
def _ensure_dir(self, dname):
with self.dir_lock:
if not os.access(dname, os.F_OK):
self.log.debug("Creating directory %s", dname)
os.makedirs(dname)
return dname
def kubeconfig_path(self, cluster_name, master_ip):
kubeconfig = {
"apiVersion": "v1",
"kind": "Config",
"clusters": [
{
"name": cluster_name,
"cluster": {
"certificate-authority": self.ca_cert_path,
"server": "https://%s" % (master_ip,)
}
},
],
"users": [
{
"name": "admin",
"user": {
"client-certificate": self.admin_cert_path,
"client-key": self.admin_key_path,
},
},
],
"contexts": [
{
"name": cluster_name,
"context": {
"cluster": cluster_name,
"user": "admin",
}
},
],
"current-context": cluster_name,
}
fname = os.path.join(self.config_dir, "kubeconfig-%s" % (cluster_name,))
with open(fname, "wt") as f:
yaml.dump(kubeconfig, f)
return fname
class SSHKeyPair(object):
ssh_keygen_lock = threading.Lock()
log = logging.getLogger(__name__)
def __init__(self, dname):
self.dname = dname
@property
def name(self):
return ("container-cluster-%s-%s" %
(pwd.getpwuid(os.geteuid()).pw_name,
platform.node().split(".")[0]))
@property
def _key_file_name(self):
return os.path.join(self.dname, "id_rsa-%s" % (self.name,))
@property
def public_key(self):
fname = self._ensure_ssh_key() + ".pub"
with open(fname, "rt") as f:
return f.read()
@property
def private_key_path(self):
return self._ensure_ssh_key()
def _ensure_ssh_key(self):
fname = self._key_file_name
with self.ssh_keygen_lock:
if not os.access(fname, os.R_OK):
self.log.debug("Generating SSH key pair %s", fname)
utils.run("ssh-keygen -f %s -N ''" % (fname,))
return fname
LOG = logging.getLogger(__name__)
def make_discovery_token(size):
res = requests.get("https://discovery.etcd.io/new?size=%d" % (size,))
res.raise_for_status()
token = res.content[len("https://discovery.etcd.io/"):]
LOG.debug("New token for %d node(s): %s", size, token)
return token | 0.400984 | 0.113064 |
import copy
class MetaPrototype(type):
""" A metaclass for Prototypes """
def __init__(cls, *args):
type.__init__(cls, *args)
cls.clone = lambda self: copy.deepcopy(self)
class MetaSingletonPrototype(type):
""" A metaclass for Singleton & Prototype patterns """
def __init__(cls, *args):
print(cls,"__init__ method called with args", args)
type.__init__(cls, *args)
cls.instance = None
cls.clone = lambda self: copy.deepcopy(cls.instance)
def __call__(cls, *args, **kwargs):
if not cls.instance:
print(cls,"creating prototypical instance", args, kwargs)
cls.instance = type.__call__(cls,*args, **kwargs)
return cls.instance
class PrototypeM(metaclass=MetaSingletonPrototype):
pass
class ItemCollection(PrototypeM):
""" An item collection class """
def __init__(self, items=[]):
self.items = items
class Prototype(object):
""" A prototype base class """
def clone(self):
""" Return a clone of self """
return copy.deepcopy(self)
class Register(Prototype):
""" A student Register class """
def __init__(self, names=[]):
self.names = names
class SPrototype(object):
""" A prototype base class using shallow copy """
def clone(self):
""" Return a clone of self """
return copy.copy(self)
class SRegister(SPrototype):
""" Sub-class of SPrototype """
def __init__(self, stuff=(), names=[]):
self.stuff = stuff
self.names = names
class PrototypeFactory(Borg):
""" A Prototype factory/registry class """
def __init__(self):
""" Initializer """
self._registry = {}
def register(self, instance):
""" Register a given instance """
self._registry[instance.__class__] = instance
def clone(self, klass):
""" Return cloned instance of given class """
instance = self._registry.get(klass)
if instance == None:
print('Error:',klass,'not registered')
else:
return instance.clone()
class Name(SPrototype):
""" A class representing a person's name """
def __init__(self, first, second):
self.first = first
self.second = second
def __str__(self):
return ' '.join((self.first, self.second))
class Animal(SPrototype):
""" A class representing an animal """
def __init__(self, name, type='Wild'):
self.name = name
self.type = type
def __str__(self):
return ' '.join((str(self.type), self.name)) | Section 3/prototype.py | import copy
class MetaPrototype(type):
""" A metaclass for Prototypes """
def __init__(cls, *args):
type.__init__(cls, *args)
cls.clone = lambda self: copy.deepcopy(self)
class MetaSingletonPrototype(type):
""" A metaclass for Singleton & Prototype patterns """
def __init__(cls, *args):
print(cls,"__init__ method called with args", args)
type.__init__(cls, *args)
cls.instance = None
cls.clone = lambda self: copy.deepcopy(cls.instance)
def __call__(cls, *args, **kwargs):
if not cls.instance:
print(cls,"creating prototypical instance", args, kwargs)
cls.instance = type.__call__(cls,*args, **kwargs)
return cls.instance
class PrototypeM(metaclass=MetaSingletonPrototype):
pass
class ItemCollection(PrototypeM):
""" An item collection class """
def __init__(self, items=[]):
self.items = items
class Prototype(object):
""" A prototype base class """
def clone(self):
""" Return a clone of self """
return copy.deepcopy(self)
class Register(Prototype):
""" A student Register class """
def __init__(self, names=[]):
self.names = names
class SPrototype(object):
""" A prototype base class using shallow copy """
def clone(self):
""" Return a clone of self """
return copy.copy(self)
class SRegister(SPrototype):
""" Sub-class of SPrototype """
def __init__(self, stuff=(), names=[]):
self.stuff = stuff
self.names = names
class PrototypeFactory(Borg):
""" A Prototype factory/registry class """
def __init__(self):
""" Initializer """
self._registry = {}
def register(self, instance):
""" Register a given instance """
self._registry[instance.__class__] = instance
def clone(self, klass):
""" Return cloned instance of given class """
instance = self._registry.get(klass)
if instance == None:
print('Error:',klass,'not registered')
else:
return instance.clone()
class Name(SPrototype):
""" A class representing a person's name """
def __init__(self, first, second):
self.first = first
self.second = second
def __str__(self):
return ' '.join((self.first, self.second))
class Animal(SPrototype):
""" A class representing an animal """
def __init__(self, name, type='Wild'):
self.name = name
self.type = type
def __str__(self):
return ' '.join((str(self.type), self.name)) | 0.682045 | 0.086555 |
# Function-signature annotations for the missiles/monsters code region
# (0x801384E4 .. 0x801530EC).  Each entry is (address, C signature).
# For every entry the existing items at the address are deleted and the
# signature is (re)applied, exactly as the unrolled del_items/SetType
# call pairs would do, in the same address order.
_FUNC_SIGNATURES = (
    (0x801384E4, "void GameOnlyTestRoutine__Fv()"),
    (0x801384EC, "int vecleny__Fii(int a, int b)"),
    (0x80138510, "int veclenx__Fii(int a, int b)"),
    (0x8013853C, "void GetDamageAmt__FiPiT1(int i, int *mind, int *maxd)"),
    (0x80138B34, "int CheckBlock__Fiiii(int fx, int fy, int tx, int ty)"),
    (0x80138C1C, "int FindClosest__Fiii(int sx, int sy, int rad)"),
    (0x80138DB8, "int GetSpellLevel__Fii(int id, int sn)"),
    (0x80138E2C, "int GetDirection8__Fiiii(int x1, int y1, int x2, int y2)"),
    (0x80139048, "int GetDirection16__Fiiii(int x1, int y1, int x2, int y2)"),
    (0x80139264, "void DeleteMissile__Fii(int mi, int i)"),
    (0x801392BC, "void GetMissileVel__Fiiiiii(int i, int sx, int sy, int dx, int dy, int v)"),
    (0x80139470, "void PutMissile__Fi(int i)"),
    (0x80139574, "void GetMissilePos__Fi(int i)"),
    (0x8013969C, "void MoveMissilePos__Fi(int i)"),
    (0x80139804, "unsigned char MonsterTrapHit__FiiiiiUc(int m, int mindam, int maxdam, int dist, int t, int shift)"),
    (0x80139B78, "unsigned char MonsterMHit__FiiiiiiUc(int pnum, int m, int mindam, int maxdam, int dist, int t, int shift)"),
    (0x8013A2D8, "unsigned char PlayerMHit__FiiiiiiUcUc(int pnum, int m, int dist, int mind, int maxd, int mtype, int shift, int earflag)"),
    (0x8013AD44, "unsigned char Plr2PlrMHit__FiiiiiiUc(int pnum, int p, int mindam, int maxdam, int dist, int mtype, int shift)"),
    (0x8013B520, "void CheckMissileCol__FiiiUciiUc(int i, int mindam, int maxdam, unsigned char shift, int mx, int my, int nodel)"),
    (0x8013B99C, "unsigned char GetTableValue__FUci(unsigned char code, int dir)"),
    (0x8013BA30, "void SetMissAnim__Fii(int mi, int animtype)"),
    (0x8013BB00, "void SetMissDir__Fii(int mi, int dir)"),
    (0x8013BB44, "void AddLArrow__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x8013BD24, "void AddArrow__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x8013BEE0, "void GetVileMissPos__Fiii(int mi, int dx, int dy)"),
    (0x8013C004, "void AddRndTeleport__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x8013C374, "void AddFirebolt__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int micaster, int id, int dam)"),
    (0x8013C5E0, "void AddMagmaball__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x8013C6F4, "void AddTeleport__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x8013C8EC, "void AddLightball__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x8013CA40, "void AddFirewall__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x8013CC28, "void AddFireball__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x8013CE84, "void AddLightctrl__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x8013CF6C, "void AddLightning__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x8013D134, "void AddMisexp__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x8013D340, "unsigned char CheckIfTrig__Fii(int x, int y)"),
    (0x8013D424, "void AddTown__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x8013D848, "void AddFlash__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x8013DA58, "void AddFlash2__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x8013DC38, "void AddManashield__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x8013DD00, "void AddFiremove__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x8013DE5C, "void AddGuardian__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x8013E2C8, "void AddChain__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x8013E324, "void AddRhino__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x8013E4E0, "void AddFlare__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x8013E7D8, "void AddAcid__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x8013E8DC, "void AddAcidpud__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x8013E9B4, "void AddStone__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x8013ECAC, "void AddGolem__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x8013EE64, "void AddBoom__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x8013EEF8, "void AddHeal__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x8013F120, "void AddHealOther__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x8013F188, "void AddElement__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x8013F3B4, "void AddIdentify__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x8013F464, "void AddFirewallC__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x8013F714, "void AddInfra__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x8013F810, "void AddWave__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x8013F894, "void AddNova__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x8013FAAC, "void AddRepair__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x8013FB5C, "void AddRecharge__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x8013FC0C, "void AddDisarm__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x8013FC74, "void AddApoca__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x8013FEB0, "void AddFlame__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int seqno)"),
    (0x801400CC, "void AddFlamec__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x801401BC, "void AddCbolt__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int micaster, int id, int dam)"),
    (0x801403B0, "void AddHbolt__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int micaster, int id, int dam)"),
    (0x80140570, "void AddResurrect__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x801405E4, "void AddResurrectBeam__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x8014066C, "void AddTelekinesis__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x801406D4, "void AddBoneSpirit__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x801408D0, "void AddRportal__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x80140970, "void AddDiabApoca__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)"),
    (0x80140AAC, "int AddMissile__Fiiiiiiciii(int sx, int sy, int v1, int v2, int midir, int mitype, int micaster, int id, int v3, int spllvl)"),
    (0x80140DF8, "int Sentfire__Fiii(int i, int sx, int sy)"),
    (0x80140FDC, "void MI_Dummy__Fi(int i)"),
    (0x80140FE4, "void MI_Golem__Fi(int i)"),
    (0x80141240, "void MI_SetManashield__Fi(int i)"),
    (0x8014127C, "void MI_LArrow__Fi(int i)"),
    (0x80141A38, "void MI_Arrow__Fi(int i)"),
    (0x80141C54, "void MI_Firebolt__Fi(int i)"),
    (0x80142320, "void MI_Lightball__Fi(int i)"),
    (0x801425A8, "void MI_Acidpud__Fi(int i)"),
    (0x801426B8, "void MI_Firewall__Fi(int i)"),
    (0x8014297C, "void MI_Fireball__Fi(int i)"),
    (0x80143340, "void MI_Lightctrl__Fi(int i)"),
    (0x801436BC, "void MI_Lightning__Fi(int i)"),
    (0x801437A8, "void MI_Town__Fi(int i)"),
    (0x801439E0, "void MI_Flash__Fi(int i)"),
    (0x80143D34, "void MI_Flash2__Fi(int i)"),
    (0x80143EFC, "void MI_Manashield__Fi(int i)"),
    (0x80144220, "void MI_Firemove__Fi(int i)"),
    (0x801444AC, "void MI_Guardian__Fi(int i)"),
    (0x8014475C, "void MI_Chain__Fi(int i)"),
    (0x801449C8, "void MI_Misexp__Fi(int i)"),
    (0x80144CC8, "void MI_Acidsplat__Fi(int i)"),
    (0x80144E64, "void MI_Teleport__Fi(int i)"),
    (0x8014522C, "void MI_Stone__Fi(int i)"),
    (0x801453D8, "void MI_Boom__Fi(int i)"),
    (0x801454D0, "void MI_Rhino__Fi(int i)"),
    (0x8014587C, "void MI_FirewallC__Fi(int i)"),
    (0x80145B04, "void MI_Infra__Fi(int i)"),
    (0x80145BBC, "void MI_Apoca__Fi(int i)"),
    (0x80145E50, "void MI_Wave__Fi(int i)"),
    (0x8014634C, "void MI_Nova__Fi(int i)"),
    (0x8014660C, "void MI_Flame__Fi(int i)"),
    (0x80146804, "void MI_Flamec__Fi(int i)"),
    (0x80146A8C, "void MI_Cbolt__Fi(int i)"),
    (0x80146D90, "void MI_Hbolt__Fi(int i)"),
    (0x8014709C, "void MI_Element__Fi(int i)"),
    (0x80147754, "void MI_Bonespirit__Fi(int i)"),
    (0x80147B5C, "void MI_ResurrectBeam__Fi(int i)"),
    (0x80147BCC, "void MI_Rportal__Fi(int i)"),
    (0x80147DF0, "void ProcessMissiles__Fv()"),
    (0x801481E4, "void ClearMissileSpot__Fi(int mi)"),
    (0x8014829C, "void MoveToScrollTarget__7CBlocks(struct CBlocks *this)"),
    (0x801482B0, "void MonstPartJump__Fi(int m)"),
    (0x80148444, "void DeleteMonster__Fi(int i)"),
    (0x8014847C, "int M_GetDir__Fi(int i)"),
    (0x801484D8, "void M_StartDelay__Fii(int i, int len)"),
    (0x80148520, "void M_StartRAttack__Fiii(int i, int missile_type, int dam)"),
    (0x80148638, "void M_StartRSpAttack__Fiii(int i, int missile_type, int dam)"),
    (0x8014875C, "void M_StartSpAttack__Fi(int i)"),
    (0x80148844, "void M_StartEat__Fi(int i)"),
    (0x80148914, "void M_GetKnockback__Fi(int i)"),
    (0x80148AEC, "void M_StartHit__Fiii(int i, int pnum, int dam)"),
    (0x80148DE4, "void M_DiabloDeath__FiUc(int i, unsigned char sendmsg)"),
    (0x801490F4, "void M2MStartHit__Fiii(int mid, int i, int dam)"),
    (0x801493A0, "void MonstStartKill__FiiUc(int i, int pnum, unsigned char sendmsg)"),
    (0x8014968C, "void M2MStartKill__Fii(int i, int mid)"),
    (0x80149A54, "void M_StartKill__Fii(int i, int pnum)"),
    (0x80149B44, "void M_StartFadein__FiiUc(int i, int md, unsigned char backwards)"),
    (0x80149C98, "void M_StartFadeout__FiiUc(int i, int md, unsigned char backwards)"),
    (0x80149DE0, "void M_StartHeal__Fi(int i)"),
    (0x80149E60, "void M_ChangeLightOffset__Fi(int monst)"),
    (0x80149F00, "int M_DoStand__Fi(int i)"),
    (0x80149F68, "int M_DoWalk__Fi(int i)"),
    (0x8014A1EC, "int M_DoWalk2__Fi(int i)"),
    (0x8014A3D8, "int M_DoWalk3__Fi(int i)"),
    (0x8014A69C, "void M_TryM2MHit__Fiiiii(int i, int mid, int hper, int mind, int maxd)"),
    (0x8014A864, "void M_TryH2HHit__Fiiiii(int i, int pnum, int Hit, int MinDam, int MaxDam)"),
    (0x8014AE78, "int M_DoAttack__Fi(int i)"),
    (0x8014B01C, "int M_DoRAttack__Fi(int i)"),
    (0x8014B194, "int M_DoRSpAttack__Fi(int i)"),
    (0x8014B384, "int M_DoSAttack__Fi(int i)"),
    (0x8014B458, "int M_DoFadein__Fi(int i)"),
    (0x8014B528, "int M_DoFadeout__Fi(int i)"),
    (0x8014B63C, "int M_DoHeal__Fi(int i)"),
    (0x8014B6E8, "int M_DoTalk__Fi(int i)"),
    (0x8014BB74, "void M_Teleport__Fi(int i)"),
    (0x8014BDA8, "int M_DoGotHit__Fi(int i)"),
    (0x8014BE08, "void DoEnding__Fv()"),
    (0x8014BE9C, "void PrepDoEnding__Fv()"),
    (0x8014BFB4, "int M_DoDeath__Fi(int i)"),
    (0x8014C184, "int M_DoSpStand__Fi(int i)"),
    (0x8014C228, "int M_DoDelay__Fi(int i)"),
    (0x8014C318, "int M_DoStone__Fi(int i)"),
    (0x8014C39C, "void M_WalkDir__Fii(int i, int md)"),
    (0x8014C5C4, "void GroupUnity__Fi(int i)"),
    (0x8014C9B0, "unsigned char M_CallWalk__Fii(int i, int md)"),
    (0x8014CB9C, "unsigned char M_PathWalk__Fi(int i, char plr2monst[9], unsigned char (*Check)())"),
    (0x8014CC60, "unsigned char M_CallWalk2__Fii(int i, int md)"),
    (0x8014CD74, "unsigned char M_DumbWalk__Fii(int i, int md)"),
    (0x8014CDC8, "unsigned char M_RoundWalk__FiiRi(int i, int md, int *dir)"),
    (0x8014CF68, "void MAI_Zombie__Fi(int i)"),
    (0x8014D160, "void MAI_SkelSd__Fi(int i)"),
    (0x8014D2F8, "void MAI_Snake__Fi(int i)"),
    (0x8014D6DC, "void MAI_Bat__Fi(int i)"),
    (0x8014DA94, "void MAI_SkelBow__Fi(int i)"),
    (0x8014DC78, "void MAI_Fat__Fi(int i)"),
    (0x8014DE28, "void MAI_Sneak__Fi(int i)"),
    (0x8014E214, "void MAI_Fireman__Fi(int i)"),
    (0x8014E50C, "void MAI_Fallen__Fi(int i)"),
    (0x8014E828, "void MAI_Cleaver__Fi(int i)"),
    (0x8014E910, "void MAI_Round__FiUc(int i, unsigned char special)"),
    (0x8014ED7C, "void MAI_GoatMc__Fi(int i)"),
    (0x8014ED9C, "void MAI_Ranged__FiiUc(int i, int missile_type, unsigned char special)"),
    (0x8014EFBC, "void MAI_GoatBow__Fi(int i)"),
    (0x8014EFE0, "void MAI_Succ__Fi(int i)"),
    (0x8014F004, "void MAI_AcidUniq__Fi(int i)"),
    (0x8014F028, "void MAI_Scav__Fi(int i)"),
    (0x8014F440, "void MAI_Garg__Fi(int i)"),
    (0x8014F620, "void MAI_RoundRanged__FiiUciUc(int i, int missile_type, unsigned char checkdoors, int dam, int lessmissiles)"),
    (0x8014FB34, "void MAI_Magma__Fi(int i)"),
    (0x8014FB60, "void MAI_Storm__Fi(int i)"),
    (0x8014FB8C, "void MAI_Acid__Fi(int i)"),
    (0x8014FBBC, "void MAI_Diablo__Fi(int i)"),
    (0x8014FBE8, "void MAI_RR2__Fiii(int i, int mistype, int dam)"),
    (0x801500E8, "void MAI_Mega__Fi(int i)"),
    (0x8015010C, "void MAI_SkelKing__Fi(int i)"),
    (0x80150648, "void MAI_Rhino__Fi(int i)"),
    (0x80150AF0, "void MAI_Counselor__Fi(int i, unsigned char counsmiss[4], int _mx, int _my)"),
    (0x80150FBC, "void MAI_Garbud__Fi(int i)"),
    (0x8015116C, "void MAI_Zhar__Fi(int i)"),
    (0x80151364, "void MAI_SnotSpil__Fi(int i)"),
    (0x80151598, "void MAI_Lazurus__Fi(int i)"),
    (0x801517D8, "void MAI_Lazhelp__Fi(int i)"),
    (0x801518F8, "void MAI_Lachdanan__Fi(int i)"),
    (0x80151A88, "void MAI_Warlord__Fi(int i)"),
    (0x80151BD4, "void DeleteMonsterList__Fv()"),
    (0x80151CF0, "void ProcessMonsters__Fv()"),
    (0x80152278, "unsigned char DirOK__Fii(int i, int mdir)"),
    (0x80152660, "unsigned char PosOkMissile__Fii(int x, int y)"),
    (0x801526C8, "unsigned char CheckNoSolid__Fii(int x, int y)"),
    (0x8015270C, "unsigned char LineClearF__FPFii_Uciiii(unsigned char (*Clear)(), int x1, int y1, int x2, int y2)"),
    (0x80152994, "unsigned char LineClear__Fiiii(int x1, int y1, int x2, int y2)"),
    (0x801529D4, "unsigned char LineClearF1__FPFiii_Uciiiii(unsigned char (*Clear)(), int monst, int x1, int y1, int x2, int y2)"),
    (0x80152C68, "void M_FallenFear__Fii(int x, int y)"),
    (0x80152E38, "void PrintMonstHistory__Fi(int mt)"),
    (0x801530EC, "void PrintUniqueHistory__Fv()"),
)
# Apply the annotations in table (address) order: clear any existing
# items at the address, then set the function type.
for _addr, _sig in _FUNC_SIGNATURES:
    del_items(_addr)
    SetType(_addr, _sig)
del_items(0x80153210)
SetType(0x80153210, "void MissToMonst__Fiii(int i, int x, int y)")
del_items(0x80153674)
SetType(0x80153674, "unsigned char PosOkMonst2__Fiii(int i, int x, int y)")
del_items(0x80153890)
SetType(0x80153890, "unsigned char PosOkMonst3__Fiii(int i, int x, int y)")
del_items(0x80153B84)
SetType(0x80153B84, "int M_SpawnSkel__Fiii(int x, int y, int dir)")
del_items(0x80153CDC)
SetType(0x80153CDC, "void TalktoMonster__Fi(int i)")
del_items(0x80153DFC)
SetType(0x80153DFC, "void SpawnGolum__Fiiii(int i, int x, int y, int mi)")
del_items(0x80154054)
SetType(0x80154054, "unsigned char CanTalkToMonst__Fi(int m)")
del_items(0x8015408C)
SetType(0x8015408C, "unsigned char CheckMonsterHit__FiRUc(int m, unsigned char *ret)")
del_items(0x80154158)
SetType(0x80154158, "void MAI_Golum__Fi(int i)")
del_items(0x801544CC)
SetType(0x801544CC, "unsigned char MAI_Path__Fi(int i)")
del_items(0x80154630)
SetType(0x80154630, "void M_StartAttack__Fi(int i)")
del_items(0x80154718)
SetType(0x80154718, "void M_StartWalk__Fiiiiii(int i, int xvel, int yvel, int xadd, int yadd, int EndDir)")
del_items(0x80154878)
SetType(0x80154878, "void FreeInvGFX__Fv()")
del_items(0x80154880)
SetType(0x80154880, "void InvDrawSlot__Fiii(int X, int Y, int Frame)")
del_items(0x80154904)
SetType(0x80154904, "void InvDrawSlotBack__FiiiiUc(int X, int Y, int W, int H, int Flag)")
del_items(0x80154B58)
SetType(0x80154B58, "void InvDrawItem__FiiiUci(int ItemX, int ItemY, int ItemNo, unsigned char StatFlag, int TransFlag)")
del_items(0x80154C28)
SetType(0x80154C28, "void InvDrawSlots__Fv()")
del_items(0x80154F00)
SetType(0x80154F00, "void PrintStat__FiiPcUc(int Y, int Txt0, char *Txt1, unsigned char Col)")
del_items(0x80154FCC)
SetType(0x80154FCC, "void DrawInvStats__Fv()")
del_items(0x80155AE8)
SetType(0x80155AE8, "void DrawInvBack__Fv()")
del_items(0x80155B70)
SetType(0x80155B70, "void DrawInvCursor__Fv()")
del_items(0x8015604C)
SetType(0x8015604C, "void DrawInvMsg__Fv()")
del_items(0x80156214)
SetType(0x80156214, "void DrawInvUnique__Fv()")
del_items(0x80156338)
SetType(0x80156338, "void DrawInv__Fv()")
del_items(0x80156378)
SetType(0x80156378, "void DrawInvTSK__FP4TASK(struct TASK *T)")
del_items(0x801566C4)
SetType(0x801566C4, "void DoThatDrawInv__Fv()")
del_items(0x80156E8C)
SetType(0x80156E8C, "unsigned char AutoPlace__FiiiiUc(int pnum, int ii, int sx, int sy, int saveflag)")
del_items(0x801571AC)
SetType(0x801571AC, "unsigned char SpecialAutoPlace__FiiiiUc(int pnum, int ii, int sx, int sy, int saveflag)")
del_items(0x80157548)
SetType(0x80157548, "unsigned char GoldAutoPlace__Fi(int pnum)")
del_items(0x80157A18)
SetType(0x80157A18, "unsigned char WeaponAutoPlace__Fi(int pnum)")
del_items(0x80157CA4)
SetType(0x80157CA4, "int SwapItem__FP10ItemStructT0(struct ItemStruct *a, struct ItemStruct *b)")
del_items(0x80157DA0)
SetType(0x80157DA0, "void CheckInvPaste__Fiii(int pnum, int mx, int my)")
del_items(0x80159A8C)
SetType(0x80159A8C, "void CheckInvCut__Fiii(int pnum, int mx, int my)")
del_items(0x8015A53C)
SetType(0x8015A53C, "void RemoveInvItem__Fii(int pnum, int iv)")
del_items(0x8015A7E4)
SetType(0x8015A7E4, "void RemoveSpdBarItem__Fii(int pnum, int iv)")
del_items(0x8015A8D8)
SetType(0x8015A8D8, "void CheckInvScrn__Fv()")
del_items(0x8015A950)
SetType(0x8015A950, "void CheckItemStats__Fi(int pnum)")
del_items(0x8015A9D4)
SetType(0x8015A9D4, "void CheckBookLevel__Fi(int pnum)")
del_items(0x8015AB08)
SetType(0x8015AB08, "void CheckQuestItem__Fi(int pnum)")
del_items(0x8015AF30)
SetType(0x8015AF30, "void InvGetItem__Fii(int pnum, int ii)")
del_items(0x8015B22C)
SetType(0x8015B22C, "void AutoGetItem__Fii(int pnum, int ii)")
del_items(0x8015BC9C)
SetType(0x8015BC9C, "int FindGetItem__FiUsi(int idx, unsigned short ci, int iseed)")
del_items(0x8015BD50)
SetType(0x8015BD50, "void SyncGetItem__FiiiUsi(int x, int y, int idx, unsigned short ci, int iseed)")
del_items(0x8015BEDC)
SetType(0x8015BEDC, "unsigned char TryInvPut__Fv()")
del_items(0x8015C0A4)
SetType(0x8015C0A4, "int InvPutItem__Fiii(int pnum, int x, int y)")
del_items(0x8015C54C)
SetType(0x8015C54C, "int SyncPutItem__FiiiiUsiUciiiiiUl(int pnum, int x, int y, int idx, int icreateinfo, int iseed, int Id, int dur, int mdur, int ch, int mch, int ivalue, unsigned long ibuff)")
del_items(0x8015CAA8)
SetType(0x8015CAA8, "char CheckInvHLight__Fv()")
del_items(0x8015CDF0)
SetType(0x8015CDF0, "void RemoveScroll__Fi(int pnum)")
del_items(0x8015CFD4)
SetType(0x8015CFD4, "unsigned char UseScroll__Fv()")
del_items(0x8015D23C)
SetType(0x8015D23C, "void UseStaffCharge__FP12PlayerStruct(struct PlayerStruct *ptrplr)")
del_items(0x8015D2A4)
SetType(0x8015D2A4, "unsigned char UseStaff__Fv()")
del_items(0x8015D364)
SetType(0x8015D364, "void StartGoldDrop__Fv()")
del_items(0x8015D460)
SetType(0x8015D460, "unsigned char UseInvItem__Fii(int pnum, int cii)")
del_items(0x8015D984)
SetType(0x8015D984, "void DoTelekinesis__Fv()")
del_items(0x8015DAAC)
SetType(0x8015DAAC, "long CalculateGold__Fi(int pnum)")
del_items(0x8015DBE4)
SetType(0x8015DBE4, "unsigned char DropItemBeforeTrig__Fv()")
del_items(0x8015DC3C)
SetType(0x8015DC3C, "void ControlInv__Fv()")
del_items(0x8015DF1C)
SetType(0x8015DF1C, "void InvGetItemWH__Fi(int Pos)")
del_items(0x8015E010)
SetType(0x8015E010, "void InvAlignObject__Fv()")
del_items(0x8015E1C4)
SetType(0x8015E1C4, "void InvSetItemCurs__Fv()")
del_items(0x8015E358)
SetType(0x8015E358, "void InvMoveCursLeft__Fv()")
del_items(0x8015E500)
SetType(0x8015E500, "void InvMoveCursRight__Fv()")
del_items(0x8015E7B4)
SetType(0x8015E7B4, "void InvMoveCursUp__Fv()")
del_items(0x8015E9AC)
SetType(0x8015E9AC, "void InvMoveCursDown__Fv()")
del_items(0x8015ECB4)
SetType(0x8015ECB4, "void DumpMonsters__7CBlocks(struct CBlocks *this)")
del_items(0x8015ECDC)
SetType(0x8015ECDC, "void Flush__4CPad(struct CPad *this)")
del_items(0x8015ED00)
SetType(0x8015ED00, "void SetRGB__6DialogUcUcUc(struct Dialog *this, unsigned char R, unsigned char G, unsigned char B)")
del_items(0x8015ED20)
SetType(0x8015ED20, "void SetBack__6Dialogi(struct Dialog *this, int Type)")
del_items(0x8015ED28)
SetType(0x8015ED28, "void SetBorder__6Dialogi(struct Dialog *this, int Type)")
del_items(0x8015ED30)
SetType(0x8015ED30, "int SetOTpos__6Dialogi(struct Dialog *this, int OT)")
del_items(0x8015ED3C)
SetType(0x8015ED3C, "void ___6Dialog(struct Dialog *this, int __in_chrg)")
del_items(0x8015ED64)
SetType(0x8015ED64, "struct Dialog *__6Dialog(struct Dialog *this)")
del_items(0x8015EDC0)
SetType(0x8015EDC0, "void StartAutomap__Fv()")
del_items(0x8015EDD0)
SetType(0x8015EDD0, "void AutomapUp__Fv()")
del_items(0x8015EDF0)
SetType(0x8015EDF0, "void AutomapDown__Fv()")
del_items(0x8015EE10)
SetType(0x8015EE10, "void AutomapLeft__Fv()")
del_items(0x8015EE30)
SetType(0x8015EE30, "void AutomapRight__Fv()")
del_items(0x8015EE50)
SetType(0x8015EE50, "struct LINE_F2 *AMGetLine__FUcUcUc(unsigned char R, unsigned char G, unsigned char B)")
del_items(0x8015EEFC)
SetType(0x8015EEFC, "void AmDrawLine__Fiiii(int x0, int y0, int x1, int y1)")
del_items(0x8015EF64)
SetType(0x8015EF64, "void AmDrawPlayer__Fiiii(int x0, int y0, int x1, int y1)")
del_items(0x8015EFCC)
SetType(0x8015EFCC, "void DrawAutomapPlr__Fv()")
del_items(0x8015F2DC)
SetType(0x8015F2DC, "void DrawAutoMapVertWall__Fiiii(int X, int Y, int Length, int asd)")
del_items(0x8015F3D0)
SetType(0x8015F3D0, "void DrawAutoMapHorzWall__Fiiii(int X, int Y, int Length, int asd)")
del_items(0x8015F4C4)
SetType(0x8015F4C4, "void DrawAutoMapVertDoor__Fii(int X, int Y)")
del_items(0x8015F698)
SetType(0x8015F698, "void DrawAutoMapHorzDoor__Fii(int X, int Y)")
del_items(0x8015F870)
SetType(0x8015F870, "void DrawAutoMapVertGrate__Fii(int X, int Y)")
del_items(0x8015F924)
SetType(0x8015F924, "void DrawAutoMapHorzGrate__Fii(int X, int Y)")
del_items(0x8015F9D8)
SetType(0x8015F9D8, "void DrawAutoMapSquare__Fii(int X, int Y)")
del_items(0x8015FB20)
SetType(0x8015FB20, "void DrawAutoMapStairs__Fii(int X, int Y)")
del_items(0x8015FD20)
SetType(0x8015FD20, "void DrawAutomap__Fv()")
del_items(0x8016018C)
SetType(0x8016018C, "void PRIM_GetPrim__FPP7LINE_F2(struct LINE_F2 **Prim)") | psx/_dump_/44/_dump_ida_/overlay_c/set_funcs.py | del_items(0x801384E4)
SetType(0x801384E4, "void GameOnlyTestRoutine__Fv()")
del_items(0x801384EC)
SetType(0x801384EC, "int vecleny__Fii(int a, int b)")
del_items(0x80138510)
SetType(0x80138510, "int veclenx__Fii(int a, int b)")
del_items(0x8013853C)
SetType(0x8013853C, "void GetDamageAmt__FiPiT1(int i, int *mind, int *maxd)")
del_items(0x80138B34)
SetType(0x80138B34, "int CheckBlock__Fiiii(int fx, int fy, int tx, int ty)")
del_items(0x80138C1C)
SetType(0x80138C1C, "int FindClosest__Fiii(int sx, int sy, int rad)")
del_items(0x80138DB8)
SetType(0x80138DB8, "int GetSpellLevel__Fii(int id, int sn)")
del_items(0x80138E2C)
SetType(0x80138E2C, "int GetDirection8__Fiiii(int x1, int y1, int x2, int y2)")
del_items(0x80139048)
SetType(0x80139048, "int GetDirection16__Fiiii(int x1, int y1, int x2, int y2)")
del_items(0x80139264)
SetType(0x80139264, "void DeleteMissile__Fii(int mi, int i)")
del_items(0x801392BC)
SetType(0x801392BC, "void GetMissileVel__Fiiiiii(int i, int sx, int sy, int dx, int dy, int v)")
del_items(0x80139470)
SetType(0x80139470, "void PutMissile__Fi(int i)")
del_items(0x80139574)
SetType(0x80139574, "void GetMissilePos__Fi(int i)")
del_items(0x8013969C)
SetType(0x8013969C, "void MoveMissilePos__Fi(int i)")
del_items(0x80139804)
SetType(0x80139804, "unsigned char MonsterTrapHit__FiiiiiUc(int m, int mindam, int maxdam, int dist, int t, int shift)")
del_items(0x80139B78)
SetType(0x80139B78, "unsigned char MonsterMHit__FiiiiiiUc(int pnum, int m, int mindam, int maxdam, int dist, int t, int shift)")
del_items(0x8013A2D8)
SetType(0x8013A2D8, "unsigned char PlayerMHit__FiiiiiiUcUc(int pnum, int m, int dist, int mind, int maxd, int mtype, int shift, int earflag)")
del_items(0x8013AD44)
SetType(0x8013AD44, "unsigned char Plr2PlrMHit__FiiiiiiUc(int pnum, int p, int mindam, int maxdam, int dist, int mtype, int shift)")
del_items(0x8013B520)
SetType(0x8013B520, "void CheckMissileCol__FiiiUciiUc(int i, int mindam, int maxdam, unsigned char shift, int mx, int my, int nodel)")
del_items(0x8013B99C)
SetType(0x8013B99C, "unsigned char GetTableValue__FUci(unsigned char code, int dir)")
del_items(0x8013BA30)
SetType(0x8013BA30, "void SetMissAnim__Fii(int mi, int animtype)")
del_items(0x8013BB00)
SetType(0x8013BB00, "void SetMissDir__Fii(int mi, int dir)")
del_items(0x8013BB44)
SetType(0x8013BB44, "void AddLArrow__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013BD24)
SetType(0x8013BD24, "void AddArrow__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013BEE0)
SetType(0x8013BEE0, "void GetVileMissPos__Fiii(int mi, int dx, int dy)")
del_items(0x8013C004)
SetType(0x8013C004, "void AddRndTeleport__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013C374)
SetType(0x8013C374, "void AddFirebolt__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int micaster, int id, int dam)")
del_items(0x8013C5E0)
SetType(0x8013C5E0, "void AddMagmaball__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013C6F4)
SetType(0x8013C6F4, "void AddTeleport__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013C8EC)
SetType(0x8013C8EC, "void AddLightball__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013CA40)
SetType(0x8013CA40, "void AddFirewall__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013CC28)
SetType(0x8013CC28, "void AddFireball__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013CE84)
SetType(0x8013CE84, "void AddLightctrl__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013CF6C)
SetType(0x8013CF6C, "void AddLightning__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013D134)
SetType(0x8013D134, "void AddMisexp__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013D340)
SetType(0x8013D340, "unsigned char CheckIfTrig__Fii(int x, int y)")
del_items(0x8013D424)
SetType(0x8013D424, "void AddTown__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013D848)
SetType(0x8013D848, "void AddFlash__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013DA58)
SetType(0x8013DA58, "void AddFlash2__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013DC38)
SetType(0x8013DC38, "void AddManashield__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013DD00)
SetType(0x8013DD00, "void AddFiremove__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013DE5C)
SetType(0x8013DE5C, "void AddGuardian__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013E2C8)
SetType(0x8013E2C8, "void AddChain__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013E324)
SetType(0x8013E324, "void AddRhino__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013E4E0)
SetType(0x8013E4E0, "void AddFlare__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013E7D8)
SetType(0x8013E7D8, "void AddAcid__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013E8DC)
SetType(0x8013E8DC, "void AddAcidpud__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013E9B4)
SetType(0x8013E9B4, "void AddStone__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013ECAC)
SetType(0x8013ECAC, "void AddGolem__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013EE64)
SetType(0x8013EE64, "void AddBoom__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013EEF8)
SetType(0x8013EEF8, "void AddHeal__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013F120)
SetType(0x8013F120, "void AddHealOther__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013F188)
SetType(0x8013F188, "void AddElement__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013F3B4)
SetType(0x8013F3B4, "void AddIdentify__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013F464)
SetType(0x8013F464, "void AddFirewallC__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013F714)
SetType(0x8013F714, "void AddInfra__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013F810)
SetType(0x8013F810, "void AddWave__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013F894)
SetType(0x8013F894, "void AddNova__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013FAAC)
SetType(0x8013FAAC, "void AddRepair__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013FB5C)
SetType(0x8013FB5C, "void AddRecharge__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013FC0C)
SetType(0x8013FC0C, "void AddDisarm__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013FC74)
SetType(0x8013FC74, "void AddApoca__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8013FEB0)
SetType(0x8013FEB0, "void AddFlame__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int seqno)")
del_items(0x801400CC)
SetType(0x801400CC, "void AddFlamec__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x801401BC)
SetType(0x801401BC, "void AddCbolt__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int micaster, int id, int dam)")
del_items(0x801403B0)
SetType(0x801403B0, "void AddHbolt__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int micaster, int id, int dam)")
del_items(0x80140570)
SetType(0x80140570, "void AddResurrect__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x801405E4)
SetType(0x801405E4, "void AddResurrectBeam__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x8014066C)
SetType(0x8014066C, "void AddTelekinesis__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x801406D4)
SetType(0x801406D4, "void AddBoneSpirit__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x801408D0)
SetType(0x801408D0, "void AddRportal__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80140970)
SetType(0x80140970, "void AddDiabApoca__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)")
del_items(0x80140AAC)
SetType(0x80140AAC, "int AddMissile__Fiiiiiiciii(int sx, int sy, int v1, int v2, int midir, int mitype, int micaster, int id, int v3, int spllvl)")
del_items(0x80140DF8)
SetType(0x80140DF8, "int Sentfire__Fiii(int i, int sx, int sy)")
del_items(0x80140FDC)
SetType(0x80140FDC, "void MI_Dummy__Fi(int i)")
del_items(0x80140FE4)
SetType(0x80140FE4, "void MI_Golem__Fi(int i)")
del_items(0x80141240)
SetType(0x80141240, "void MI_SetManashield__Fi(int i)")
del_items(0x8014127C)
SetType(0x8014127C, "void MI_LArrow__Fi(int i)")
del_items(0x80141A38)
SetType(0x80141A38, "void MI_Arrow__Fi(int i)")
del_items(0x80141C54)
SetType(0x80141C54, "void MI_Firebolt__Fi(int i)")
del_items(0x80142320)
SetType(0x80142320, "void MI_Lightball__Fi(int i)")
del_items(0x801425A8)
SetType(0x801425A8, "void MI_Acidpud__Fi(int i)")
del_items(0x801426B8)
SetType(0x801426B8, "void MI_Firewall__Fi(int i)")
del_items(0x8014297C)
SetType(0x8014297C, "void MI_Fireball__Fi(int i)")
del_items(0x80143340)
SetType(0x80143340, "void MI_Lightctrl__Fi(int i)")
del_items(0x801436BC)
SetType(0x801436BC, "void MI_Lightning__Fi(int i)")
del_items(0x801437A8)
SetType(0x801437A8, "void MI_Town__Fi(int i)")
del_items(0x801439E0)
SetType(0x801439E0, "void MI_Flash__Fi(int i)")
del_items(0x80143D34)
SetType(0x80143D34, "void MI_Flash2__Fi(int i)")
del_items(0x80143EFC)
SetType(0x80143EFC, "void MI_Manashield__Fi(int i)")
del_items(0x80144220)
SetType(0x80144220, "void MI_Firemove__Fi(int i)")
del_items(0x801444AC)
SetType(0x801444AC, "void MI_Guardian__Fi(int i)")
del_items(0x8014475C)
SetType(0x8014475C, "void MI_Chain__Fi(int i)")
del_items(0x801449C8)
SetType(0x801449C8, "void MI_Misexp__Fi(int i)")
del_items(0x80144CC8)
SetType(0x80144CC8, "void MI_Acidsplat__Fi(int i)")
del_items(0x80144E64)
SetType(0x80144E64, "void MI_Teleport__Fi(int i)")
del_items(0x8014522C)
SetType(0x8014522C, "void MI_Stone__Fi(int i)")
del_items(0x801453D8)
SetType(0x801453D8, "void MI_Boom__Fi(int i)")
del_items(0x801454D0)
SetType(0x801454D0, "void MI_Rhino__Fi(int i)")
del_items(0x8014587C)
SetType(0x8014587C, "void MI_FirewallC__Fi(int i)")
del_items(0x80145B04)
SetType(0x80145B04, "void MI_Infra__Fi(int i)")
del_items(0x80145BBC)
SetType(0x80145BBC, "void MI_Apoca__Fi(int i)")
del_items(0x80145E50)
SetType(0x80145E50, "void MI_Wave__Fi(int i)")
del_items(0x8014634C)
SetType(0x8014634C, "void MI_Nova__Fi(int i)")
del_items(0x8014660C)
SetType(0x8014660C, "void MI_Flame__Fi(int i)")
del_items(0x80146804)
SetType(0x80146804, "void MI_Flamec__Fi(int i)")
del_items(0x80146A8C)
SetType(0x80146A8C, "void MI_Cbolt__Fi(int i)")
del_items(0x80146D90)
SetType(0x80146D90, "void MI_Hbolt__Fi(int i)")
del_items(0x8014709C)
SetType(0x8014709C, "void MI_Element__Fi(int i)")
del_items(0x80147754)
SetType(0x80147754, "void MI_Bonespirit__Fi(int i)")
del_items(0x80147B5C)
SetType(0x80147B5C, "void MI_ResurrectBeam__Fi(int i)")
del_items(0x80147BCC)
SetType(0x80147BCC, "void MI_Rportal__Fi(int i)")
del_items(0x80147DF0)
SetType(0x80147DF0, "void ProcessMissiles__Fv()")
del_items(0x801481E4)
SetType(0x801481E4, "void ClearMissileSpot__Fi(int mi)")
del_items(0x8014829C)
SetType(0x8014829C, "void MoveToScrollTarget__7CBlocks(struct CBlocks *this)")
del_items(0x801482B0)
SetType(0x801482B0, "void MonstPartJump__Fi(int m)")
del_items(0x80148444)
SetType(0x80148444, "void DeleteMonster__Fi(int i)")
del_items(0x8014847C)
SetType(0x8014847C, "int M_GetDir__Fi(int i)")
del_items(0x801484D8)
SetType(0x801484D8, "void M_StartDelay__Fii(int i, int len)")
del_items(0x80148520)
SetType(0x80148520, "void M_StartRAttack__Fiii(int i, int missile_type, int dam)")
del_items(0x80148638)
SetType(0x80148638, "void M_StartRSpAttack__Fiii(int i, int missile_type, int dam)")
del_items(0x8014875C)
SetType(0x8014875C, "void M_StartSpAttack__Fi(int i)")
del_items(0x80148844)
SetType(0x80148844, "void M_StartEat__Fi(int i)")
del_items(0x80148914)
SetType(0x80148914, "void M_GetKnockback__Fi(int i)")
del_items(0x80148AEC)
SetType(0x80148AEC, "void M_StartHit__Fiii(int i, int pnum, int dam)")
del_items(0x80148DE4)
SetType(0x80148DE4, "void M_DiabloDeath__FiUc(int i, unsigned char sendmsg)")
del_items(0x801490F4)
SetType(0x801490F4, "void M2MStartHit__Fiii(int mid, int i, int dam)")
del_items(0x801493A0)
SetType(0x801493A0, "void MonstStartKill__FiiUc(int i, int pnum, unsigned char sendmsg)")
del_items(0x8014968C)
SetType(0x8014968C, "void M2MStartKill__Fii(int i, int mid)")
del_items(0x80149A54)
SetType(0x80149A54, "void M_StartKill__Fii(int i, int pnum)")
del_items(0x80149B44)
SetType(0x80149B44, "void M_StartFadein__FiiUc(int i, int md, unsigned char backwards)")
del_items(0x80149C98)
SetType(0x80149C98, "void M_StartFadeout__FiiUc(int i, int md, unsigned char backwards)")
del_items(0x80149DE0)
SetType(0x80149DE0, "void M_StartHeal__Fi(int i)")
del_items(0x80149E60)
SetType(0x80149E60, "void M_ChangeLightOffset__Fi(int monst)")
del_items(0x80149F00)
SetType(0x80149F00, "int M_DoStand__Fi(int i)")
del_items(0x80149F68)
SetType(0x80149F68, "int M_DoWalk__Fi(int i)")
del_items(0x8014A1EC)
SetType(0x8014A1EC, "int M_DoWalk2__Fi(int i)")
del_items(0x8014A3D8)
SetType(0x8014A3D8, "int M_DoWalk3__Fi(int i)")
del_items(0x8014A69C)
SetType(0x8014A69C, "void M_TryM2MHit__Fiiiii(int i, int mid, int hper, int mind, int maxd)")
del_items(0x8014A864)
SetType(0x8014A864, "void M_TryH2HHit__Fiiiii(int i, int pnum, int Hit, int MinDam, int MaxDam)")
del_items(0x8014AE78)
SetType(0x8014AE78, "int M_DoAttack__Fi(int i)")
del_items(0x8014B01C)
SetType(0x8014B01C, "int M_DoRAttack__Fi(int i)")
del_items(0x8014B194)
SetType(0x8014B194, "int M_DoRSpAttack__Fi(int i)")
del_items(0x8014B384)
SetType(0x8014B384, "int M_DoSAttack__Fi(int i)")
del_items(0x8014B458)
SetType(0x8014B458, "int M_DoFadein__Fi(int i)")
del_items(0x8014B528)
SetType(0x8014B528, "int M_DoFadeout__Fi(int i)")
del_items(0x8014B63C)
SetType(0x8014B63C, "int M_DoHeal__Fi(int i)")
del_items(0x8014B6E8)
SetType(0x8014B6E8, "int M_DoTalk__Fi(int i)")
del_items(0x8014BB74)
SetType(0x8014BB74, "void M_Teleport__Fi(int i)")
del_items(0x8014BDA8)
SetType(0x8014BDA8, "int M_DoGotHit__Fi(int i)")
del_items(0x8014BE08)
SetType(0x8014BE08, "void DoEnding__Fv()")
del_items(0x8014BE9C)
SetType(0x8014BE9C, "void PrepDoEnding__Fv()")
del_items(0x8014BFB4)
SetType(0x8014BFB4, "int M_DoDeath__Fi(int i)")
del_items(0x8014C184)
SetType(0x8014C184, "int M_DoSpStand__Fi(int i)")
del_items(0x8014C228)
SetType(0x8014C228, "int M_DoDelay__Fi(int i)")
del_items(0x8014C318)
SetType(0x8014C318, "int M_DoStone__Fi(int i)")
del_items(0x8014C39C)
SetType(0x8014C39C, "void M_WalkDir__Fii(int i, int md)")
del_items(0x8014C5C4)
SetType(0x8014C5C4, "void GroupUnity__Fi(int i)")
del_items(0x8014C9B0)
SetType(0x8014C9B0, "unsigned char M_CallWalk__Fii(int i, int md)")
del_items(0x8014CB9C)
SetType(0x8014CB9C, "unsigned char M_PathWalk__Fi(int i, char plr2monst[9], unsigned char (*Check)())")
del_items(0x8014CC60)
SetType(0x8014CC60, "unsigned char M_CallWalk2__Fii(int i, int md)")
del_items(0x8014CD74)
SetType(0x8014CD74, "unsigned char M_DumbWalk__Fii(int i, int md)")
del_items(0x8014CDC8)
SetType(0x8014CDC8, "unsigned char M_RoundWalk__FiiRi(int i, int md, int *dir)")
del_items(0x8014CF68)
SetType(0x8014CF68, "void MAI_Zombie__Fi(int i)")
del_items(0x8014D160)
SetType(0x8014D160, "void MAI_SkelSd__Fi(int i)")
del_items(0x8014D2F8)
SetType(0x8014D2F8, "void MAI_Snake__Fi(int i)")
del_items(0x8014D6DC)
SetType(0x8014D6DC, "void MAI_Bat__Fi(int i)")
del_items(0x8014DA94)
SetType(0x8014DA94, "void MAI_SkelBow__Fi(int i)")
del_items(0x8014DC78)
SetType(0x8014DC78, "void MAI_Fat__Fi(int i)")
del_items(0x8014DE28)
SetType(0x8014DE28, "void MAI_Sneak__Fi(int i)")
del_items(0x8014E214)
SetType(0x8014E214, "void MAI_Fireman__Fi(int i)")
del_items(0x8014E50C)
SetType(0x8014E50C, "void MAI_Fallen__Fi(int i)")
del_items(0x8014E828)
SetType(0x8014E828, "void MAI_Cleaver__Fi(int i)")
del_items(0x8014E910)
SetType(0x8014E910, "void MAI_Round__FiUc(int i, unsigned char special)")
del_items(0x8014ED7C)
SetType(0x8014ED7C, "void MAI_GoatMc__Fi(int i)")
del_items(0x8014ED9C)
SetType(0x8014ED9C, "void MAI_Ranged__FiiUc(int i, int missile_type, unsigned char special)")
del_items(0x8014EFBC)
SetType(0x8014EFBC, "void MAI_GoatBow__Fi(int i)")
del_items(0x8014EFE0)
SetType(0x8014EFE0, "void MAI_Succ__Fi(int i)")
del_items(0x8014F004)
SetType(0x8014F004, "void MAI_AcidUniq__Fi(int i)")
del_items(0x8014F028)
SetType(0x8014F028, "void MAI_Scav__Fi(int i)")
del_items(0x8014F440)
SetType(0x8014F440, "void MAI_Garg__Fi(int i)")
del_items(0x8014F620)
SetType(0x8014F620, "void MAI_RoundRanged__FiiUciUc(int i, int missile_type, unsigned char checkdoors, int dam, int lessmissiles)")
del_items(0x8014FB34)
SetType(0x8014FB34, "void MAI_Magma__Fi(int i)")
del_items(0x8014FB60)
SetType(0x8014FB60, "void MAI_Storm__Fi(int i)")
del_items(0x8014FB8C)
SetType(0x8014FB8C, "void MAI_Acid__Fi(int i)")
del_items(0x8014FBBC)
SetType(0x8014FBBC, "void MAI_Diablo__Fi(int i)")
del_items(0x8014FBE8)
SetType(0x8014FBE8, "void MAI_RR2__Fiii(int i, int mistype, int dam)")
del_items(0x801500E8)
SetType(0x801500E8, "void MAI_Mega__Fi(int i)")
del_items(0x8015010C)
SetType(0x8015010C, "void MAI_SkelKing__Fi(int i)")
del_items(0x80150648)
SetType(0x80150648, "void MAI_Rhino__Fi(int i)")
del_items(0x80150AF0)
SetType(0x80150AF0, "void MAI_Counselor__Fi(int i, unsigned char counsmiss[4], int _mx, int _my)")
del_items(0x80150FBC)
SetType(0x80150FBC, "void MAI_Garbud__Fi(int i)")
del_items(0x8015116C)
SetType(0x8015116C, "void MAI_Zhar__Fi(int i)")
del_items(0x80151364)
SetType(0x80151364, "void MAI_SnotSpil__Fi(int i)")
del_items(0x80151598)
SetType(0x80151598, "void MAI_Lazurus__Fi(int i)")
del_items(0x801517D8)
SetType(0x801517D8, "void MAI_Lazhelp__Fi(int i)")
del_items(0x801518F8)
SetType(0x801518F8, "void MAI_Lachdanan__Fi(int i)")
del_items(0x80151A88)
SetType(0x80151A88, "void MAI_Warlord__Fi(int i)")
del_items(0x80151BD4)
SetType(0x80151BD4, "void DeleteMonsterList__Fv()")
del_items(0x80151CF0)
SetType(0x80151CF0, "void ProcessMonsters__Fv()")
del_items(0x80152278)
SetType(0x80152278, "unsigned char DirOK__Fii(int i, int mdir)")
del_items(0x80152660)
SetType(0x80152660, "unsigned char PosOkMissile__Fii(int x, int y)")
del_items(0x801526C8)
SetType(0x801526C8, "unsigned char CheckNoSolid__Fii(int x, int y)")
del_items(0x8015270C)
SetType(0x8015270C, "unsigned char LineClearF__FPFii_Uciiii(unsigned char (*Clear)(), int x1, int y1, int x2, int y2)")
del_items(0x80152994)
SetType(0x80152994, "unsigned char LineClear__Fiiii(int x1, int y1, int x2, int y2)")
del_items(0x801529D4)
SetType(0x801529D4, "unsigned char LineClearF1__FPFiii_Uciiiii(unsigned char (*Clear)(), int monst, int x1, int y1, int x2, int y2)")
del_items(0x80152C68)
SetType(0x80152C68, "void M_FallenFear__Fii(int x, int y)")
del_items(0x80152E38)
SetType(0x80152E38, "void PrintMonstHistory__Fi(int mt)")
del_items(0x801530EC)
SetType(0x801530EC, "void PrintUniqueHistory__Fv()")
del_items(0x80153210)
SetType(0x80153210, "void MissToMonst__Fiii(int i, int x, int y)")
del_items(0x80153674)
SetType(0x80153674, "unsigned char PosOkMonst2__Fiii(int i, int x, int y)")
del_items(0x80153890)
SetType(0x80153890, "unsigned char PosOkMonst3__Fiii(int i, int x, int y)")
del_items(0x80153B84)
SetType(0x80153B84, "int M_SpawnSkel__Fiii(int x, int y, int dir)")
del_items(0x80153CDC)
SetType(0x80153CDC, "void TalktoMonster__Fi(int i)")
del_items(0x80153DFC)
SetType(0x80153DFC, "void SpawnGolum__Fiiii(int i, int x, int y, int mi)")
del_items(0x80154054)
SetType(0x80154054, "unsigned char CanTalkToMonst__Fi(int m)")
del_items(0x8015408C)
SetType(0x8015408C, "unsigned char CheckMonsterHit__FiRUc(int m, unsigned char *ret)")
del_items(0x80154158)
SetType(0x80154158, "void MAI_Golum__Fi(int i)")
del_items(0x801544CC)
SetType(0x801544CC, "unsigned char MAI_Path__Fi(int i)")
del_items(0x80154630)
SetType(0x80154630, "void M_StartAttack__Fi(int i)")
del_items(0x80154718)
SetType(0x80154718, "void M_StartWalk__Fiiiiii(int i, int xvel, int yvel, int xadd, int yadd, int EndDir)")
del_items(0x80154878)
SetType(0x80154878, "void FreeInvGFX__Fv()")
del_items(0x80154880)
SetType(0x80154880, "void InvDrawSlot__Fiii(int X, int Y, int Frame)")
del_items(0x80154904)
SetType(0x80154904, "void InvDrawSlotBack__FiiiiUc(int X, int Y, int W, int H, int Flag)")
del_items(0x80154B58)
SetType(0x80154B58, "void InvDrawItem__FiiiUci(int ItemX, int ItemY, int ItemNo, unsigned char StatFlag, int TransFlag)")
del_items(0x80154C28)
SetType(0x80154C28, "void InvDrawSlots__Fv()")
del_items(0x80154F00)
SetType(0x80154F00, "void PrintStat__FiiPcUc(int Y, int Txt0, char *Txt1, unsigned char Col)")
del_items(0x80154FCC)
SetType(0x80154FCC, "void DrawInvStats__Fv()")
del_items(0x80155AE8)
SetType(0x80155AE8, "void DrawInvBack__Fv()")
del_items(0x80155B70)
SetType(0x80155B70, "void DrawInvCursor__Fv()")
del_items(0x8015604C)
SetType(0x8015604C, "void DrawInvMsg__Fv()")
del_items(0x80156214)
SetType(0x80156214, "void DrawInvUnique__Fv()")
del_items(0x80156338)
SetType(0x80156338, "void DrawInv__Fv()")
del_items(0x80156378)
SetType(0x80156378, "void DrawInvTSK__FP4TASK(struct TASK *T)")
del_items(0x801566C4)
SetType(0x801566C4, "void DoThatDrawInv__Fv()")
del_items(0x80156E8C)
SetType(0x80156E8C, "unsigned char AutoPlace__FiiiiUc(int pnum, int ii, int sx, int sy, int saveflag)")
del_items(0x801571AC)
SetType(0x801571AC, "unsigned char SpecialAutoPlace__FiiiiUc(int pnum, int ii, int sx, int sy, int saveflag)")
del_items(0x80157548)
SetType(0x80157548, "unsigned char GoldAutoPlace__Fi(int pnum)")
del_items(0x80157A18)
SetType(0x80157A18, "unsigned char WeaponAutoPlace__Fi(int pnum)")
del_items(0x80157CA4)
SetType(0x80157CA4, "int SwapItem__FP10ItemStructT0(struct ItemStruct *a, struct ItemStruct *b)")
del_items(0x80157DA0)
SetType(0x80157DA0, "void CheckInvPaste__Fiii(int pnum, int mx, int my)")
del_items(0x80159A8C)
SetType(0x80159A8C, "void CheckInvCut__Fiii(int pnum, int mx, int my)")
del_items(0x8015A53C)
SetType(0x8015A53C, "void RemoveInvItem__Fii(int pnum, int iv)")
del_items(0x8015A7E4)
SetType(0x8015A7E4, "void RemoveSpdBarItem__Fii(int pnum, int iv)")
del_items(0x8015A8D8)
SetType(0x8015A8D8, "void CheckInvScrn__Fv()")
del_items(0x8015A950)
SetType(0x8015A950, "void CheckItemStats__Fi(int pnum)")
del_items(0x8015A9D4)
SetType(0x8015A9D4, "void CheckBookLevel__Fi(int pnum)")
del_items(0x8015AB08)
SetType(0x8015AB08, "void CheckQuestItem__Fi(int pnum)")
del_items(0x8015AF30)
SetType(0x8015AF30, "void InvGetItem__Fii(int pnum, int ii)")
del_items(0x8015B22C)
SetType(0x8015B22C, "void AutoGetItem__Fii(int pnum, int ii)")
del_items(0x8015BC9C)
SetType(0x8015BC9C, "int FindGetItem__FiUsi(int idx, unsigned short ci, int iseed)")
del_items(0x8015BD50)
SetType(0x8015BD50, "void SyncGetItem__FiiiUsi(int x, int y, int idx, unsigned short ci, int iseed)")
del_items(0x8015BEDC)
SetType(0x8015BEDC, "unsigned char TryInvPut__Fv()")
del_items(0x8015C0A4)
SetType(0x8015C0A4, "int InvPutItem__Fiii(int pnum, int x, int y)")
del_items(0x8015C54C)
SetType(0x8015C54C, "int SyncPutItem__FiiiiUsiUciiiiiUl(int pnum, int x, int y, int idx, int icreateinfo, int iseed, int Id, int dur, int mdur, int ch, int mch, int ivalue, unsigned long ibuff)")
del_items(0x8015CAA8)
SetType(0x8015CAA8, "char CheckInvHLight__Fv()")
del_items(0x8015CDF0)
SetType(0x8015CDF0, "void RemoveScroll__Fi(int pnum)")
del_items(0x8015CFD4)
SetType(0x8015CFD4, "unsigned char UseScroll__Fv()")
del_items(0x8015D23C)
SetType(0x8015D23C, "void UseStaffCharge__FP12PlayerStruct(struct PlayerStruct *ptrplr)")
del_items(0x8015D2A4)
SetType(0x8015D2A4, "unsigned char UseStaff__Fv()")
del_items(0x8015D364)
SetType(0x8015D364, "void StartGoldDrop__Fv()")
del_items(0x8015D460)
SetType(0x8015D460, "unsigned char UseInvItem__Fii(int pnum, int cii)")
del_items(0x8015D984)
SetType(0x8015D984, "void DoTelekinesis__Fv()")
del_items(0x8015DAAC)
SetType(0x8015DAAC, "long CalculateGold__Fi(int pnum)")
del_items(0x8015DBE4)
SetType(0x8015DBE4, "unsigned char DropItemBeforeTrig__Fv()")
del_items(0x8015DC3C)
SetType(0x8015DC3C, "void ControlInv__Fv()")
del_items(0x8015DF1C)
SetType(0x8015DF1C, "void InvGetItemWH__Fi(int Pos)")
del_items(0x8015E010)
SetType(0x8015E010, "void InvAlignObject__Fv()")
del_items(0x8015E1C4)
SetType(0x8015E1C4, "void InvSetItemCurs__Fv()")
del_items(0x8015E358)
SetType(0x8015E358, "void InvMoveCursLeft__Fv()")
del_items(0x8015E500)
SetType(0x8015E500, "void InvMoveCursRight__Fv()")
del_items(0x8015E7B4)
SetType(0x8015E7B4, "void InvMoveCursUp__Fv()")
del_items(0x8015E9AC)
SetType(0x8015E9AC, "void InvMoveCursDown__Fv()")
del_items(0x8015ECB4)
SetType(0x8015ECB4, "void DumpMonsters__7CBlocks(struct CBlocks *this)")
del_items(0x8015ECDC)
SetType(0x8015ECDC, "void Flush__4CPad(struct CPad *this)")
del_items(0x8015ED00)
SetType(0x8015ED00, "void SetRGB__6DialogUcUcUc(struct Dialog *this, unsigned char R, unsigned char G, unsigned char B)")
del_items(0x8015ED20)
SetType(0x8015ED20, "void SetBack__6Dialogi(struct Dialog *this, int Type)")
del_items(0x8015ED28)
SetType(0x8015ED28, "void SetBorder__6Dialogi(struct Dialog *this, int Type)")
del_items(0x8015ED30)
SetType(0x8015ED30, "int SetOTpos__6Dialogi(struct Dialog *this, int OT)")
del_items(0x8015ED3C)
SetType(0x8015ED3C, "void ___6Dialog(struct Dialog *this, int __in_chrg)")
del_items(0x8015ED64)
SetType(0x8015ED64, "struct Dialog *__6Dialog(struct Dialog *this)")
del_items(0x8015EDC0)
SetType(0x8015EDC0, "void StartAutomap__Fv()")
del_items(0x8015EDD0)
SetType(0x8015EDD0, "void AutomapUp__Fv()")
del_items(0x8015EDF0)
SetType(0x8015EDF0, "void AutomapDown__Fv()")
del_items(0x8015EE10)
SetType(0x8015EE10, "void AutomapLeft__Fv()")
del_items(0x8015EE30)
SetType(0x8015EE30, "void AutomapRight__Fv()")
del_items(0x8015EE50)
SetType(0x8015EE50, "struct LINE_F2 *AMGetLine__FUcUcUc(unsigned char R, unsigned char G, unsigned char B)")
del_items(0x8015EEFC)
SetType(0x8015EEFC, "void AmDrawLine__Fiiii(int x0, int y0, int x1, int y1)")
del_items(0x8015EF64)
SetType(0x8015EF64, "void AmDrawPlayer__Fiiii(int x0, int y0, int x1, int y1)")
del_items(0x8015EFCC)
SetType(0x8015EFCC, "void DrawAutomapPlr__Fv()")
del_items(0x8015F2DC)
SetType(0x8015F2DC, "void DrawAutoMapVertWall__Fiiii(int X, int Y, int Length, int asd)")
del_items(0x8015F3D0)
SetType(0x8015F3D0, "void DrawAutoMapHorzWall__Fiiii(int X, int Y, int Length, int asd)")
del_items(0x8015F4C4)
SetType(0x8015F4C4, "void DrawAutoMapVertDoor__Fii(int X, int Y)")
del_items(0x8015F698)
SetType(0x8015F698, "void DrawAutoMapHorzDoor__Fii(int X, int Y)")
del_items(0x8015F870)
SetType(0x8015F870, "void DrawAutoMapVertGrate__Fii(int X, int Y)")
del_items(0x8015F924)
SetType(0x8015F924, "void DrawAutoMapHorzGrate__Fii(int X, int Y)")
del_items(0x8015F9D8)
SetType(0x8015F9D8, "void DrawAutoMapSquare__Fii(int X, int Y)")
del_items(0x8015FB20)
SetType(0x8015FB20, "void DrawAutoMapStairs__Fii(int X, int Y)")
del_items(0x8015FD20)
SetType(0x8015FD20, "void DrawAutomap__Fv()")
del_items(0x8016018C)
SetType(0x8016018C, "void PRIM_GetPrim__FPP7LINE_F2(struct LINE_F2 **Prim)") | 0.216177 | 0.152127 |
from pathlib import Path
import pickle
from typing import Optional
from matplotlib import cm
import matplotlib.pyplot as plt
import numpy as np
import matplotlib as mpl
def jitter(x):
return x + np.random.normal(scale=0.13, size=(len(x),))
def feature_scale(arr):
mini, maxi = arr.min(), arr.max()
return (arr - mini) / (maxi - mini)
def confidence_plot(
confidences_E_N, proportions_E_N, axis, cmap: Optional[str] = "viridis"
):
assert confidences_E_N.shape == proportions_E_N.shape
cmap = cm.get_cmap(cmap)
E = confidences_E_N.shape[0]
for idx, (x, y) in enumerate(zip(confidences_E_N, proportions_E_N)):
axis.plot(x, y, label=f"epoch {idx + 1}", color=cmap(idx / E))
axis.set_title("Pseudo-label confidence on pool set")
axis.set_xlabel("Confidence threshold")
axis.set_ylabel("Proportion of predictions that\npass the confidence threshold")
def reliability_plot(
bins_E_M,
accuracies_E_N,
counts_E_N,
axis,
title: Optional[str] = "Reliability plot",
cmap: Optional[str] = "viridis",
):
assert accuracies_E_N.shape == counts_E_N.shape
cmap = cm.get_cmap(cmap)
E = bins_E_M.shape[0]
for idx, (x, y, c) in enumerate(zip(bins_E_M, accuracies_E_N, counts_E_N)):
y[c == 0] = np.nan
axis.scatter(
jitter(list(range(len(x) - 1))),
y,
label=f"epoch {idx + 1}",
color=cmap(idx / E),
)
bins = bins_E_M[0]
axis.set_xticklabels(
[f"({bins[idx]:.1f},{b:.1f}]" for idx, b in enumerate(bins[1:])], rotation=45
)
axis.set_xticks(range(len(bins) - 1))
axis.set_ylim(bottom=-0.05, top=1)
axis.set_ylabel("Accuracy of pseudo-label")
axis.set_xlabel("Confidence")
if title:
axis.set_title(title)
axis.set_yticks(np.arange(0, 1.1, 0.1))
axis.plot(
range(len(bins) - 1),
np.arange(0.1, 1.1, 0.1) - 0.05,
color="grey",
alpha=0.3,
linestyle="-.",
)
def reliability_hist_plot(
bins_E_M,
counts_E_N,
axis,
cmap: Optional[str] = "viridis",
xticklabels=True,
title="Confidence histogram",
bar=False,
):
cmap = cm.get_cmap(cmap)
E = bins_E_M.shape[0]
for idx, (x, y) in enumerate(zip(bins_E_M, counts_E_N)):
if bar:
axis.bar(
list(range(len(x) - 1)),
y / y.sum(),
label=f"epoch {idx + 1}",
color=cmap(idx / E),
)
else:
axis.scatter(
jitter(list(range(len(x) - 1))),
y / y.sum(),
label=f"epoch {idx + 1}",
color=cmap(idx / E),
)
bins = bins_E_M[0]
axis.set_xticklabels(
[f"({bins[idx]:.1f},{b:.1f}]" for idx, b in enumerate(bins[1:])], rotation=45
)
axis.set_ylim(top=1)
axis.set_ylabel("Proportion")
if xticklabels:
axis.set_xticks(range(len(bins) - 1))
axis.set_xlabel("Confidence")
else:
axis.set_xticks(())
axis.set_title(title)
# todo(harry): can accommodate iterations too
def ece_plot(ece_E, axis, label: Optional[str] = None, cmap: Optional[str] = "viridis"):
cmap = cm.get_cmap(cmap)
E = ece_E.shape[0]
if label:
axis.plot(range(1, E + 1), ece_E, label=label)
else:
axis.plot(range(1, E + 1), ece_E)
axis.set_title("Expected Calibration Error (ECE)")
axis.set_ylabel("ECE")
axis.set_xlabel("Epoch")
axis.set_xticks(range(1, E + 1))
axis.set_xticklabels(range(1, E + 1), rotation=45)
def plot_entropy(ent_E_N, num_classes, axis, cmap: Optional[str] = "viridis"):
cmap = cm.get_cmap(cmap)
bplot = axis.boxplot(ent_E_N.T, patch_artist=True, showfliers=False)
E = ent_E_N.shape[0]
max_ent = num_classes * ((-1 / num_classes) * np.log(1 / num_classes))
for e, patch in enumerate(bplot["boxes"]):
patch.set_facecolor(cmap(e / E))
axis.set_xlabel("Epoch")
axis.set_ylabel("Entropy")
axis.set_ylim(bottom=-0.05, top=max_ent)
axis.set_yticks(np.linspace(0, max_ent, 5))
axis.set_title("Entropy")
axis.set_xticklabels(range(1, E + 1), rotation=45)
# todo(harry): can accommodate iterations too
def plot_accuracy(pool_acc_E, val_acc_E, axis, cmap: Optional[str] = "viridis"):
cmap = cm.get_cmap(cmap)
E = pool_acc_E.shape[0]
assert val_acc_E.shape[0] == E
axis.plot(range(1, E + 1), pool_acc_E, label="pool")
axis.plot(range(1, E + 1), val_acc_E, label="val")
axis.set_title("Accuracy")
axis.set_xlabel("Epoch")
axis.set_ylabel("Accuracy")
axis.set_xticks(range(1, E + 1))
axis.set_xticklabels(range(1, E + 1), rotation=45)
axis.legend()
def plot_sample_size(metric: dict, axis):
y = metric["history"]["train_size"]
x = len(y)
axis.plot(range(1, x + 1), y)
axis.set_xticks(range(1, x + 1))
axis.set_title("Training set size")
axis.set_xlabel("Epoch")
axis.set_ylabel("Training set size")
axis.set_xticklabels(range(1, x + 1), rotation=45)
def plot_val_loss(metric: dict, axis):
y = metric["history"]["val_loss"]
x = len(y)
axis.plot(range(1, x + 1), y)
axis.set_xticks(range(1, x + 1))
axis.set_title("Validation Loss")
axis.set_xlabel("Epoch")
axis.set_ylabel("Loss")
axis.set_xticklabels(range(1, x + 1), rotation=45)
def get_val_acc(metric: dict):
return np.array(metric["history"]["val_acc"])
def plot_labelled_classes(metric: dict, axis):
x, y = np.unique(metric["labelled_classes"], return_counts=True)
axis.bar(x, y)
axis.set_xlabel("Class")
axis.set_ylabel("Counts")
axis.set_title("BALD-acquired classes (so far)")
def parse_calib_dir(calib_metrics: str):
def num_sort(fname: Path):
basename = fname.name
return int(basename[: basename.find("_")])
calib_metrics = Path(calib_metrics)
pkls = list(calib_metrics.rglob("*.pkl"))
pkls = sorted(pkls, key=num_sort)
buffer = []
for p in pkls:
with open(p, "rb") as fp:
buffer.append(pickle.load(fp))
confidences, proportions, accuracies = [], [], []
bins, bin_accuracy, counts, ece = [], [], [], []
entropy = []
per_acc = []
for b in buffer:
res = b["conf-thresh"]
confidences.append(res[0])
proportions.append(res[1])
accuracies.append(b["accuracy"])
res = b["ece"]
bins.append(res[0])
bin_accuracy.append(res[1])
counts.append(res[2])
# res[3] = mean confidence
ece.append(res[4])
entropy.append(b["entropy"])
if "per-instance-accuracy" in b:
per_acc.append(b["per-instance-accuracy"])
confidences_E_N = np.stack(confidences, axis=0)
proportions_E_N = np.stack(proportions, axis=0)
accuracies_E = np.stack(accuracies, axis=0)
bins_E_M = np.stack(bins, axis=0)
bin_accuracy_E_N = np.stack(bin_accuracy, axis=0)
counts_E_N = np.stack(counts, axis=0)
ece_E = np.stack(ece, axis=0)
try:
# can only do so if entropy is a non-jagged matrix (non-pool set calib)
entropy_E_N = np.stack(entropy, axis=0)
if per_acc:
per_acc_E_N = np.stack(per_acc, axis=0)
else:
per_acc_E_N = None
except:
entropy_E_N = None
per_acc_E_N = None
return (
confidences_E_N,
proportions_E_N,
accuracies_E,
bins_E_M,
bin_accuracy_E_N,
counts_E_N,
ece_E,
entropy_E_N,
per_acc_E_N,
)
def diagnostics(calib_metrics: str, metrics: str):
metrics = Path(metrics)
(
confidences_E_N,
proportions_E_N,
accuracies_E,
bins_E_M,
bin_accuracy_E_N,
counts_E_N,
ece_E,
entropy_E_N,
_,
) = parse_calib_dir(calib_metrics)
with open(metrics, "rb") as fp:
metrics = pickle.load(fp)
fig, axes = plt.subplots(3, 3, figsize=(3 * 5, 3 * 5))
axes = axes.flatten()
confidence_plot(confidences_E_N, proportions_E_N, axes[0])
ece_plot(ece_E, axes[1])
plot_val_loss(metrics, axes[2])
reliability_hist_plot(bins_E_M, counts_E_N, axes[3])
if entropy_E_N is not None:
plot_entropy(entropy_E_N, num_classes=10, axis=axes[4])
plot_labelled_classes(metrics, axis=axes[5])
reliability_plot(bins_E_M, bin_accuracy_E_N, counts_E_N, axes[6])
plot_accuracy(accuracies_E, get_val_acc(metrics), axis=axes[7])
plot_sample_size(metrics, axes[8])
plt.suptitle(f"Pool size = {entropy_E_N.shape[-1]:,}", y=1.0)
for i, ax in enumerate(axes):
if i % 3 == 0:
ax.grid()
fig.tight_layout()
def solo_reliability_plot(calib_metrics, title="Reliability plot", label="Iteration"):
(
confidences_E_N,
proportions_E_N,
accuracies_E,
bins_E_M,
bin_accuracy_E_N,
counts_E_N,
ece_E,
entropy_E_N,
_,
) = parse_calib_dir(calib_metrics)
fig = plt.figure(constrained_layout=True, figsize=(8, 8))
spec = fig.add_gridspec(
ncols=2,
nrows=2,
width_ratios=[29, 1],
height_ratios=[2, 7],
)
axes = [
fig.add_subplot(spec[0, 0]),
fig.add_subplot(spec[1, 0]),
fig.add_subplot(spec[:, -1]),
]
reliability_hist_plot(bins_E_M, counts_E_N, axes[0], xticklabels=False, title=title)
reliability_plot(bins_E_M, bin_accuracy_E_N, counts_E_N, axes[1], title=None)
norm = mpl.colors.Normalize(vmin=1, vmax=accuracies_E.shape[0])
fig.colorbar(
cm.ScalarMappable(norm=norm, cmap=cm.get_cmap("viridis")),
orientation="vertical",
label=label,
cax=axes[2],
)
def entropy_reliability_plot(calib_metrics, num_class=10):
*_, entropy_E_N, per_acc_E_N = parse_calib_dir(calib_metrics)
E = entropy_E_N.shape[0]
max_ent = -np.log(1 / num_class)
space = np.linspace(0, max_ent, 11)
fig = plt.figure(constrained_layout=True, figsize=(8, 8))
if E > 1:
spec = fig.add_gridspec(
ncols=2,
nrows=2,
width_ratios=[29, 1],
height_ratios=[2, 7],
)
axes = [
fig.add_subplot(spec[0, 0]),
fig.add_subplot(spec[1, 0]),
fig.add_subplot(spec[:, -1]),
]
else:
spec = fig.add_gridspec(ncols=1, nrows=2, height_ratios=[2, 7])
axes = [fig.add_subplot(spec[0, 0]), fig.add_subplot(spec[1, 0])]
for ent, acc in zip(entropy_E_N, per_acc_E_N):
y = []
x = []
p = []
for i, upper in enumerate(space[1:]):
lower = space[i]
mask = (ent > lower) & (ent <= upper)
mean_acc = acc[mask].mean()
prop = mask.mean()
y.append(mean_acc)
# (lower, upper]
x.append(f"({lower:.2f}, {upper:.2f}]")
p.append(prop)
if E == 1:
axes[1].bar(range(len(y)), y)
axes[0].bar(range(len(p)), p)
else:
raise NotImplementedError
axes[1].set_xticklabels(x, rotation=45, ha="right")
axes[1].set_xticks(range(len(y)))
axes[0].set_xticks(())
axes[0].set_xticklabels(())
axes[0].set_title("Reliability plot")
axes[0].set_ylabel("Proportion")
axes[1].set_ylabel("Accuracy")
axes[1].set_xlabel("Entropy")
# norm = mpl.colors.Normalize(vmin=1, vmax=accuracies_E.shape[0])
# fig.colorbar(cm.ScalarMappable(norm=norm, cmap=cm.get_cmap('viridis')),
# orientation='vertical', label=label, cax=axes[2]) | alr/training/diagnostics/__init__.py | from pathlib import Path
import pickle
from typing import Optional
from matplotlib import cm
import matplotlib.pyplot as plt
import numpy as np
import matplotlib as mpl
def jitter(x):
return x + np.random.normal(scale=0.13, size=(len(x),))
def feature_scale(arr):
mini, maxi = arr.min(), arr.max()
return (arr - mini) / (maxi - mini)
def confidence_plot(
confidences_E_N, proportions_E_N, axis, cmap: Optional[str] = "viridis"
):
assert confidences_E_N.shape == proportions_E_N.shape
cmap = cm.get_cmap(cmap)
E = confidences_E_N.shape[0]
for idx, (x, y) in enumerate(zip(confidences_E_N, proportions_E_N)):
axis.plot(x, y, label=f"epoch {idx + 1}", color=cmap(idx / E))
axis.set_title("Pseudo-label confidence on pool set")
axis.set_xlabel("Confidence threshold")
axis.set_ylabel("Proportion of predictions that\npass the confidence threshold")
def reliability_plot(
bins_E_M,
accuracies_E_N,
counts_E_N,
axis,
title: Optional[str] = "Reliability plot",
cmap: Optional[str] = "viridis",
):
assert accuracies_E_N.shape == counts_E_N.shape
cmap = cm.get_cmap(cmap)
E = bins_E_M.shape[0]
for idx, (x, y, c) in enumerate(zip(bins_E_M, accuracies_E_N, counts_E_N)):
y[c == 0] = np.nan
axis.scatter(
jitter(list(range(len(x) - 1))),
y,
label=f"epoch {idx + 1}",
color=cmap(idx / E),
)
bins = bins_E_M[0]
axis.set_xticklabels(
[f"({bins[idx]:.1f},{b:.1f}]" for idx, b in enumerate(bins[1:])], rotation=45
)
axis.set_xticks(range(len(bins) - 1))
axis.set_ylim(bottom=-0.05, top=1)
axis.set_ylabel("Accuracy of pseudo-label")
axis.set_xlabel("Confidence")
if title:
axis.set_title(title)
axis.set_yticks(np.arange(0, 1.1, 0.1))
axis.plot(
range(len(bins) - 1),
np.arange(0.1, 1.1, 0.1) - 0.05,
color="grey",
alpha=0.3,
linestyle="-.",
)
def reliability_hist_plot(
bins_E_M,
counts_E_N,
axis,
cmap: Optional[str] = "viridis",
xticklabels=True,
title="Confidence histogram",
bar=False,
):
cmap = cm.get_cmap(cmap)
E = bins_E_M.shape[0]
for idx, (x, y) in enumerate(zip(bins_E_M, counts_E_N)):
if bar:
axis.bar(
list(range(len(x) - 1)),
y / y.sum(),
label=f"epoch {idx + 1}",
color=cmap(idx / E),
)
else:
axis.scatter(
jitter(list(range(len(x) - 1))),
y / y.sum(),
label=f"epoch {idx + 1}",
color=cmap(idx / E),
)
bins = bins_E_M[0]
axis.set_xticklabels(
[f"({bins[idx]:.1f},{b:.1f}]" for idx, b in enumerate(bins[1:])], rotation=45
)
axis.set_ylim(top=1)
axis.set_ylabel("Proportion")
if xticklabels:
axis.set_xticks(range(len(bins) - 1))
axis.set_xlabel("Confidence")
else:
axis.set_xticks(())
axis.set_title(title)
# todo(harry): can accommodate iterations too
def ece_plot(ece_E, axis, label: Optional[str] = None, cmap: Optional[str] = "viridis"):
cmap = cm.get_cmap(cmap)
E = ece_E.shape[0]
if label:
axis.plot(range(1, E + 1), ece_E, label=label)
else:
axis.plot(range(1, E + 1), ece_E)
axis.set_title("Expected Calibration Error (ECE)")
axis.set_ylabel("ECE")
axis.set_xlabel("Epoch")
axis.set_xticks(range(1, E + 1))
axis.set_xticklabels(range(1, E + 1), rotation=45)
def plot_entropy(ent_E_N, num_classes, axis, cmap: Optional[str] = "viridis"):
cmap = cm.get_cmap(cmap)
bplot = axis.boxplot(ent_E_N.T, patch_artist=True, showfliers=False)
E = ent_E_N.shape[0]
max_ent = num_classes * ((-1 / num_classes) * np.log(1 / num_classes))
for e, patch in enumerate(bplot["boxes"]):
patch.set_facecolor(cmap(e / E))
axis.set_xlabel("Epoch")
axis.set_ylabel("Entropy")
axis.set_ylim(bottom=-0.05, top=max_ent)
axis.set_yticks(np.linspace(0, max_ent, 5))
axis.set_title("Entropy")
axis.set_xticklabels(range(1, E + 1), rotation=45)
# todo(harry): can accommodate iterations too
def plot_accuracy(pool_acc_E, val_acc_E, axis, cmap: Optional[str] = "viridis"):
cmap = cm.get_cmap(cmap)
E = pool_acc_E.shape[0]
assert val_acc_E.shape[0] == E
axis.plot(range(1, E + 1), pool_acc_E, label="pool")
axis.plot(range(1, E + 1), val_acc_E, label="val")
axis.set_title("Accuracy")
axis.set_xlabel("Epoch")
axis.set_ylabel("Accuracy")
axis.set_xticks(range(1, E + 1))
axis.set_xticklabels(range(1, E + 1), rotation=45)
axis.legend()
def plot_sample_size(metric: dict, axis):
y = metric["history"]["train_size"]
x = len(y)
axis.plot(range(1, x + 1), y)
axis.set_xticks(range(1, x + 1))
axis.set_title("Training set size")
axis.set_xlabel("Epoch")
axis.set_ylabel("Training set size")
axis.set_xticklabels(range(1, x + 1), rotation=45)
def plot_val_loss(metric: dict, axis):
y = metric["history"]["val_loss"]
x = len(y)
axis.plot(range(1, x + 1), y)
axis.set_xticks(range(1, x + 1))
axis.set_title("Validation Loss")
axis.set_xlabel("Epoch")
axis.set_ylabel("Loss")
axis.set_xticklabels(range(1, x + 1), rotation=45)
def get_val_acc(metric: dict):
return np.array(metric["history"]["val_acc"])
def plot_labelled_classes(metric: dict, axis):
x, y = np.unique(metric["labelled_classes"], return_counts=True)
axis.bar(x, y)
axis.set_xlabel("Class")
axis.set_ylabel("Counts")
axis.set_title("BALD-acquired classes (so far)")
def parse_calib_dir(calib_metrics: str):
def num_sort(fname: Path):
basename = fname.name
return int(basename[: basename.find("_")])
calib_metrics = Path(calib_metrics)
pkls = list(calib_metrics.rglob("*.pkl"))
pkls = sorted(pkls, key=num_sort)
buffer = []
for p in pkls:
with open(p, "rb") as fp:
buffer.append(pickle.load(fp))
confidences, proportions, accuracies = [], [], []
bins, bin_accuracy, counts, ece = [], [], [], []
entropy = []
per_acc = []
for b in buffer:
res = b["conf-thresh"]
confidences.append(res[0])
proportions.append(res[1])
accuracies.append(b["accuracy"])
res = b["ece"]
bins.append(res[0])
bin_accuracy.append(res[1])
counts.append(res[2])
# res[3] = mean confidence
ece.append(res[4])
entropy.append(b["entropy"])
if "per-instance-accuracy" in b:
per_acc.append(b["per-instance-accuracy"])
confidences_E_N = np.stack(confidences, axis=0)
proportions_E_N = np.stack(proportions, axis=0)
accuracies_E = np.stack(accuracies, axis=0)
bins_E_M = np.stack(bins, axis=0)
bin_accuracy_E_N = np.stack(bin_accuracy, axis=0)
counts_E_N = np.stack(counts, axis=0)
ece_E = np.stack(ece, axis=0)
try:
# can only do so if entropy is a non-jagged matrix (non-pool set calib)
entropy_E_N = np.stack(entropy, axis=0)
if per_acc:
per_acc_E_N = np.stack(per_acc, axis=0)
else:
per_acc_E_N = None
except:
entropy_E_N = None
per_acc_E_N = None
return (
confidences_E_N,
proportions_E_N,
accuracies_E,
bins_E_M,
bin_accuracy_E_N,
counts_E_N,
ece_E,
entropy_E_N,
per_acc_E_N,
)
def diagnostics(calib_metrics: str, metrics: str):
metrics = Path(metrics)
(
confidences_E_N,
proportions_E_N,
accuracies_E,
bins_E_M,
bin_accuracy_E_N,
counts_E_N,
ece_E,
entropy_E_N,
_,
) = parse_calib_dir(calib_metrics)
with open(metrics, "rb") as fp:
metrics = pickle.load(fp)
fig, axes = plt.subplots(3, 3, figsize=(3 * 5, 3 * 5))
axes = axes.flatten()
confidence_plot(confidences_E_N, proportions_E_N, axes[0])
ece_plot(ece_E, axes[1])
plot_val_loss(metrics, axes[2])
reliability_hist_plot(bins_E_M, counts_E_N, axes[3])
if entropy_E_N is not None:
plot_entropy(entropy_E_N, num_classes=10, axis=axes[4])
plot_labelled_classes(metrics, axis=axes[5])
reliability_plot(bins_E_M, bin_accuracy_E_N, counts_E_N, axes[6])
plot_accuracy(accuracies_E, get_val_acc(metrics), axis=axes[7])
plot_sample_size(metrics, axes[8])
plt.suptitle(f"Pool size = {entropy_E_N.shape[-1]:,}", y=1.0)
for i, ax in enumerate(axes):
if i % 3 == 0:
ax.grid()
fig.tight_layout()
def solo_reliability_plot(calib_metrics, title="Reliability plot", label="Iteration"):
(
confidences_E_N,
proportions_E_N,
accuracies_E,
bins_E_M,
bin_accuracy_E_N,
counts_E_N,
ece_E,
entropy_E_N,
_,
) = parse_calib_dir(calib_metrics)
fig = plt.figure(constrained_layout=True, figsize=(8, 8))
spec = fig.add_gridspec(
ncols=2,
nrows=2,
width_ratios=[29, 1],
height_ratios=[2, 7],
)
axes = [
fig.add_subplot(spec[0, 0]),
fig.add_subplot(spec[1, 0]),
fig.add_subplot(spec[:, -1]),
]
reliability_hist_plot(bins_E_M, counts_E_N, axes[0], xticklabels=False, title=title)
reliability_plot(bins_E_M, bin_accuracy_E_N, counts_E_N, axes[1], title=None)
norm = mpl.colors.Normalize(vmin=1, vmax=accuracies_E.shape[0])
fig.colorbar(
cm.ScalarMappable(norm=norm, cmap=cm.get_cmap("viridis")),
orientation="vertical",
label=label,
cax=axes[2],
)
def entropy_reliability_plot(calib_metrics, num_class=10):
*_, entropy_E_N, per_acc_E_N = parse_calib_dir(calib_metrics)
E = entropy_E_N.shape[0]
max_ent = -np.log(1 / num_class)
space = np.linspace(0, max_ent, 11)
fig = plt.figure(constrained_layout=True, figsize=(8, 8))
if E > 1:
spec = fig.add_gridspec(
ncols=2,
nrows=2,
width_ratios=[29, 1],
height_ratios=[2, 7],
)
axes = [
fig.add_subplot(spec[0, 0]),
fig.add_subplot(spec[1, 0]),
fig.add_subplot(spec[:, -1]),
]
else:
spec = fig.add_gridspec(ncols=1, nrows=2, height_ratios=[2, 7])
axes = [fig.add_subplot(spec[0, 0]), fig.add_subplot(spec[1, 0])]
for ent, acc in zip(entropy_E_N, per_acc_E_N):
y = []
x = []
p = []
for i, upper in enumerate(space[1:]):
lower = space[i]
mask = (ent > lower) & (ent <= upper)
mean_acc = acc[mask].mean()
prop = mask.mean()
y.append(mean_acc)
# (lower, upper]
x.append(f"({lower:.2f}, {upper:.2f}]")
p.append(prop)
if E == 1:
axes[1].bar(range(len(y)), y)
axes[0].bar(range(len(p)), p)
else:
raise NotImplementedError
axes[1].set_xticklabels(x, rotation=45, ha="right")
axes[1].set_xticks(range(len(y)))
axes[0].set_xticks(())
axes[0].set_xticklabels(())
axes[0].set_title("Reliability plot")
axes[0].set_ylabel("Proportion")
axes[1].set_ylabel("Accuracy")
axes[1].set_xlabel("Entropy")
# norm = mpl.colors.Normalize(vmin=1, vmax=accuracies_E.shape[0])
# fig.colorbar(cm.ScalarMappable(norm=norm, cmap=cm.get_cmap('viridis')),
# orientation='vertical', label=label, cax=axes[2]) | 0.702836 | 0.504272 |
from itertools import accumulate
from bisect import bisect_right
import random
def basic_selection(population):
"""
:param population: Population Object
:return: Individual Obejct
"""
return random.choice(population.individuals)
def fitnetss_proporitional(population):
"""
:param population: Population Object
:return: Individual Obejct
"""
# Normalize fitness values for all individuals.
fits = [(1 / population.get_disctance(indv)) for indv in population.individuals]
min_fit = min(fits)
fits = [(fit - min_fit) for fit in fits]
# Create roulette wheel.
sum_fit = sum(fits)
wheel = list(accumulate([(fit / sum_fit) for fit in fits]))
# Select an individual.
idx = bisect_right(wheel, random.random())
return population.individuals[idx]
def rank_based(population, pmin=0.1, pmax=0.9):
"""
:param population: Population Object
:param pmin: minimum probability of being selected
:param pmax: maximum probability of being selected
:return: Individual Obejct
"""
# Initialize parameters.
n = population.unit_num
sorted_indvs = sorted(population.individuals, key=population.get_disctance, reverse=True)
# Assign selection probabilities linearly.
p = lambda i: pmin + (pmax - pmin) * (i - 1) / (n - 1)
ps = [p(i) for i in range(1, n+1)]
# Normalize probabilities.
sum_p = sum(ps)
wheel = list(accumulate([(p / sum_p) for p in ps]))
# Select an individual.
idx = bisect_right(wheel, random.random())
return sorted_indvs[idx]
def tournament_selection(population, tournament_size=2):
"""
:param population: Population Object
:param tournament_size: number of individuals participating in the tournament (default is 2)
:return: Individual Obejct
"""
# Competition function.
complete = lambda competitors: min(competitors, key=population.get_disctance)
# Check validity of tournament size.
if tournament_size > len(population.individuals):
msg = 'tournament size({}) is larger than population size({})'
raise ValueError(msg.format(tournament_size, len(population.individuals)))
# Pick the winner of the group and return it.
competitors = random.sample(population.individuals, tournament_size)
return complete(competitors) | EAlib/operators/selection.py |
from itertools import accumulate
from bisect import bisect_right
import random
def basic_selection(population):
"""
:param population: Population Object
:return: Individual Obejct
"""
return random.choice(population.individuals)
def fitnetss_proporitional(population):
"""
:param population: Population Object
:return: Individual Obejct
"""
# Normalize fitness values for all individuals.
fits = [(1 / population.get_disctance(indv)) for indv in population.individuals]
min_fit = min(fits)
fits = [(fit - min_fit) for fit in fits]
# Create roulette wheel.
sum_fit = sum(fits)
wheel = list(accumulate([(fit / sum_fit) for fit in fits]))
# Select an individual.
idx = bisect_right(wheel, random.random())
return population.individuals[idx]
def rank_based(population, pmin=0.1, pmax=0.9):
"""
:param population: Population Object
:param pmin: minimum probability of being selected
:param pmax: maximum probability of being selected
:return: Individual Obejct
"""
# Initialize parameters.
n = population.unit_num
sorted_indvs = sorted(population.individuals, key=population.get_disctance, reverse=True)
# Assign selection probabilities linearly.
p = lambda i: pmin + (pmax - pmin) * (i - 1) / (n - 1)
ps = [p(i) for i in range(1, n+1)]
# Normalize probabilities.
sum_p = sum(ps)
wheel = list(accumulate([(p / sum_p) for p in ps]))
# Select an individual.
idx = bisect_right(wheel, random.random())
return sorted_indvs[idx]
def tournament_selection(population, tournament_size=2):
"""
:param population: Population Object
:param tournament_size: number of individuals participating in the tournament (default is 2)
:return: Individual Obejct
"""
# Competition function.
complete = lambda competitors: min(competitors, key=population.get_disctance)
# Check validity of tournament size.
if tournament_size > len(population.individuals):
msg = 'tournament size({}) is larger than population size({})'
raise ValueError(msg.format(tournament_size, len(population.individuals)))
# Pick the winner of the group and return it.
competitors = random.sample(population.individuals, tournament_size)
return complete(competitors) | 0.752195 | 0.533154 |
# Python 2/3 compatibility
from __future__ import print_function
import os, numpy as np
import cv2 as cv
from tests_common import NewOpenCVTests
class aruco_test(NewOpenCVTests):
    """Smoke tests for the Python bindings of the cv.aruco module.

    Covers Charuco board id access, diamond drawing, dictionary
    (de)serialization through cv.FileStorage, marker identification and
    Hamming-distance queries.
    """

    def test_idsAccessibility(self):
        """Board ids must be readable/writable both as an attribute and via setIds()."""
        ids = np.arange(17)
        rev_ids = ids[::-1]

        aruco_dict = cv.aruco.Dictionary_get(cv.aruco.DICT_5X5_250)
        board = cv.aruco.CharucoBoard_create(7, 5, 1, 0.5, aruco_dict)

        # Default ids are 0..16 for a 7x5 Charuco board (17 markers).
        np.testing.assert_array_equal(board.ids.squeeze(), ids)

        # Direct attribute assignment must round-trip.
        board.ids = rev_ids
        np.testing.assert_array_equal(board.ids.squeeze(), rev_ids)

        # The explicit setter must round-trip as well.
        board.setIds(ids)
        np.testing.assert_array_equal(board.ids.squeeze(), ids)

        # An id array of the wrong size must be rejected with cv.error.
        with self.assertRaises(cv.error):
            board.setIds(np.array([0]))

    def test_drawCharucoDiamond(self):
        """drawCharucoDiamond must return a rendered image for 4 valid marker ids."""
        aruco_dict = cv.aruco.Dictionary_get(cv.aruco.DICT_4X4_50)
        img = cv.aruco.drawCharucoDiamond(aruco_dict, np.array([0, 1, 2, 3]), 100, 80)
        self.assertIsNotNone(img)

    def test_write_read_dict(self):
        """A dictionary written via writeDictionary must read back identically."""
        # Assign filename BEFORE the try so the finally-cleanup cannot raise
        # NameError if an earlier statement fails.
        filename = "test_dict.yml"
        try:
            aruco_dict = cv.aruco.getPredefinedDictionary(cv.aruco.DICT_5X5_50)
            markers_gold = aruco_dict.bytesList

            # Serialize the 5x5 dictionary to YAML.
            fs_write = cv.FileStorage(filename, cv.FileStorage_WRITE)
            aruco_dict.writeDictionary(fs_write)
            fs_write.release()

            # Overwrite with an unrelated dictionary so the read is meaningful.
            aruco_dict = cv.aruco.getPredefinedDictionary(cv.aruco.DICT_6X6_250)

            # Deserialize and verify every field round-tripped.
            fs_read = cv.FileStorage(filename, cv.FileStorage_READ)
            aruco_dict.readDictionary(fs_read.root())
            fs_read.release()

            self.assertEqual(aruco_dict.markerSize, 5)
            self.assertEqual(aruco_dict.maxCorrectionBits, 3)
            np.testing.assert_array_equal(aruco_dict.bytesList, markers_gold)
        finally:
            if os.path.exists(filename):
                os.remove(filename)

    def test_identify(self):
        """identify() must recover the marker id and rotation from a bit matrix."""
        aruco_dict = cv.aruco.Dictionary_get(cv.aruco.DICT_4X4_50)
        expected_idx = 9
        expected_rotation = 2
        bit_marker = np.array([[0, 1, 1, 0], [1, 0, 1, 0], [1, 1, 1, 1], [0, 0, 1, 1]], dtype=np.uint8)

        check, idx, rotation = aruco_dict.identify(bit_marker, 0)

        # NOTE: the original called assertTrue(check, True), where True was
        # silently treated as the failure *message*; assert the flag itself.
        self.assertTrue(check)
        self.assertEqual(idx, expected_idx)
        self.assertEqual(rotation, expected_rotation)

    def test_getDistanceToId(self):
        """A marker pattern matching id 7 (any rotation) must have distance 0."""
        aruco_dict = cv.aruco.Dictionary_get(cv.aruco.DICT_4X4_50)
        idx = 7
        rotation = 3
        bit_marker = np.array([[0, 1, 0, 1], [0, 1, 1, 1], [1, 1, 0, 0], [0, 1, 0, 0]], dtype=np.uint8)
        dist = aruco_dict.getDistanceToId(bit_marker, idx)

        self.assertEqual(dist, 0)
if __name__ == '__main__':
    # Delegate to the shared OpenCV test bootstrapper.
    # (The original line carried stray dataset-artifact text after the call.)
    NewOpenCVTests.bootstrap()
# Python 2/3 compatibility
from __future__ import print_function
import os, numpy as np
import cv2 as cv
from tests_common import NewOpenCVTests
class aruco_test(NewOpenCVTests):
    """Smoke tests for the cv.aruco Python bindings."""

    def test_idsAccessibility(self):
        # Board ids must be readable/writable via both the attribute and setIds().
        ids = np.arange(17)
        rev_ids = ids[::-1]
        aruco_dict = cv.aruco.Dictionary_get(cv.aruco.DICT_5X5_250)
        board = cv.aruco.CharucoBoard_create(7, 5, 1, 0.5, aruco_dict)
        np.testing.assert_array_equal(board.ids.squeeze(), ids)
        board.ids = rev_ids
        np.testing.assert_array_equal(board.ids.squeeze(), rev_ids)
        board.setIds(ids)
        np.testing.assert_array_equal(board.ids.squeeze(), ids)
        # A wrong-sized id array must be rejected.
        with self.assertRaises(cv.error):
            board.setIds(np.array([0]))

    def test_drawCharucoDiamond(self):
        aruco_dict = cv.aruco.Dictionary_get(cv.aruco.DICT_4X4_50)
        img = cv.aruco.drawCharucoDiamond(aruco_dict, np.array([0, 1, 2, 3]), 100, 80)
        self.assertTrue(img is not None)

    def test_write_read_dict(self):
        # Bind the temp filename before the try block so the finally clause
        # can never hit a NameError when dictionary creation itself fails.
        filename = "test_dict.yml"
        try:
            aruco_dict = cv.aruco.getPredefinedDictionary(cv.aruco.DICT_5X5_50)
            markers_gold = aruco_dict.bytesList
            # write aruco_dict
            fs_write = cv.FileStorage(filename, cv.FileStorage_WRITE)
            aruco_dict.writeDictionary(fs_write)
            fs_write.release()
            # reset aruco_dict
            aruco_dict = cv.aruco.getPredefinedDictionary(cv.aruco.DICT_6X6_250)
            # read aruco_dict
            fs_read = cv.FileStorage(filename, cv.FileStorage_READ)
            aruco_dict.readDictionary(fs_read.root())
            fs_read.release()
            # check equal
            self.assertEqual(aruco_dict.markerSize, 5)
            self.assertEqual(aruco_dict.maxCorrectionBits, 3)
            np.testing.assert_array_equal(aruco_dict.bytesList, markers_gold)
        finally:
            if os.path.exists(filename):
                os.remove(filename)

    def test_identify(self):
        aruco_dict = cv.aruco.Dictionary_get(cv.aruco.DICT_4X4_50)
        expected_idx = 9
        expected_rotation = 2
        bit_marker = np.array([[0, 1, 1, 0], [1, 0, 1, 0], [1, 1, 1, 1], [0, 0, 1, 1]], dtype=np.uint8)
        check, idx, rotation = aruco_dict.identify(bit_marker, 0)
        # assertTrue takes (expr, msg); the original passed True as the
        # message argument, which asserted nothing extra.
        self.assertTrue(check)
        self.assertEqual(idx, expected_idx)
        self.assertEqual(rotation, expected_rotation)

    def test_getDistanceToId(self):
        aruco_dict = cv.aruco.Dictionary_get(cv.aruco.DICT_4X4_50)
        idx = 7
        bit_marker = np.array([[0, 1, 0, 1], [0, 1, 1, 1], [1, 1, 0, 0], [0, 1, 0, 0]], dtype=np.uint8)
        dist = aruco_dict.getDistanceToId(bit_marker, idx)
        self.assertEqual(dist, 0)
if __name__ == '__main__':
    # Delegate to the shared OpenCV test bootstrapper.
    # (The original line carried stray dataset-artifact text after the call.)
    NewOpenCVTests.bootstrap()
import os
import copy
import logging
import argparse
from typing import *
logger = logging.getLogger(__name__)
def add_env_args_to_parser(parser):
    # type: (argparse.ArgumentParser) -> None
    """Register the standard environment options on *parser*.

    Adds ``--pd-work``, ``--pd-data`` and the ``-l/--log`` level option.
    """
    parser.add_argument('--pd-work', required=False, default=None, help="Path to working directory")
    parser.add_argument('--pd-data', required=False, default=None, help="Path to data directory")
    log_level_choices = ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']
    parser.add_argument("-l", "--log", dest="loglevel", choices=log_level_choices,
                        help="Set the logging level", default='WARNING')
class Environment:
    """A class representing the project environment variables, including
    paths to useful directories (data, code, etc...)
    """

    def __init__(self, pd_data=None, pd_work=None, **kwargs):
        # type: (str, str, Dict[str, Any]) -> None
        self._env = Environment.load_environment_variables(pd_data, pd_work, **kwargs)

    def __getitem__(self, item):
        # type: (str) -> Any
        return self._env[item]

    def __setitem__(self, key, value):
        # type: (str, Any) -> None
        self._env[key] = value

    def duplicate(self, new_values=None):
        # type: (Dict[str, Any]) -> Environment
        """Create a deep copy of the environment, overriding entries with
        *new_values* when given.
        """
        new_env = copy.deepcopy(self)
        if new_values is not None:
            for item in new_values.keys():
                new_env[item] = new_values[item]
        return new_env

    @classmethod
    def init_from_argparse(cls, parser):
        # type: (argparse.Namespace) -> Environment
        """Build an Environment from parsed command-line arguments."""
        return cls(pd_data=parser.pd_data, pd_work=parser.pd_work)

    @staticmethod
    def load_environment_variables(pd_data=None, pd_work=None, **kwargs):
        # type: (str, str, Dict[str, Any]) -> Dict[str, str]
        """Compute the project directory layout rooted four levels above
        this file.

        Side effects: creates ``pd_work`` when missing and refreshes the
        module-level ``ENV`` mapping (kept for backward compatibility).
        """
        # path to current file
        pd_current_file = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
        # Structure:
        pd_base = os.path.abspath(os.path.join(pd_current_file, "../../../../"))  # get base level
        pd_bin = os.path.join(pd_base, "bin")
        pd_tmp = os.path.join(pd_base, "tmp")
        pd_runs = os.path.join(pd_base, "runs")
        pd_code = os.path.join(pd_base, "code")
        pd_config = os.path.join(pd_base, "config")
        pd_bin_external = os.path.join(pd_base, "bin_external")
        pd_data = os.path.abspath(pd_data) if pd_data is not None else os.path.join(pd_base, "data")
        pd_work = os.path.abspath(pd_work) if pd_work is not None else os.path.abspath(".")
        # exist_ok avoids the check-then-create race of the original
        # `if not exists: makedirs` sequence.
        os.makedirs(pd_work, exist_ok=True)
        env = {
            "pd-base": pd_base, "pd-bin": pd_bin, "pd-tmp": pd_tmp, "pd-runs": pd_runs, "pd-code": pd_code,
            "pd-config": pd_config, "pd-bin-external": pd_bin_external, "pd-data": pd_data, "pd-work": pd_work
        }
        # `copy` is already imported at module level; the original's
        # redundant local `import copy` was removed.
        global ENV
        ENV = copy.deepcopy(env)
        return env
ENV = Environment() | code/python/lib/mg_general/__init__.py |
import os
import copy
import logging
import argparse
from typing import *
logger = logging.getLogger(__name__)
def add_env_args_to_parser(parser):
    # type: (argparse.ArgumentParser) -> None
    """Register the standard environment options on *parser*.

    Adds ``--pd-work``, ``--pd-data`` and the ``-l/--log`` level option.
    """
    parser.add_argument('--pd-work', required=False, default=None, help="Path to working directory")
    parser.add_argument('--pd-data', required=False, default=None, help="Path to data directory")
    log_level_choices = ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']
    parser.add_argument("-l", "--log", dest="loglevel", choices=log_level_choices,
                        help="Set the logging level", default='WARNING')
class Environment:
    """A class representing the project environment variables, including
    paths to useful directories (data, code, etc...)
    """

    def __init__(self, pd_data=None, pd_work=None, **kwargs):
        # type: (str, str, Dict[str, Any]) -> None
        self._env = Environment.load_environment_variables(pd_data, pd_work, **kwargs)

    def __getitem__(self, item):
        # type: (str) -> Any
        return self._env[item]

    def __setitem__(self, key, value):
        # type: (str, Any) -> None
        self._env[key] = value

    def duplicate(self, new_values=None):
        # type: (Dict[str, Any]) -> Environment
        """Create a deep copy of the environment, overriding entries with
        *new_values* when given.
        """
        new_env = copy.deepcopy(self)
        if new_values is not None:
            for item in new_values.keys():
                new_env[item] = new_values[item]
        return new_env

    @classmethod
    def init_from_argparse(cls, parser):
        # type: (argparse.Namespace) -> Environment
        """Build an Environment from parsed command-line arguments."""
        return cls(pd_data=parser.pd_data, pd_work=parser.pd_work)

    @staticmethod
    def load_environment_variables(pd_data=None, pd_work=None, **kwargs):
        # type: (str, str, Dict[str, Any]) -> Dict[str, str]
        """Compute the project directory layout rooted four levels above
        this file.

        Side effects: creates ``pd_work`` when missing and refreshes the
        module-level ``ENV`` mapping (kept for backward compatibility).
        """
        # path to current file
        pd_current_file = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
        # Structure:
        pd_base = os.path.abspath(os.path.join(pd_current_file, "../../../../"))  # get base level
        pd_bin = os.path.join(pd_base, "bin")
        pd_tmp = os.path.join(pd_base, "tmp")
        pd_runs = os.path.join(pd_base, "runs")
        pd_code = os.path.join(pd_base, "code")
        pd_config = os.path.join(pd_base, "config")
        pd_bin_external = os.path.join(pd_base, "bin_external")
        pd_data = os.path.abspath(pd_data) if pd_data is not None else os.path.join(pd_base, "data")
        pd_work = os.path.abspath(pd_work) if pd_work is not None else os.path.abspath(".")
        # exist_ok avoids the check-then-create race of the original
        # `if not exists: makedirs` sequence.
        os.makedirs(pd_work, exist_ok=True)
        env = {
            "pd-base": pd_base, "pd-bin": pd_bin, "pd-tmp": pd_tmp, "pd-runs": pd_runs, "pd-code": pd_code,
            "pd-config": pd_config, "pd-bin-external": pd_bin_external, "pd-data": pd_data, "pd-work": pd_work
        }
        # `copy` is already imported at module level; the original's
        # redundant local `import copy` was removed.
        global ENV
        ENV = copy.deepcopy(env)
        return env
ENV = Environment() | 0.598077 | 0.110759 |
import asyncio
import importlib
import json
import logging
import os
import pprint
import re
import sys
import time
import docker
import netaddr
import netifaces
import sh
import tornado.httpclient
from wotemu.enums import Labels
_CGROUP_PATH = "/proc/self/cgroup"
_STACK_NAMESPACE = "com.docker.stack.namespace"
_CID_HOST_LEN = 12
_STATE_RUNNING = "running"
_logger = logging.getLogger(__name__)
class NodeHTTPTimeout(Exception):
    """Raised when a node's HTTP catalogue does not respond within the timeout."""
    pass
async def _ping_catalogue(catalogue_url, thing_ids=None):
    """Fetch the catalogue at *catalogue_url* once.

    Returns True when it responds with JSON containing every ID in
    *thing_ids*; False on any error (connection, JSON, missing ID).
    """
    thing_ids = thing_ids or []
    http_client = tornado.httpclient.AsyncHTTPClient()
    try:
        catalogue_res = await http_client.fetch(catalogue_url)
        catalogue = json.loads(catalogue_res.body)
        assert all(thing_id in catalogue for thing_id in thing_ids)
        _logger.debug("Catalogue ping OK: %s", catalogue_url)
        return True
    except Exception as ex:
        _logger.debug("Catalogue ping error (%s): %s", catalogue_url, repr(ex))
        return False
    finally:
        http_client.close()
async def _ping_catalogue_timeout(catalogue_url, wait, timeout, thing_ids=None):
    """Poll the catalogue every *wait* seconds until it answers.

    :raises NodeHTTPTimeout: when *timeout* seconds (None = wait forever)
        elapse without a successful ping
    """
    _logger.debug("Waiting for catalogue:\n%s", pprint.pformat({
        "catalogue_url": catalogue_url,
        "wait": wait,
        "timeout": timeout,
        "thing_ids": thing_ids
    }))
    ini = time.time()
    def _raise_timeout():
        # Raise once the elapsed time exceeds the timeout budget.
        if timeout is None:
            return
        diff = time.time() - ini
        if diff >= timeout:
            raise NodeHTTPTimeout(
                f"HTTP timeout ({timeout} s): {catalogue_url}")
    while True:
        _raise_timeout()
        if (await _ping_catalogue(catalogue_url, thing_ids=thing_ids)):
            break
        _raise_timeout()
        await asyncio.sleep(wait)
async def wait_node(conf, name, wait=2, timeout=120, find_replicas=True, thing_ids=None):
    """Block until the catalogue of every replica of service *name* is up.

    Reads ``conf.docker_proxy_url`` and ``conf.port_catalogue``. When
    *find_replicas* is True the service name is first translated to the
    container hostnames of all its replicas; on failure it falls back to
    the untranslated service name.
    """
    cont_hosts = [name]
    if find_replicas:
        _logger.debug((
            "Attempting to translate service name '%s' "
            "to the container hostnames of all the "
            "replicas for that service"
        ), name)
        try:
            cont_hosts = get_service_container_hostnames(
                docker_url=conf.docker_proxy_url,
                name=name)
        except Exception as ex:
            _logger.warning("Error finding container hostnames: %s", ex)
            _logger.warning("Using untranslated service name: %s", cont_hosts)
    catalogue_urls = [
        "http://{}:{}".format(host, conf.port_catalogue)
        for host in cont_hosts
    ]
    _logger.debug("Catalogue URLs: %s", catalogue_urls)
    ping_awaitables = [
        _ping_catalogue_timeout(
            catalogue_url=url,
            wait=wait,
            timeout=timeout,
            thing_ids=thing_ids)
        for url in catalogue_urls
    ]
    await asyncio.gather(*ping_awaitables)
def _find_service_container_hosts(docker_api_client, service_name):
    """Return the short (12-char) container IDs of all running tasks of
    *service_name*; an empty list when the Docker API call fails."""
    task_filters = {
        "service": service_name,
        "desired-state": _STATE_RUNNING
    }
    _logger.debug("Filtering Docker tasks using filters: %s", task_filters)
    try:
        service_tasks = docker_api_client.tasks(filters=task_filters)
    except Exception as ex:
        _logger.warning(
            "Error finding Docker tasks (filters: %s): %s",
            task_filters, ex)
        return []
    _logger.debug(
        "Found %s tasks for service: %s",
        len(service_tasks), service_name)
    return [
        task["Status"]["ContainerStatus"]["ContainerID"][:_CID_HOST_LEN]
        for task in service_tasks
    ]
def get_service_container_hostnames(docker_url, name):
    """Translate a service name into the container hostnames (short
    container IDs) of all its running replicas.

    :param docker_url: URL of the Docker API
    :param name: service name, optionally suffixed with ".<network>"
    :return: non-empty list of container hostnames
    :raises Exception: when no container hostname can be found
    """
    docker_api_client = docker.APIClient(base_url=docker_url)
    _logger.debug("Finding container hostnames for: %s", name)
    service_parts = name.split(".")
    try:
        # A trailing dot-component may be a network name rather than part
        # of the service name; strip it when it resolves to a network.
        network_candidate = service_parts[-1]
        docker_api_client.inspect_network(network_candidate)
        _logger.debug("Found network: %s", network_candidate)
        base_name = ".".join(service_parts[:-1])
    except docker.errors.NotFound:
        _logger.debug("Network not found: %s", network_candidate)
        base_name = name
    namespace = get_current_stack_namespace(docker_url)
    # Look up both the namespaced name and (when already namespaced) the raw name.
    service_names = [f"{namespace}_" + base_name]
    if base_name.startswith(f"{namespace}_"):
        service_names.append(base_name)
    ret = [
        host
        for service_name in service_names
        for host in _find_service_container_hosts(
            docker_api_client=docker_api_client,
            service_name=service_name)
    ]
    if not ret:
        # The original passed logging-style %-args to Exception, so the
        # message was never formatted; format it explicitly.
        raise Exception("Could not find container hostnames for: {}".format(name))
    _logger.debug("Service %s container hostnames: %s", name, ret)
    return ret
def ping_docker(docker_url):
    """Check that the Docker daemon at *docker_url* is reachable.

    :raises Exception: when the daemon cannot be pinged
    """
    try:
        docker_client = docker.DockerClient(base_url=docker_url)
        docker_client.ping()
    except Exception as ex:
        raise Exception("Could not ping Docker daemon: {}".format(ex))
def get_current_container_id():
    """Return the ID of the container this process runs in, parsed from
    /proc/self/cgroup.

    NOTE(review): the regex expects cgroup v1 style ":/docker/<id>"
    entries - confirm behaviour on cgroup v2 hosts.
    :raises Exception: outside a container or when no ID can be parsed
    """
    try:
        with open(_CGROUP_PATH, "r") as fh:
            cgroup = fh.read()
    except FileNotFoundError as ex:
        raise Exception((
            "The current environment does not "
            "seem to be a Docker container ({})"
        ).format(ex))
    cid_regex = r"\d+:.+:\/docker\/([a-zA-Z0-9]+)"
    result = re.search(cid_regex, cgroup)
    if not result or len(result.groups()) <= 0:
        _logger.warning("Could not find container ID in:\n%s", cgroup)
        raise Exception("Could not retrieve container ID")
    cid = result.groups()[0]
    _logger.debug("Current container ID: %s", cid)
    return cid
def get_task_container_id(task_dict):
    """Return the container ID stored under Status.ContainerStatus, or None."""
    status = task_dict.get("Status", {})
    container_status = status.get("ContainerStatus", {})
    return container_status.get("ContainerID", None)
def get_current_task(docker_url):
    """Return the Docker task dict whose container ID matches this process.

    :raises Exception: when no task matches the current container
    """
    docker_api_client = docker.APIClient(base_url=docker_url)
    cid = get_current_container_id()
    task = next((
        task for task in docker_api_client.tasks()
        if get_task_container_id(task) == cid), None)
    if task is None:
        raise Exception("Could not find task for container: {}".format(cid))
    return task
def get_current_stack_namespace(docker_url):
    """Return the stack namespace label of the current task, or None."""
    curr_task = get_current_task(docker_url=docker_url)
    return curr_task.get("Spec", {}).get("ContainerSpec", {}).get("Labels", {}).get(_STACK_NAMESPACE, None)
def get_task_networks(docker_url, task):
    """Return the IDs of the task's networks that carry the WOTEMU_NETWORK label."""
    docker_api_client = docker.APIClient(base_url=docker_url)
    network_ids = [
        net["Network"]["ID"]
        for net in task["NetworksAttachments"]
    ]
    networks = {
        net_id: docker_api_client.inspect_network(net_id)
        for net_id in network_ids
    }
    # Keep only the emulator-labelled networks.
    networks = {
        net_id: net_info for net_id, net_info in networks.items()
        if net_info.get("Labels", {}).get(Labels.WOTEMU_NETWORK.value, None) is not None
    }
    return list(networks.keys())
def get_task_labels(docker_url, task_name):
    """Return the container labels dict of the task named *task_name*."""
    docker_api_client = docker.APIClient(base_url=docker_url)
    task_info = docker_api_client.inspect_task(task_name)
    return task_info["Spec"]["ContainerSpec"]["Labels"]
def get_network_gateway_task(docker_url, network_id):
    """Return the task dict of the gateway container attached to *network_id*.

    Scans the network's services/tasks (verbose inspect) and yields the
    first task whose labels carry WOTEMU_GATEWAY.
    NOTE(review): the bare ``next(...)`` raises StopIteration when no
    gateway task exists on the network.
    """
    docker_api_client = docker.APIClient(base_url=docker_url)
    network_info = docker_api_client.inspect_network(network_id, verbose=True)
    # Drop the unnamed pseudo-service entries.
    service_infos = {
        net_name: info
        for net_name, info in network_info["Services"].items()
        if len(net_name) > 0
    }
    _logger.debug(
        "Network %s services:\n%s",
        network_id,
        pprint.pformat(list(service_infos.keys())))
    task_infos = {
        task_info["Name"]: task_info
        for net_name, serv_info in service_infos.items()
        for task_info in serv_info["Tasks"]
    }
    _logger.debug(
        "Network %s tasks:\n%s",
        network_id,
        pprint.pformat(list(task_infos.keys())))
    task_labels = {
        task_name: get_task_labels(docker_url, task_name)
        for task_name in task_infos.keys()
    }
    return next(
        task_infos[task_name]
        for task_name, labels in task_labels.items()
        if labels.get(Labels.WOTEMU_GATEWAY.value, None) is not None)
def get_output_iface_for_task(net_task_dict):
    """Return ``(iface_name, addr_dict)`` for the local interface whose
    IPv4 subnet contains the task's endpoint IP.

    NOTE(review): the bare ``next(...)`` raises StopIteration when no
    local interface matches.
    """
    task_name = net_task_dict["Name"]
    task_addr = netaddr.IPAddress(net_task_dict["EndpointIP"])
    iface_addrs = {
        name: netifaces.ifaddresses(name).get(netifaces.AF_INET)
        for name in netifaces.interfaces()
        if netifaces.ifaddresses(name).get(netifaces.AF_INET)
    }
    _logger.debug(
        "Current container interfaces:\n%s",
        pprint.pformat(iface_addrs))
    ret = next(
        (iface_name, addr)
        for iface_name, iface_addrs in iface_addrs.items()
        for addr in iface_addrs
        if task_addr in netaddr.IPNetwork("{}/{}".format(addr["addr"], addr["netmask"])))
    _logger.debug("Output interface for %s: %s", task_name, ret)
    return ret
def strip_ansi_codes(val):
    """Remove ANSI escape sequences from *val*.

    Attribution to: https://stackoverflow.com/a/15780675
    """
    ansi_pattern = r'\x1b\[([0-9,A-Z]{1,2}(;[0-9]{1,2})?(;[0-9]{3})?)?[m|K]?'
    return re.sub(ansi_pattern, "", val)
def import_func(module_path, func_name):
    """Import *func_name* from the Python file at *module_path*.

    Side effect: prepends the file's directory to ``sys.path``.
    :raises Exception: when the imported module lacks *func_name*
    """
    _logger.debug("Attempting to import module: %s", module_path)
    path_root, path_base = os.path.split(module_path)
    if path_root not in sys.path:
        sys.path.insert(0, path_root)
    mod_name, _ext = os.path.splitext(path_base)
    mod_import = importlib.import_module(mod_name)
    mod_dir = dir(mod_import)
    _logger.info("Imported: %s", mod_import)
    _logger.debug("dir(%s): %s", mod_import, mod_dir)
    if func_name not in mod_dir:
        raise Exception("Module {} does not contain function '{}'".format(
            mod_import, func_name))
    return getattr(mod_import, func_name)
async def consume_from_catalogue(wot, port_catalogue, servient_host, thing_id):
    """Consume the Thing *thing_id* from a remote servient's catalogue.

    :raises Exception: when the catalogue does not list *thing_id*
    """
    http_client = tornado.httpclient.AsyncHTTPClient()
    cat_url = "http://{}:{}".format(servient_host, port_catalogue)
    _logger.debug("Fetching catalogue: %s", cat_url)
    catalogue_res = await http_client.fetch(cat_url)
    catalogue = json.loads(catalogue_res.body)
    _logger.debug("Catalogue:\n%s", pprint.pformat(catalogue))
    if thing_id not in catalogue:
        raise Exception(f"Thing '{thing_id}' not in catalogue: {cat_url}")
    td_url = "http://{}:{}/{}".format(
        servient_host,
        port_catalogue,
        catalogue[thing_id].strip("/"))
    _logger.debug("Consuming from URL: %s", td_url)
    return await wot.consume_from_url(td_url)
def cgget(name):
    """Read a single cgroup parameter via the ``cgget`` CLI.

    :param name: cgroup variable name passed to ``cgget -r``
    :return: the first integer found in the output, or None on any error
    """
    try:
        sh_cgget = sh.Command("cgget")
        cmd_parts = ["-v", "-r", name, "/"]
        proc = sh_cgget(cmd_parts, _err_to_out=True)
        _logger.debug("%s: %s", proc.ran, proc.stdout)
        match = re.search(r"(-?\d+)\n", proc.stdout.decode("utf8"))
        return int(match.group(1)) if match else None
    except Exception:
        # The original bare-except call had a %s placeholder with no
        # argument, which itself raised a logging format error.
        _logger.warning("Error running cgget for: %s", name, exc_info=True)
        return None
import importlib
import json
import logging
import os
import pprint
import re
import sys
import time
import docker
import netaddr
import netifaces
import sh
import tornado.httpclient
from wotemu.enums import Labels
_CGROUP_PATH = "/proc/self/cgroup"
_STACK_NAMESPACE = "com.docker.stack.namespace"
_CID_HOST_LEN = 12
_STATE_RUNNING = "running"
_logger = logging.getLogger(__name__)
class NodeHTTPTimeout(Exception):
    """Raised when a node's HTTP catalogue does not respond within the timeout."""
    pass
async def _ping_catalogue(catalogue_url, thing_ids=None):
    """Fetch the catalogue at *catalogue_url* once.

    Returns True when it responds with JSON containing every ID in
    *thing_ids*; False on any error (connection, JSON, missing ID).
    """
    thing_ids = thing_ids or []
    http_client = tornado.httpclient.AsyncHTTPClient()
    try:
        catalogue_res = await http_client.fetch(catalogue_url)
        catalogue = json.loads(catalogue_res.body)
        assert all(thing_id in catalogue for thing_id in thing_ids)
        _logger.debug("Catalogue ping OK: %s", catalogue_url)
        return True
    except Exception as ex:
        _logger.debug("Catalogue ping error (%s): %s", catalogue_url, repr(ex))
        return False
    finally:
        http_client.close()
async def _ping_catalogue_timeout(catalogue_url, wait, timeout, thing_ids=None):
    """Poll the catalogue every *wait* seconds until it answers.

    :raises NodeHTTPTimeout: when *timeout* seconds (None = wait forever)
        elapse without a successful ping
    """
    _logger.debug("Waiting for catalogue:\n%s", pprint.pformat({
        "catalogue_url": catalogue_url,
        "wait": wait,
        "timeout": timeout,
        "thing_ids": thing_ids
    }))
    ini = time.time()
    def _raise_timeout():
        # Raise once the elapsed time exceeds the timeout budget.
        if timeout is None:
            return
        diff = time.time() - ini
        if diff >= timeout:
            raise NodeHTTPTimeout(
                f"HTTP timeout ({timeout} s): {catalogue_url}")
    while True:
        _raise_timeout()
        if (await _ping_catalogue(catalogue_url, thing_ids=thing_ids)):
            break
        _raise_timeout()
        await asyncio.sleep(wait)
async def wait_node(conf, name, wait=2, timeout=120, find_replicas=True, thing_ids=None):
    """Block until the catalogue of every replica of service *name* is up.

    Reads ``conf.docker_proxy_url`` and ``conf.port_catalogue``. When
    *find_replicas* is True the service name is first translated to the
    container hostnames of all its replicas; on failure it falls back to
    the untranslated service name.
    """
    cont_hosts = [name]
    if find_replicas:
        _logger.debug((
            "Attempting to translate service name '%s' "
            "to the container hostnames of all the "
            "replicas for that service"
        ), name)
        try:
            cont_hosts = get_service_container_hostnames(
                docker_url=conf.docker_proxy_url,
                name=name)
        except Exception as ex:
            _logger.warning("Error finding container hostnames: %s", ex)
            _logger.warning("Using untranslated service name: %s", cont_hosts)
    catalogue_urls = [
        "http://{}:{}".format(host, conf.port_catalogue)
        for host in cont_hosts
    ]
    _logger.debug("Catalogue URLs: %s", catalogue_urls)
    ping_awaitables = [
        _ping_catalogue_timeout(
            catalogue_url=url,
            wait=wait,
            timeout=timeout,
            thing_ids=thing_ids)
        for url in catalogue_urls
    ]
    await asyncio.gather(*ping_awaitables)
def _find_service_container_hosts(docker_api_client, service_name):
    """Return the short (12-char) container IDs of all running tasks of
    *service_name*; an empty list when the Docker API call fails."""
    task_filters = {
        "service": service_name,
        "desired-state": _STATE_RUNNING
    }
    _logger.debug("Filtering Docker tasks using filters: %s", task_filters)
    try:
        service_tasks = docker_api_client.tasks(filters=task_filters)
    except Exception as ex:
        _logger.warning(
            "Error finding Docker tasks (filters: %s): %s",
            task_filters, ex)
        return []
    _logger.debug(
        "Found %s tasks for service: %s",
        len(service_tasks), service_name)
    return [
        task["Status"]["ContainerStatus"]["ContainerID"][:_CID_HOST_LEN]
        for task in service_tasks
    ]
def get_service_container_hostnames(docker_url, name):
    """Translate a service name into the container hostnames (short
    container IDs) of all its running replicas.

    :param docker_url: URL of the Docker API
    :param name: service name, optionally suffixed with ".<network>"
    :return: non-empty list of container hostnames
    :raises Exception: when no container hostname can be found
    """
    docker_api_client = docker.APIClient(base_url=docker_url)
    _logger.debug("Finding container hostnames for: %s", name)
    service_parts = name.split(".")
    try:
        # A trailing dot-component may be a network name rather than part
        # of the service name; strip it when it resolves to a network.
        network_candidate = service_parts[-1]
        docker_api_client.inspect_network(network_candidate)
        _logger.debug("Found network: %s", network_candidate)
        base_name = ".".join(service_parts[:-1])
    except docker.errors.NotFound:
        _logger.debug("Network not found: %s", network_candidate)
        base_name = name
    namespace = get_current_stack_namespace(docker_url)
    # Look up both the namespaced name and (when already namespaced) the raw name.
    service_names = [f"{namespace}_" + base_name]
    if base_name.startswith(f"{namespace}_"):
        service_names.append(base_name)
    ret = [
        host
        for service_name in service_names
        for host in _find_service_container_hosts(
            docker_api_client=docker_api_client,
            service_name=service_name)
    ]
    if not ret:
        # The original passed logging-style %-args to Exception, so the
        # message was never formatted; format it explicitly.
        raise Exception("Could not find container hostnames for: {}".format(name))
    _logger.debug("Service %s container hostnames: %s", name, ret)
    return ret
def ping_docker(docker_url):
    """Check that the Docker daemon at *docker_url* is reachable.

    :raises Exception: when the daemon cannot be pinged
    """
    try:
        docker_client = docker.DockerClient(base_url=docker_url)
        docker_client.ping()
    except Exception as ex:
        raise Exception("Could not ping Docker daemon: {}".format(ex))
def get_current_container_id():
    """Return the ID of the container this process runs in, parsed from
    /proc/self/cgroup.

    NOTE(review): the regex expects cgroup v1 style ":/docker/<id>"
    entries - confirm behaviour on cgroup v2 hosts.
    :raises Exception: outside a container or when no ID can be parsed
    """
    try:
        with open(_CGROUP_PATH, "r") as fh:
            cgroup = fh.read()
    except FileNotFoundError as ex:
        raise Exception((
            "The current environment does not "
            "seem to be a Docker container ({})"
        ).format(ex))
    cid_regex = r"\d+:.+:\/docker\/([a-zA-Z0-9]+)"
    result = re.search(cid_regex, cgroup)
    if not result or len(result.groups()) <= 0:
        _logger.warning("Could not find container ID in:\n%s", cgroup)
        raise Exception("Could not retrieve container ID")
    cid = result.groups()[0]
    _logger.debug("Current container ID: %s", cid)
    return cid
def get_task_container_id(task_dict):
    """Return the container ID stored under Status.ContainerStatus, or None."""
    status = task_dict.get("Status", {})
    container_status = status.get("ContainerStatus", {})
    return container_status.get("ContainerID", None)
def get_current_task(docker_url):
    """Return the Docker task dict whose container ID matches this process.

    :raises Exception: when no task matches the current container
    """
    docker_api_client = docker.APIClient(base_url=docker_url)
    cid = get_current_container_id()
    task = next((
        task for task in docker_api_client.tasks()
        if get_task_container_id(task) == cid), None)
    if task is None:
        raise Exception("Could not find task for container: {}".format(cid))
    return task
def get_current_stack_namespace(docker_url):
    """Return the stack namespace label of the current task, or None."""
    curr_task = get_current_task(docker_url=docker_url)
    return curr_task.get("Spec", {}).get("ContainerSpec", {}).get("Labels", {}).get(_STACK_NAMESPACE, None)
def get_task_networks(docker_url, task):
    """Return the IDs of the task's networks that carry the WOTEMU_NETWORK label."""
    docker_api_client = docker.APIClient(base_url=docker_url)
    network_ids = [
        net["Network"]["ID"]
        for net in task["NetworksAttachments"]
    ]
    networks = {
        net_id: docker_api_client.inspect_network(net_id)
        for net_id in network_ids
    }
    # Keep only the emulator-labelled networks.
    networks = {
        net_id: net_info for net_id, net_info in networks.items()
        if net_info.get("Labels", {}).get(Labels.WOTEMU_NETWORK.value, None) is not None
    }
    return list(networks.keys())
def get_task_labels(docker_url, task_name):
    """Return the container labels dict of the task named *task_name*."""
    docker_api_client = docker.APIClient(base_url=docker_url)
    task_info = docker_api_client.inspect_task(task_name)
    return task_info["Spec"]["ContainerSpec"]["Labels"]
def get_network_gateway_task(docker_url, network_id):
    """Return the task dict of the gateway container attached to *network_id*.

    Scans the network's services/tasks (verbose inspect) and yields the
    first task whose labels carry WOTEMU_GATEWAY.
    NOTE(review): the bare ``next(...)`` raises StopIteration when no
    gateway task exists on the network.
    """
    docker_api_client = docker.APIClient(base_url=docker_url)
    network_info = docker_api_client.inspect_network(network_id, verbose=True)
    # Drop the unnamed pseudo-service entries.
    service_infos = {
        net_name: info
        for net_name, info in network_info["Services"].items()
        if len(net_name) > 0
    }
    _logger.debug(
        "Network %s services:\n%s",
        network_id,
        pprint.pformat(list(service_infos.keys())))
    task_infos = {
        task_info["Name"]: task_info
        for net_name, serv_info in service_infos.items()
        for task_info in serv_info["Tasks"]
    }
    _logger.debug(
        "Network %s tasks:\n%s",
        network_id,
        pprint.pformat(list(task_infos.keys())))
    task_labels = {
        task_name: get_task_labels(docker_url, task_name)
        for task_name in task_infos.keys()
    }
    return next(
        task_infos[task_name]
        for task_name, labels in task_labels.items()
        if labels.get(Labels.WOTEMU_GATEWAY.value, None) is not None)
def get_output_iface_for_task(net_task_dict):
    """Return ``(iface_name, addr_dict)`` for the local interface whose
    IPv4 subnet contains the task's endpoint IP.

    NOTE(review): the bare ``next(...)`` raises StopIteration when no
    local interface matches.
    """
    task_name = net_task_dict["Name"]
    task_addr = netaddr.IPAddress(net_task_dict["EndpointIP"])
    iface_addrs = {
        name: netifaces.ifaddresses(name).get(netifaces.AF_INET)
        for name in netifaces.interfaces()
        if netifaces.ifaddresses(name).get(netifaces.AF_INET)
    }
    _logger.debug(
        "Current container interfaces:\n%s",
        pprint.pformat(iface_addrs))
    ret = next(
        (iface_name, addr)
        for iface_name, iface_addrs in iface_addrs.items()
        for addr in iface_addrs
        if task_addr in netaddr.IPNetwork("{}/{}".format(addr["addr"], addr["netmask"])))
    _logger.debug("Output interface for %s: %s", task_name, ret)
    return ret
def strip_ansi_codes(val):
    """Remove ANSI escape sequences from *val*.

    Attribution to: https://stackoverflow.com/a/15780675
    """
    ansi_pattern = r'\x1b\[([0-9,A-Z]{1,2}(;[0-9]{1,2})?(;[0-9]{3})?)?[m|K]?'
    return re.sub(ansi_pattern, "", val)
def import_func(module_path, func_name):
    """Import *func_name* from the Python file at *module_path*.

    Side effect: prepends the file's directory to ``sys.path``.
    :raises Exception: when the imported module lacks *func_name*
    """
    _logger.debug("Attempting to import module: %s", module_path)
    path_root, path_base = os.path.split(module_path)
    if path_root not in sys.path:
        sys.path.insert(0, path_root)
    mod_name, _ext = os.path.splitext(path_base)
    mod_import = importlib.import_module(mod_name)
    mod_dir = dir(mod_import)
    _logger.info("Imported: %s", mod_import)
    _logger.debug("dir(%s): %s", mod_import, mod_dir)
    if func_name not in mod_dir:
        raise Exception("Module {} does not contain function '{}'".format(
            mod_import, func_name))
    return getattr(mod_import, func_name)
async def consume_from_catalogue(wot, port_catalogue, servient_host, thing_id):
    """Consume the Thing *thing_id* from a remote servient's catalogue.

    :raises Exception: when the catalogue does not list *thing_id*
    """
    http_client = tornado.httpclient.AsyncHTTPClient()
    cat_url = "http://{}:{}".format(servient_host, port_catalogue)
    _logger.debug("Fetching catalogue: %s", cat_url)
    catalogue_res = await http_client.fetch(cat_url)
    catalogue = json.loads(catalogue_res.body)
    _logger.debug("Catalogue:\n%s", pprint.pformat(catalogue))
    if thing_id not in catalogue:
        raise Exception(f"Thing '{thing_id}' not in catalogue: {cat_url}")
    td_url = "http://{}:{}/{}".format(
        servient_host,
        port_catalogue,
        catalogue[thing_id].strip("/"))
    _logger.debug("Consuming from URL: %s", td_url)
    return await wot.consume_from_url(td_url)
def cgget(name):
    """Read a single cgroup parameter via the ``cgget`` CLI.

    :param name: cgroup variable name passed to ``cgget -r``
    :return: the first integer found in the output, or None on any error
    """
    try:
        sh_cgget = sh.Command("cgget")
        cmd_parts = ["-v", "-r", name, "/"]
        proc = sh_cgget(cmd_parts, _err_to_out=True)
        _logger.debug("%s: %s", proc.ran, proc.stdout)
        match = re.search(r"(-?\d+)\n", proc.stdout.decode("utf8"))
        return int(match.group(1)) if match else None
    except Exception:
        # The original bare-except call had a %s placeholder with no
        # argument, which itself raised a logging format error.
        _logger.warning("Error running cgget for: %s", name, exc_info=True)
        return None
import os
import pytest
import caproto as ca
from caproto._headers import MessageHeader
def test_broadcast_auto_address_list():
    """EPICS_CA_AUTO_ADDR_LIST=YES must yield the netifaces broadcast set."""
    pytest.importorskip('netifaces')
    env = os.environ.copy()
    try:
        os.environ['EPICS_CA_ADDR_LIST'] = ''
        os.environ['EPICS_CA_AUTO_ADDR_LIST'] = 'YES'
        expected = set(bcast for addr, bcast in ca.get_netifaces_addresses())
        assert set(ca.get_address_list()) == expected
    finally:
        # Restore the environment exactly as it was before the test.
        os.environ.clear()
        os.environ.update(env)
def test_ensure_bytes():
    """ensure_bytes null-terminates str input and rejects non-str/bytes."""
    assert ca.ensure_bytes('abc') == b'abc\0'
    assert ca.ensure_bytes(b'abc\0') == b'abc\0'
    with pytest.raises(ca.CaprotoTypeError):
        ca.ensure_bytes(1)
# Each case: (input buffers, byte offset to consume, expected remaining buffers).
_incr_sends = [
    [(b'abc', b'def', b'ghi'),
     0,
     (b'abc', b'def', b'ghi')
     ],
    [(b'abc', b'def', b'ghi'),
     1,
     (b'bc', b'def', b'ghi')
     ],
    [(b'abc', b'def', b'ghi'),
     3,
     (b'def', b'ghi')
     ],
    [(MessageHeader(0, 1, 2, 3, 4, 5), b'def'),
     0,
     (bytes(MessageHeader(0, 1, 2, 3, 4, 5)), b'def'),
     ],
    [(MessageHeader(0, 1, 2, 3, 4, 5), b'def'),
     5,
     (bytes(MessageHeader(0, 1, 2, 3, 4, 5))[5:], b'def'),
     ],
]
@pytest.mark.parametrize('buffers, offset, expected', _incr_sends)
def test_buffer_list_slice(buffers, offset, expected):
    """buffer_list_slice must drop exactly *offset* bytes from the front."""
    assert ca.buffer_list_slice(*buffers, offset=offset) == expected
@pytest.mark.parametrize('buffers, offset, expected', _incr_sends)
def test_incremental_send(buffers, offset, expected):
    """Consuming one byte at a time must stay consistent with the flat byte string."""
    full_bytes = b''.join(bytes(b) for b in buffers)
    gen = ca.incremental_buffer_list_slice(*buffers)
    gen.send(None)
    for i in range(len(full_bytes)):
        try:
            buffers = gen.send(1)
        except StopIteration:
            # The generator may only end once every byte was consumed.
            assert i == (len(full_bytes) - 1), 'StopIteration unexpected'
            break
        assert full_bytes[i + 1:] == b''.join(bytes(b) for b in buffers)
# Each case: (pvname, expected (record_field, record, field, modifiers) tuple).
records_to_check = [
    ['x.NAME', ('x.NAME', 'x', 'NAME', None)],
    ['x.', ('x', 'x', None, None)],
    ['x', ('x', 'x', None, None)],
    ['x.NAME$',
     ('x.NAME', 'x', 'NAME',
      ca.RecordModifier(ca.RecordModifiers.long_string, None),
      )],
    ['x.VAL{"ts":true}',
     ('x.VAL', 'x', 'VAL',
      ca.RecordModifier(ca.RecordModifiers.filtered, '{"ts":true}')
      )],
    ['x.{}',
     ('x', 'x', None,
      ca.RecordModifier(ca.RecordModifiers.filtered, '{}'),
      )],
    ['x.VAL{}',
     ('x.VAL', 'x', 'VAL',
      ca.RecordModifier(ca.RecordModifiers.filtered, '{}'),
      )],
    ['x.NAME${}',
     ('x.NAME', 'x', 'NAME',
      ca.RecordModifier(ca.RecordModifiers.filtered |
                        ca.RecordModifiers.long_string, '{}'),
      )],
]
@pytest.mark.parametrize('pvname, expected_tuple', records_to_check)
def test_parse_record(pvname, expected_tuple):
    """parse_record_field must split record/field and detect modifiers."""
    parsed = ca.parse_record_field(pvname)
    print('parsed: ', tuple(parsed))
    print('expected:', expected_tuple)
    assert tuple(parsed) == expected_tuple
    if parsed.modifiers:
        modifiers, filter_text = parsed.modifiers
        if filter_text:
            # smoke test these
            ca.parse_channel_filter(filter_text)
# Filter bodies that must parse as records but be rejected by parse_channel_filter.
bad_filters = [
    ["x.{not-json}",
     ('x', 'x', None,
      ca.RecordModifier(ca.RecordModifiers.filtered, '{not-json}'),
      )],
    ['x.{"none":null}',
     ('x', 'x', None,
      ca.RecordModifier(ca.RecordModifiers.filtered, '{"none":null}'),
      )],
]
@pytest.mark.parametrize('pvname, expected_tuple', bad_filters)
def test_parse_record_bad_filters(pvname, expected_tuple):
    """Bad filter text must still parse as a record, but fail filter parsing."""
    parsed = ca.parse_record_field(pvname)
    print('parsed: ', tuple(parsed))
    print('expected:', expected_tuple)
    assert tuple(parsed) == expected_tuple
    modifiers, filter_text = parsed.modifiers
    try:
        filter_ = ca.parse_channel_filter(filter_text)
    except ValueError:
        # expected failure
        ...
    else:
        raise ValueError(f'Expected failure, instead returned {filter_}')
@pytest.mark.parametrize('protocol', list(ca.Protocol))
def test_env_util_smoke(protocol):
    """Smoke-test the environment/address helper functions for each protocol."""
    ca.get_environment_variables()
    try:
        ca.get_netifaces_addresses()
    except RuntimeError:
        # Netifaces may be unavailable
        ...
    ca.get_address_list(protocol=protocol)
    ca.get_beacon_address_list(protocol=protocol)
    ca._utils.get_manually_specified_beacon_addresses(protocol=protocol)
    ca._utils.get_manually_specified_client_addresses(protocol=protocol)
    ca.get_server_address_list(protocol=protocol)
@pytest.mark.parametrize(
    'addr, default_port, expected',
    [pytest.param('1.2.3.4:56', 8, ('1.2.3.4', 56)),
     pytest.param('1.2.3.4', 8, ('1.2.3.4', 8)),
     pytest.param('[::]:34', 8, ValueError),
     ]
)
def test_split_address(addr, default_port, expected):
    """Addresses split into (host, port); IPv6 bracket forms are rejected."""
    if expected in {ValueError, }:
        with pytest.raises(expected):
            ca._utils.get_address_and_port_from_string(addr, default_port)
        return
    assert ca._utils.get_address_and_port_from_string(addr, default_port) == expected
def patch_env(monkeypatch, env_vars):
"""Patch `get_environment_variables` for testing below."""
def get_env():
return env_vars
monkeypatch.setattr(ca._utils, 'get_environment_variables', get_env)
@pytest.mark.parametrize('protocol', list(ca.Protocol))
@pytest.mark.parametrize(
'default_port, env_auto, env_addr, expected',
[
pytest.param(
8088, 'YES', '1.2.3.4 1.2.3.4:556',
[('1.2.3.4', 8088),
('1.2.3.4', 556),
('255.255.255.255', 8088),
]
),
pytest.param(
8088, 'NO', '1.2.3.4 1.2.3.4:556',
[('1.2.3.4', 8088),
('1.2.3.4', 556),
]
),
],
)
def test_beacon_addresses(monkeypatch, protocol, default_port, env_auto,
env_addr, expected):
env = ca.get_environment_variables()
key = ca.Protocol(protocol).server_env_key
env[f'EPICS_{key}_BEACON_ADDR_LIST'] = env_addr
env[f'EPICS_{key}_AUTO_BEACON_ADDR_LIST'] = env_auto
if protocol == ca.Protocol.ChannelAccess:
env['EPICS_CAS_BEACON_PORT'] = int(default_port)
else:
env['EPICS_PVAS_BROADCAST_PORT'] = int(default_port)
patch_env(monkeypatch, env)
assert set(ca.get_beacon_address_list(protocol=protocol)) == set(expected)
@pytest.mark.parametrize('protocol', list(ca.Protocol))
@pytest.mark.parametrize(
'default_port, env_auto, env_addr, expected',
[
pytest.param(
8088, 'YES', '1.2.3.4 1.2.3.4:556',
[('1.2.3.4', 8088),
('1.2.3.4', 556),
('255.255.255.255', 8088),
]
),
pytest.param(
8088, 'NO', '1.2.3.4 1.2.3.4:556',
[('1.2.3.4', 8088),
('1.2.3.4', 556),
]
),
],
)
def test_client_addresses(monkeypatch, protocol, default_port, env_auto,
env_addr, expected):
env = ca.get_environment_variables()
# Easier to test without netifaces
monkeypatch.setattr(ca._utils, 'netifaces', None)
env[f'EPICS_{protocol}_ADDR_LIST'] = env_addr
env[f'EPICS_{protocol}_AUTO_ADDR_LIST'] = env_auto
if protocol == 'CA':
env['EPICS_CA_SERVER_PORT'] = int(default_port)
elif protocol == 'PVA':
env['EPICS_PVA_BROADCAST_PORT'] = int(default_port)
patch_env(monkeypatch, env)
assert set(ca.get_client_address_list(protocol=protocol)) == set(expected)
@pytest.mark.parametrize('protocol', list(ca.Protocol))
@pytest.mark.parametrize(
'env_addr, expected',
[
pytest.param('1.2.3.4', {'1.2.3.4'}, id='normal'),
pytest.param('1.2.3.4 1.2.3.4:556', {'1.2.3.4'}, id='ignore-port',
marks=pytest.mark.filterwarnings("ignore:Port specified"),
),
pytest.param('172.16.17.32 192.168.3.11:556', {'172.16.17.32', '192.168.3.11'},
id='ignore-port-1',
marks=pytest.mark.filterwarnings("ignore:Port specified"),
),
pytest.param('', ['0.0.0.0'], id='empty-list'),
],
)
def test_server_addresses(monkeypatch, protocol, env_addr, expected):
env = ca.get_environment_variables()
key = ca.Protocol(protocol).server_env_key
env[f'EPICS_{key}_INTF_ADDR_LIST'] = env_addr
patch_env(monkeypatch, env)
assert set(ca.get_server_address_list(protocol=protocol)) == set(expected) | caproto/tests/test_utils.py | import os
import pytest
import caproto as ca
from caproto._headers import MessageHeader
def test_broadcast_auto_address_list():
pytest.importorskip('netifaces')
env = os.environ.copy()
try:
os.environ['EPICS_CA_ADDR_LIST'] = ''
os.environ['EPICS_CA_AUTO_ADDR_LIST'] = 'YES'
expected = set(bcast for addr, bcast in ca.get_netifaces_addresses())
assert set(ca.get_address_list()) == expected
finally:
os.environ.clear()
os.environ.update(env)
def test_ensure_bytes():
assert ca.ensure_bytes('abc') == b'abc\0'
assert ca.ensure_bytes(b'abc\0') == b'abc\0'
with pytest.raises(ca.CaprotoTypeError):
ca.ensure_bytes(1)
_incr_sends = [
[(b'abc', b'def', b'ghi'),
0,
(b'abc', b'def', b'ghi')
],
[(b'abc', b'def', b'ghi'),
1,
(b'bc', b'def', b'ghi')
],
[(b'abc', b'def', b'ghi'),
3,
(b'def', b'ghi')
],
[(MessageHeader(0, 1, 2, 3, 4, 5), b'def'),
0,
(bytes(MessageHeader(0, 1, 2, 3, 4, 5)), b'def'),
],
[(MessageHeader(0, 1, 2, 3, 4, 5), b'def'),
5,
(bytes(MessageHeader(0, 1, 2, 3, 4, 5))[5:], b'def'),
],
]
@pytest.mark.parametrize('buffers, offset, expected', _incr_sends)
def test_buffer_list_slice(buffers, offset, expected):
assert ca.buffer_list_slice(*buffers, offset=offset) == expected
@pytest.mark.parametrize('buffers, offset, expected', _incr_sends)
def test_incremental_send(buffers, offset, expected):
full_bytes = b''.join(bytes(b) for b in buffers)
gen = ca.incremental_buffer_list_slice(*buffers)
gen.send(None)
for i in range(len(full_bytes)):
try:
buffers = gen.send(1)
except StopIteration:
assert i == (len(full_bytes) - 1), 'StopIteration unexpected'
break
assert full_bytes[i + 1:] == b''.join(bytes(b) for b in buffers)
records_to_check = [
['x.NAME', ('x.NAME', 'x', 'NAME', None)],
['x.', ('x', 'x', None, None)],
['x', ('x', 'x', None, None)],
['x.NAME$',
('x.NAME', 'x', 'NAME',
ca.RecordModifier(ca.RecordModifiers.long_string, None),
)],
['x.VAL{"ts":true}',
('x.VAL', 'x', 'VAL',
ca.RecordModifier(ca.RecordModifiers.filtered, '{"ts":true}')
)],
['x.{}',
('x', 'x', None,
ca.RecordModifier(ca.RecordModifiers.filtered, '{}'),
)],
['x.VAL{}',
('x.VAL', 'x', 'VAL',
ca.RecordModifier(ca.RecordModifiers.filtered, '{}'),
)],
['x.NAME${}',
('x.NAME', 'x', 'NAME',
ca.RecordModifier(ca.RecordModifiers.filtered |
ca.RecordModifiers.long_string, '{}'),
)],
]
@pytest.mark.parametrize('pvname, expected_tuple', records_to_check)
def test_parse_record(pvname, expected_tuple):
parsed = ca.parse_record_field(pvname)
print('parsed: ', tuple(parsed))
print('expected:', expected_tuple)
assert tuple(parsed) == expected_tuple
if parsed.modifiers:
modifiers, filter_text = parsed.modifiers
if filter_text:
# smoke test these
ca.parse_channel_filter(filter_text)
bad_filters = [
["x.{not-json}",
('x', 'x', None,
ca.RecordModifier(ca.RecordModifiers.filtered, '{not-json}'),
)],
['x.{"none":null}',
('x', 'x', None,
ca.RecordModifier(ca.RecordModifiers.filtered, '{"none":null}'),
)],
]
@pytest.mark.parametrize('pvname, expected_tuple', bad_filters)
def test_parse_record_bad_filters(pvname, expected_tuple):
parsed = ca.parse_record_field(pvname)
print('parsed: ', tuple(parsed))
print('expected:', expected_tuple)
assert tuple(parsed) == expected_tuple
modifiers, filter_text = parsed.modifiers
try:
filter_ = ca.parse_channel_filter(filter_text)
except ValueError:
# expected failure
...
else:
raise ValueError(f'Expected failure, instead returned {filter_}')
@pytest.mark.parametrize('protocol', list(ca.Protocol))
def test_env_util_smoke(protocol):
ca.get_environment_variables()
try:
ca.get_netifaces_addresses()
except RuntimeError:
# Netifaces may be unavailable
...
ca.get_address_list(protocol=protocol)
ca.get_beacon_address_list(protocol=protocol)
ca._utils.get_manually_specified_beacon_addresses(protocol=protocol)
ca._utils.get_manually_specified_client_addresses(protocol=protocol)
ca.get_server_address_list(protocol=protocol)
@pytest.mark.parametrize(
'addr, default_port, expected',
[pytest.param('1.2.3.4:56', 8, ('1.2.3.4', 56)),
pytest.param('1.2.3.4', 8, ('1.2.3.4', 8)),
pytest.param('[::]:34', 8, ValueError),
]
)
def test_split_address(addr, default_port, expected):
if expected in {ValueError, }:
with pytest.raises(expected):
ca._utils.get_address_and_port_from_string(addr, default_port)
return
assert ca._utils.get_address_and_port_from_string(addr, default_port) == expected
def patch_env(monkeypatch, env_vars):
"""Patch `get_environment_variables` for testing below."""
def get_env():
return env_vars
monkeypatch.setattr(ca._utils, 'get_environment_variables', get_env)
@pytest.mark.parametrize('protocol', list(ca.Protocol))
@pytest.mark.parametrize(
'default_port, env_auto, env_addr, expected',
[
pytest.param(
8088, 'YES', '1.2.3.4 1.2.3.4:556',
[('1.2.3.4', 8088),
('1.2.3.4', 556),
('255.255.255.255', 8088),
]
),
pytest.param(
8088, 'NO', '1.2.3.4 1.2.3.4:556',
[('1.2.3.4', 8088),
('1.2.3.4', 556),
]
),
],
)
def test_beacon_addresses(monkeypatch, protocol, default_port, env_auto,
env_addr, expected):
env = ca.get_environment_variables()
key = ca.Protocol(protocol).server_env_key
env[f'EPICS_{key}_BEACON_ADDR_LIST'] = env_addr
env[f'EPICS_{key}_AUTO_BEACON_ADDR_LIST'] = env_auto
if protocol == ca.Protocol.ChannelAccess:
env['EPICS_CAS_BEACON_PORT'] = int(default_port)
else:
env['EPICS_PVAS_BROADCAST_PORT'] = int(default_port)
patch_env(monkeypatch, env)
assert set(ca.get_beacon_address_list(protocol=protocol)) == set(expected)
@pytest.mark.parametrize('protocol', list(ca.Protocol))
@pytest.mark.parametrize(
'default_port, env_auto, env_addr, expected',
[
pytest.param(
8088, 'YES', '1.2.3.4 1.2.3.4:556',
[('1.2.3.4', 8088),
('1.2.3.4', 556),
('255.255.255.255', 8088),
]
),
pytest.param(
8088, 'NO', '1.2.3.4 1.2.3.4:556',
[('1.2.3.4', 8088),
('1.2.3.4', 556),
]
),
],
)
def test_client_addresses(monkeypatch, protocol, default_port, env_auto,
env_addr, expected):
env = ca.get_environment_variables()
# Easier to test without netifaces
monkeypatch.setattr(ca._utils, 'netifaces', None)
env[f'EPICS_{protocol}_ADDR_LIST'] = env_addr
env[f'EPICS_{protocol}_AUTO_ADDR_LIST'] = env_auto
if protocol == 'CA':
env['EPICS_CA_SERVER_PORT'] = int(default_port)
elif protocol == 'PVA':
env['EPICS_PVA_BROADCAST_PORT'] = int(default_port)
patch_env(monkeypatch, env)
assert set(ca.get_client_address_list(protocol=protocol)) == set(expected)
@pytest.mark.parametrize('protocol', list(ca.Protocol))
@pytest.mark.parametrize(
'env_addr, expected',
[
pytest.param('1.2.3.4', {'1.2.3.4'}, id='normal'),
pytest.param('1.2.3.4 1.2.3.4:556', {'1.2.3.4'}, id='ignore-port',
marks=pytest.mark.filterwarnings("ignore:Port specified"),
),
pytest.param('172.16.17.32 192.168.3.11:556', {'172.16.17.32', '192.168.3.11'},
id='ignore-port-1',
marks=pytest.mark.filterwarnings("ignore:Port specified"),
),
pytest.param('', ['0.0.0.0'], id='empty-list'),
],
)
def test_server_addresses(monkeypatch, protocol, env_addr, expected):
env = ca.get_environment_variables()
key = ca.Protocol(protocol).server_env_key
env[f'EPICS_{key}_INTF_ADDR_LIST'] = env_addr
patch_env(monkeypatch, env)
assert set(ca.get_server_address_list(protocol=protocol)) == set(expected) | 0.484624 | 0.378344 |
from tornado.testing import AsyncHTTPTestCase
import tornado.web
import tornado.httputil
import tornado.escape
from unittest.mock import Mock
from error import DoesNotExist
from format import JsonAttributeGroup
from group import UnixGroup
from .group import (
HttpRequestGroup,
Parameter,
)
from storage import UnixGroupStorage
class Defaults:
unix_group = UnixGroup(
name="groupname",
id_=10000,
members=("first-user", "second-user")
)
class Mocks:
def __init__(self):
self.storage = Mock(spec=UnixGroupStorage)
class GroupsTest(AsyncHTTPTestCase):
API_ENDPOINT = "/api/groups"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._mocks = Mocks()
def setUp(self):
super().setUp()
self._mocks.storage.reset_mock()
def get_app(self):
return tornado.web.Application(
handlers=[
(self.API_ENDPOINT, HttpRequestGroup, dict(group_storage=self._mocks.storage)),
])
def test_get_all_groups(self):
self._mocks.storage.get_all.return_value = [
Defaults.unix_group,
Defaults.unix_group
]
response = self.fetch(self.API_ENDPOINT, method="GET")
decoded_response = tornado.escape.json_decode(response.body)
self.assertEqual(200, response.code)
self.assertIn("all", decoded_response)
self.assertEqual(len(decoded_response["all"]), 2, "Expects to return two groups")
def test_get_all_groups_when_non_existing(self):
self._mocks.storage.get_all.return_value = []
response = self.fetch(self.API_ENDPOINT, method="GET")
decoded_response = tornado.escape.json_decode(response.body)
self.assertEqual(200, response.code)
self.assertIn("all", decoded_response)
self.assertEqual(len(decoded_response["all"]), 0, "Expects to return empty list")
def test_get_group_by_invalid_id(self):
url = tornado.httputil.url_concat(self.API_ENDPOINT, {
Parameter.USER_ID: "nan"
})
response = self.fetch(url, method="GET")
self.assertEqual(400, response.code)
def test_invalid_argument(self):
url = tornado.httputil.url_concat(self.API_ENDPOINT, {
"invalid-attribute": "nan"
})
response = self.fetch(url, method="GET")
self.assertEqual(400, response.code)
def test_get_group_by_id(self):
self._mocks.storage.get_by_id.return_value = Defaults.unix_group
url = tornado.httputil.url_concat(self.API_ENDPOINT, {
Parameter.USER_ID: "10000"
})
response = self.fetch(url, method="GET")
decoded_response = tornado.escape.json_decode(response.body)
self.assertEqual(200, response.code)
self._assert_attributes(decoded_response)
def test_get_group_by_name(self):
self._mocks.storage.get_by_name.return_value = Defaults.unix_group
url = tornado.httputil.url_concat(self.API_ENDPOINT, {
Parameter.USER_NAME: "user"
})
response = self.fetch(url, method="GET")
decoded_response = tornado.escape.json_decode(response.body)
self.assertEqual(200, response.code)
self._assert_attributes(decoded_response)
def _assert_attributes(self, decoded_response):
self.assertEqual(Defaults.unix_group.id, decoded_response[JsonAttributeGroup.gid])
self.assertEqual(Defaults.unix_group.name, decoded_response[JsonAttributeGroup.name])
self.assertEqual(Defaults.unix_group.id, decoded_response[JsonAttributeGroup.gid])
self.assertSetEqual(set(Defaults.unix_group.members), set(decoded_response[JsonAttributeGroup.members]))
def test_get_non_existing_group_by_name(self):
self._mocks.storage.get_by_name = Mock(side_effect=DoesNotExist())
url = tornado.httputil.url_concat(self.API_ENDPOINT, {
Parameter.USER_NAME: "user"
})
response = self.fetch(url, method="GET")
self.assertEqual(404, response.code)
def test_get_non_existing_group_by_gid(self):
self._mocks.storage.get_by_id = Mock(side_effect=DoesNotExist())
url = tornado.httputil.url_concat(self.API_ENDPOINT, {
Parameter.USER_ID: "10000"
})
response = self.fetch(url, method="GET")
self.assertEqual(404, response.code) | src/unix_accounts/http_request/group_test.py | from tornado.testing import AsyncHTTPTestCase
import tornado.web
import tornado.httputil
import tornado.escape
from unittest.mock import Mock
from error import DoesNotExist
from format import JsonAttributeGroup
from group import UnixGroup
from .group import (
HttpRequestGroup,
Parameter,
)
from storage import UnixGroupStorage
class Defaults:
unix_group = UnixGroup(
name="groupname",
id_=10000,
members=("first-user", "second-user")
)
class Mocks:
def __init__(self):
self.storage = Mock(spec=UnixGroupStorage)
class GroupsTest(AsyncHTTPTestCase):
API_ENDPOINT = "/api/groups"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._mocks = Mocks()
def setUp(self):
super().setUp()
self._mocks.storage.reset_mock()
def get_app(self):
return tornado.web.Application(
handlers=[
(self.API_ENDPOINT, HttpRequestGroup, dict(group_storage=self._mocks.storage)),
])
def test_get_all_groups(self):
self._mocks.storage.get_all.return_value = [
Defaults.unix_group,
Defaults.unix_group
]
response = self.fetch(self.API_ENDPOINT, method="GET")
decoded_response = tornado.escape.json_decode(response.body)
self.assertEqual(200, response.code)
self.assertIn("all", decoded_response)
self.assertEqual(len(decoded_response["all"]), 2, "Expects to return two groups")
def test_get_all_groups_when_non_existing(self):
self._mocks.storage.get_all.return_value = []
response = self.fetch(self.API_ENDPOINT, method="GET")
decoded_response = tornado.escape.json_decode(response.body)
self.assertEqual(200, response.code)
self.assertIn("all", decoded_response)
self.assertEqual(len(decoded_response["all"]), 0, "Expects to return empty list")
def test_get_group_by_invalid_id(self):
url = tornado.httputil.url_concat(self.API_ENDPOINT, {
Parameter.USER_ID: "nan"
})
response = self.fetch(url, method="GET")
self.assertEqual(400, response.code)
def test_invalid_argument(self):
url = tornado.httputil.url_concat(self.API_ENDPOINT, {
"invalid-attribute": "nan"
})
response = self.fetch(url, method="GET")
self.assertEqual(400, response.code)
def test_get_group_by_id(self):
self._mocks.storage.get_by_id.return_value = Defaults.unix_group
url = tornado.httputil.url_concat(self.API_ENDPOINT, {
Parameter.USER_ID: "10000"
})
response = self.fetch(url, method="GET")
decoded_response = tornado.escape.json_decode(response.body)
self.assertEqual(200, response.code)
self._assert_attributes(decoded_response)
def test_get_group_by_name(self):
self._mocks.storage.get_by_name.return_value = Defaults.unix_group
url = tornado.httputil.url_concat(self.API_ENDPOINT, {
Parameter.USER_NAME: "user"
})
response = self.fetch(url, method="GET")
decoded_response = tornado.escape.json_decode(response.body)
self.assertEqual(200, response.code)
self._assert_attributes(decoded_response)
def _assert_attributes(self, decoded_response):
self.assertEqual(Defaults.unix_group.id, decoded_response[JsonAttributeGroup.gid])
self.assertEqual(Defaults.unix_group.name, decoded_response[JsonAttributeGroup.name])
self.assertEqual(Defaults.unix_group.id, decoded_response[JsonAttributeGroup.gid])
self.assertSetEqual(set(Defaults.unix_group.members), set(decoded_response[JsonAttributeGroup.members]))
def test_get_non_existing_group_by_name(self):
self._mocks.storage.get_by_name = Mock(side_effect=DoesNotExist())
url = tornado.httputil.url_concat(self.API_ENDPOINT, {
Parameter.USER_NAME: "user"
})
response = self.fetch(url, method="GET")
self.assertEqual(404, response.code)
def test_get_non_existing_group_by_gid(self):
self._mocks.storage.get_by_id = Mock(side_effect=DoesNotExist())
url = tornado.httputil.url_concat(self.API_ENDPOINT, {
Parameter.USER_ID: "10000"
})
response = self.fetch(url, method="GET")
self.assertEqual(404, response.code) | 0.753829 | 0.174903 |
# # S_FacRepNormTest [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=S_FacRepNormTest&codeLang=Python)
# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=eb-fac-rep-port-norm).
# ## Prepare the environment
# +
import os
import os.path as path
import sys
sys.path.append(path.abspath('../../functions-legacy'))
import numpy as np
from numpy import arange, array, ones, zeros, diag, eye, tile, r_
from numpy.linalg import solve
from numpy.random import rand
from numpy.random import multivariate_normal as mvnrnd
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure, plot, legend, scatter, ylabel, \
xlabel, title
plt.style.use('seaborn')
from ARPM_utils import save_plot
from MultivRsquare import MultivRsquare
# input parameters
n_ = 500 # max market dimension
nstep = arange(10, n_+25,25) # market dimension steps
j_ = 1000 # number of simulations
k_ = 1 # number of factors
sig2_Z_ = 1 # factor variance
r = 0.02 # risk-free rate
stepsize = len(nstep)
R2 = zeros((stepsize, 1))
for n in range(stepsize):
# ## Generate a sample from the joint distribution of the factor and the residuals
mu_Z_U = zeros((k_ + nstep[n], 1)) # expectation
sig_Z_U = zeros((k_, nstep[n])) # systematic condition
d = rand(nstep[n], 1) # residuals standard deviations
sig2_U = np.diagflat(d * d) # idiosyncratic condition
sig2_Z_U = r_[r_['-1',array([[sig2_Z_]]), sig_Z_U], r_['-1',sig_Z_U.T, sig2_U]] # covariance
Z_U = mvnrnd(mu_Z_U.flatten(), sig2_Z_U, j_)
Z_U = Z_U.T # ensure Z_U is n_ x nsim
Z_ = Z_U[0] # factor sample
# ## Compute the P&L's: P = alpha + beta@Z_ + U
alpha = rand(nstep[n], 1) # shift parameter (P&L's expectation)
beta = rand(nstep[n], k_) # loadings
i_n = eye(nstep[n])
P = tile(alpha, (1, j_)) + r_['-1',beta, i_n]@Z_U # sample
sig2_P = beta@array([[sig2_Z_]])@beta.T + sig2_U # (low-rank diagonal) covariance
# ## Compute the sample of the factor-replicating portfolio
s2 = i_n
betap = solve(beta.T@s2@beta,beta.T@s2) # pseudo inverse of beta
P_Z = betap@P # sample
mu_P_Z = betap@alpha # expectation
sig2_P_Z = betap@sig2_P@betap.T # covariance
# ## Compute premium via APT
v = ones((nstep[n], 1)) # current values of P&L's
lam = betap@(alpha - r*v)
Z = Z_ + lam # shifted factors
# ## Compute the r-square at dimension nstep[n]
sig2_U_Z_ = <EMAIL> # covariance of P_Z - r@ betap@v - lam - Z_
sigvec_Z_ = diag(array([sig2_Z_]))
R2[n] = MultivRsquare(sig2_U_Z_, array([[sig2_Z_]]), np.diagflat(1 / sigvec_Z_))
# -
# ## Scatter plot of factor plus premium vs factor replicating portfolios P&L's in excess of the risk-free investement
figure()
scatter(Z, P_Z - r*betap@v, marker='.',s=0.5)
scatter(lam, mu_P_Z - r*betap@v, marker='.', color='r', s=50)
xlabel('Z')
ylabel('Excess PL factor replicating portfolio')
title('Scatter plot for n = %d' % n_)
legend(['sample', 'expectation']);
# save_plot(ax=plt.gca(), extension='png', scriptname=os.path.basename('.')[:-3], count=plt.get_fignums()[-1])
# ## Plot the r-squares for each market dimension
# +
figure()
plot(nstep, R2, 'r', lw=1.2)
plot(nstep, ones(stepsize), 'b', lw=2)
xlabel('n')
ylabel('r-square')
title('Factor-replicating portfolio convergence');
# save_plot(ax=plt.gca(), extension='png', scriptname=os.path.basename('.')[:-3], count=plt.get_fignums()[-1]) | scripts/sources/S_FacRepNormTest.py |
# # S_FacRepNormTest [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=S_FacRepNormTest&codeLang=Python)
# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=eb-fac-rep-port-norm).
# ## Prepare the environment
# +
import os
import os.path as path
import sys
sys.path.append(path.abspath('../../functions-legacy'))
import numpy as np
from numpy import arange, array, ones, zeros, diag, eye, tile, r_
from numpy.linalg import solve
from numpy.random import rand
from numpy.random import multivariate_normal as mvnrnd
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure, plot, legend, scatter, ylabel, \
xlabel, title
plt.style.use('seaborn')
from ARPM_utils import save_plot
from MultivRsquare import MultivRsquare
# input parameters
n_ = 500 # max market dimension
nstep = arange(10, n_+25,25) # market dimension steps
j_ = 1000 # number of simulations
k_ = 1 # number of factors
sig2_Z_ = 1 # factor variance
r = 0.02 # risk-free rate
stepsize = len(nstep)
R2 = zeros((stepsize, 1))
for n in range(stepsize):
# ## Generate a sample from the joint distribution of the factor and the residuals
mu_Z_U = zeros((k_ + nstep[n], 1)) # expectation
sig_Z_U = zeros((k_, nstep[n])) # systematic condition
d = rand(nstep[n], 1) # residuals standard deviations
sig2_U = np.diagflat(d * d) # idiosyncratic condition
sig2_Z_U = r_[r_['-1',array([[sig2_Z_]]), sig_Z_U], r_['-1',sig_Z_U.T, sig2_U]] # covariance
Z_U = mvnrnd(mu_Z_U.flatten(), sig2_Z_U, j_)
Z_U = Z_U.T # ensure Z_U is n_ x nsim
Z_ = Z_U[0] # factor sample
# ## Compute the P&L's: P = alpha + beta@Z_ + U
alpha = rand(nstep[n], 1) # shift parameter (P&L's expectation)
beta = rand(nstep[n], k_) # loadings
i_n = eye(nstep[n])
P = tile(alpha, (1, j_)) + r_['-1',beta, i_n]@Z_U # sample
sig2_P = beta@array([[sig2_Z_]])@beta.T + sig2_U # (low-rank diagonal) covariance
# ## Compute the sample of the factor-replicating portfolio
s2 = i_n
betap = solve(beta.T@s2@beta,beta.T@s2) # pseudo inverse of beta
P_Z = betap@P # sample
mu_P_Z = betap@alpha # expectation
sig2_P_Z = betap@sig2_P@betap.T # covariance
# ## Compute premium via APT
v = ones((nstep[n], 1)) # current values of P&L's
lam = betap@(alpha - r*v)
Z = Z_ + lam # shifted factors
# ## Compute the r-square at dimension nstep[n]
sig2_U_Z_ = <EMAIL> # covariance of P_Z - r@ betap@v - lam - Z_
sigvec_Z_ = diag(array([sig2_Z_]))
R2[n] = MultivRsquare(sig2_U_Z_, array([[sig2_Z_]]), np.diagflat(1 / sigvec_Z_))
# -
# ## Scatter plot of factor plus premium vs factor replicating portfolios P&L's in excess of the risk-free investement
figure()
scatter(Z, P_Z - r*betap@v, marker='.',s=0.5)
scatter(lam, mu_P_Z - r*betap@v, marker='.', color='r', s=50)
xlabel('Z')
ylabel('Excess PL factor replicating portfolio')
title('Scatter plot for n = %d' % n_)
legend(['sample', 'expectation']);
# save_plot(ax=plt.gca(), extension='png', scriptname=os.path.basename('.')[:-3], count=plt.get_fignums()[-1])
# ## Plot the r-squares for each market dimension
# +
figure()
plot(nstep, R2, 'r', lw=1.2)
plot(nstep, ones(stepsize), 'b', lw=2)
xlabel('n')
ylabel('r-square')
title('Factor-replicating portfolio convergence');
# save_plot(ax=plt.gca(), extension='png', scriptname=os.path.basename('.')[:-3], count=plt.get_fignums()[-1]) | 0.686475 | 0.675186 |
import click
from flask import Flask,request
import os
import random
from DBInsertion import DBInsertion
from datetime import datetime,timedelta
current_directory= os.path.dirname(__file__)
database_file_path = os.path.join(current_directory, "../DBScript/CMD2WEB.sqlite")
database_object = DBInsertion(database_file_path)
@click.group()
# @click.option('-i', '--input', type=click.File('r'))
def cli():
"""Command line interface for database interaction."""
pass
@cli.command()
@click.option('--gname', help='The group name.')
@click.option('--gtype', help='Type of the group.')
@click.option('--restricted', help='If the group is restricted group. 1 if group is restricted ,0 if not restricted.')
def createGroup(gname, gtype,restricted):
"""Description:Create a new group. \n
Input parameters required: \n
- gname - Group Name \n
- gytpe - Group Type \n
- restricted - Boolean indicating whether group is restricted \n
Example Usage: python DBCommandLineTool.py creategroup --gname=DummyGroup --gtype=Test --restricted=1 """
group_name = gname
group_type = gtype
if (group_name != None and restricted != None):
# insert
print(group_name, group_type, restricted)
database_object.insert_group(group_name, group_type, restricted)
print("Group {0} created".format(group_name))
else:
print(group_name, group_type, restricted)
print("Parameter missing")
# click.echo('Hello %s! - %s! - %d' % gname, gtype,restricted)
@cli.command()
@click.option('--gid', help='The group id.')
@click.option('--token', help='Token for the user associated to a group.Format (mm-dd-yyyy).')
@click.option('--expiry', help='Expiry date for the token.')
@click.option('--email', help='Email id of the user.')
def createKeyByGroupID(gid, token, expiry, email):
"""Description:Create new token by group id.\n
Input parameters required: \n
- gid - Group ID \n
- token - Token for the user \n
- expiry - Token expiry date \n
- email - Email id of the user\n
Example Usage: python DBCommandLineTool.py createkeybygroupid --gid=9 --token=<PASSWORD> --expiry=04-27-2019 --email=<EMAIL>"""
group_id = gid
token = token
expiry = expiry
user_email = email
if(expiry==None):
expiry = getNewDate()
if(token==None):
token = generateNewToken()
if (group_id != None and token != None and expiry != None and user_email != None):
database_object.insert_key(group_id, token, expiry, user_email)
print("Token:{0} inserted for the user:{1} with expiry:{2}".format(token, user_email, expiry))
else:
print("Parameter missing")
# click.echo('Hello %s! - %s! - %s! - %s!' % gid, token, expiry, email)
# Generate new date
def getNewDate():
newdate = datetime.now() + timedelta(days=365)
expiry = newdate.strftime('%m-%d-%Y')
return expiry
# Generate new random token
def generateNewToken():
new_token= random.randint(10000000,99999999)
if(database_object.check_token_exists(new_token)):
return generateNewToken()
else:
return new_token
@cli.command()
@click.option('--gname', help='The group name.')
@click.option('--token', help='Token for the user associated to a group.')
@click.option('--expiry', help='Expiry date for the token. Format (mm-dd-yyyy).')
@click.option('--email', help='Email id of the user.')
def createKeyByGroupName(gname, token, expiry, email):
"""Description:Create new token by group name. \n
Input parameters required: \n
- gname - Group Name \n
- token - Token for the user \n
- expiry - Token expiry date \n
- email - Email id of the user\n
Example Usage: python DBCommandLineTool.py createkeybygroupname --gname=DummyGroup --token=<PASSWORD> --expiry=05-27-2019 --email=<EMAIL> """
group_name = gname
token = token
expiry = expiry
user_email = email
if(expiry==None):
expiry = getNewDate()
if(token==None):
token = generateNewToken()
group_id = None
if (group_name != None):
# get group id
group_id = database_object.get_group_name_from_id(group_name)
else:
return "No group name"
if (group_id != None and token != None and expiry != None and user_email != None):
database_object.insert_key(group_id, token, expiry, user_email)
print("Token:{0} inserted for the user:{1} with expiry:{2}".format(token, user_email, expiry))
else:
print("Parameter missing")
@cli.command()
@click.option('--gname', help='The group name.')
def deleteGroup(gname):
"""Description:Delete group by name.\n
Input parameters required: \n
- gname - Group Name \n
Example Usage: python DBCommandLineTool.py deletegroup --gname=DummyGroup"""
group_name = gname
if (group_name != None):
database_object.delete_group(group_name)
print("Deleted group {0}".format(group_name))
else:
print("Check group info")
# click.echo('Hello %s! - %s! - %s! - %s!' % gname, token, expiry, email)
@cli.command()
@click.option('--gname', help='The group name.')
def deleteKeyByGroup(gname):
"""Description:Delete key by group name.\n
Input parameters required: \n
- gname - Group Name \n
Example Usage: python DBCommandLineTool.py deletekeybygroup --gname=DummyGroup"""
group_name = gname
if (group_name != None):
database_object.delete_group_keys(group_name)
print("Deleted keys for group {0}".format(group_name))
else:
print("Check group info")
# click.echo('Hello %s! - %s! - %s! - %s!' % gname, token, expiry, email)
@cli.command()
@click.option('--email', help='The user email.')
def deleteKeyByUser(email):
"""Description:Delete key by group user.\n
Input parameters required: \n
- email - email id of the user \n
Example Usage: python DBCommandLineTool.py deletekeybyuser --email=<EMAIL>"""
user_email = email
if (user_email != None):
database_object.delete_user_keys(user_email)
print("Deleted keys for user {0}".format(user_email))
else:
print("Check group info")
# click.echo('Hello %s! - %s! - %s! - %s!' % gname, token, expiry, email)
@cli.command()
@click.option('--email', help='The user email.')
def getKeyByUser(email):
"""Description:Get Keys by User.\n
Input parameters required: \n
- email - email id of the user \n
Example Usage: python DBCommandLineTool.py getkeybyuser --email=<EMAIL>"""
user_email = email
if (user_email != None):
result = database_object.get_user_keys(user_email)
print(result)
else:
print("Check group info")
@cli.command()
@click.option('--gname', help='The Group name.')
def getKeyByGroupName(gname):
"""Description:Get keys by Group.\n
Input parameters required: \n
- gname - Group name \n
Example Usage: python DBCommandLineTool.py getkeybygroupname --gname=DummyGroup"""
group_name = gname
if (group_name != None):
result = database_object.get_user_keys_by_group_name(group_name)
print(result)
else:
print("Check group info")
@cli.command()
def getGroupList():
"""Description:Get all the groups.\n
Input parameters required: \n
None \n
Example Usage: python DBCommandLineTool.py getgrouplist """
result = database_object.get_group_list()
print(result)
@cli.command()
def getKeyList():
"""Description:Get all the keys.\n
Input parameters required: \n
None \n
Example Usage: python DBCommandLineTool.py getkeylist """
result = database_object.get_key_list()
print(result)
if __name__ == '__main__':
cli() | DBOperations/DBCommandLineTool.py | import click
from flask import Flask,request
import os
import random
from DBInsertion import DBInsertion
from datetime import datetime,timedelta
current_directory= os.path.dirname(__file__)
database_file_path = os.path.join(current_directory, "../DBScript/CMD2WEB.sqlite")
database_object = DBInsertion(database_file_path)
@click.group()
# @click.option('-i', '--input', type=click.File('r'))
def cli():
"""Command line interface for database interaction."""
pass
@cli.command()
@click.option('--gname', help='The group name.')
@click.option('--gtype', help='Type of the group.')
@click.option('--restricted', help='If the group is restricted group. 1 if group is restricted ,0 if not restricted.')
def createGroup(gname, gtype, restricted):
    """Description:Create a new group. \n
    Input parameters required: \n
    - gname - Group Name \n
    - gtype - Group Type \n
    - restricted - Boolean indicating whether group is restricted \n
    Example Usage: python DBCommandLineTool.py creategroup --gname=DummyGroup --gtype=Test --restricted=1 """
    group_name = gname
    group_type = gtype
    # gtype is intentionally not validated here: only the name and the
    # restricted flag are required before inserting.
    if group_name is not None and restricted is not None:
        print(group_name, group_type, restricted)
        database_object.insert_group(group_name, group_type, restricted)
        print("Group {0} created".format(group_name))
    else:
        # Echo whatever was supplied to help the user spot the missing option.
        print(group_name, group_type, restricted)
        print("Parameter missing")
@cli.command()
@click.option('--gid', help='The group id.')
@click.option('--token', help='Token for the user associated to a group.')
@click.option('--expiry', help='Expiry date for the token. Format (mm-dd-yyyy).')
@click.option('--email', help='Email id of the user.')
def createKeyByGroupID(gid, token, expiry, email):
    """Description:Create new token by group id.\n
    Input parameters required: \n
    - gid - Group ID \n
    - token - Token for the user (random token generated if omitted) \n
    - expiry - Token expiry date (defaults to one year from today) \n
    - email - Email id of the user\n
    Example Usage: python DBCommandLineTool.py createkeybygroupid --gid=9 --token=<PASSWORD> --expiry=04-27-2019 --email=<EMAIL>"""
    # Fill in defaults: one year of validity and a fresh random token.
    if expiry is None:
        expiry = getNewDate()
    if token is None:
        token = generateNewToken()
    # token and expiry are guaranteed non-None after the defaults above,
    # so only gid and email still need validating.
    if gid is not None and email is not None:
        database_object.insert_key(gid, token, expiry, email)
        print("Token:{0} inserted for the user:{1} with expiry:{2}".format(token, email, expiry))
    else:
        print("Parameter missing")
def getNewDate():
    """Return a default token expiry date: one year from today, mm-dd-yyyy."""
    one_year_out = datetime.now() + timedelta(days=365)
    return one_year_out.strftime('%m-%d-%Y')
def generateNewToken():
    """Return an 8-digit random token that is not already in the database.

    Re-draws on collision.  Iterative rather than recursive so a long run of
    collisions cannot exhaust the interpreter's recursion limit.
    """
    while True:
        new_token = random.randint(10000000, 99999999)
        if not database_object.check_token_exists(new_token):
            return new_token
@cli.command()
@click.option('--gname', help='The group name.')
@click.option('--token', help='Token for the user associated to a group.')
@click.option('--expiry', help='Expiry date for the token. Format (mm-dd-yyyy).')
@click.option('--email', help='Email id of the user.')
def createKeyByGroupName(gname, token, expiry, email):
    """Description:Create new token by group name. \n
    Input parameters required: \n
    - gname - Group Name \n
    - token - Token for the user (random token generated if omitted) \n
    - expiry - Token expiry date (defaults to one year from today) \n
    - email - Email id of the user\n
    Example Usage: python DBCommandLineTool.py createkeybygroupname --gname=DummyGroup --token=<PASSWORD> --expiry=05-27-2019 --email=<EMAIL> """
    if gname is None:
        # Bail out before generating defaults: without a group name there is
        # nothing to insert (original also returned this string here).
        return "No group name"
    if expiry is None:
        expiry = getNewDate()
    if token is None:
        token = generateNewToken()
    # NOTE(review): the helper name suggests id->name, but it is used here to
    # resolve a name to an id -- confirm get_group_name_from_id's direction.
    group_id = database_object.get_group_name_from_id(gname)
    if group_id is not None and email is not None:
        database_object.insert_key(group_id, token, expiry, email)
        print("Token:{0} inserted for the user:{1} with expiry:{2}".format(token, email, expiry))
    else:
        print("Parameter missing")
@cli.command()
@click.option('--gname', help='The group name.')
def deleteGroup(gname):
    """Description:Delete group by name.\n
    Input parameters required: \n
    - gname - Group Name \n
    Example Usage: python DBCommandLineTool.py deletegroup --gname=DummyGroup"""
    # Guard clause: nothing to delete without a group name.
    if gname is None:
        print("Check group info")
        return
    database_object.delete_group(gname)
    print("Deleted group {0}".format(gname))
@cli.command()
@click.option('--gname', help='The group name.')
def deleteKeyByGroup(gname):
    """Description:Delete key by group name.\n
    Input parameters required: \n
    - gname - Group Name \n
    Example Usage: python DBCommandLineTool.py deletekeybygroup --gname=DummyGroup"""
    # Guard clause: a group name is required to select the keys to remove.
    if gname is None:
        print("Check group info")
        return
    database_object.delete_group_keys(gname)
    print("Deleted keys for group {0}".format(gname))
@cli.command()
@click.option('--email', help='The user email.')
def deleteKeyByUser(email):
    """Description:Delete key by group user.\n
    Input parameters required: \n
    - email - email id of the user \n
    Example Usage: python DBCommandLineTool.py deletekeybyuser --email=<EMAIL>"""
    # Guard clause: an email is required to select the keys to remove.
    if email is None:
        print("Check group info")
        return
    database_object.delete_user_keys(email)
    print("Deleted keys for user {0}".format(email))
@cli.command()
@click.option('--email', help='The user email.')
def getKeyByUser(email):
    """Description:Get Keys by User.\n
    Input parameters required: \n
    - email - email id of the user \n
    Example Usage: python DBCommandLineTool.py getkeybyuser --email=<EMAIL>"""
    if email is not None:
        print(database_object.get_user_keys(email))
    else:
        # Fixed copy-pasted message: this command validates a user email,
        # not a group name.
        print("Check user info")
@cli.command()
@click.option('--gname', help='The Group name.')
def getKeyByGroupName(gname):
    """Description:Get keys by Group.\n
    Input parameters required: \n
    - gname - Group name \n
    Example Usage: python DBCommandLineTool.py getkeybygroupname --gname=DummyGroup"""
    # Guard clause: a group name is required to look anything up.
    if gname is None:
        print("Check group info")
        return
    print(database_object.get_user_keys_by_group_name(gname))
@cli.command()
def getGroupList():
    """Description:Get all the groups.\n
    Input parameters required: \n
    None \n
    Example Usage: python DBCommandLineTool.py getgrouplist """
    # No arguments: simply dump every group row.
    print(database_object.get_group_list())
@cli.command()
def getKeyList():
    """Description:Get all the keys.\n
    Input parameters required: \n
    None \n
    Example Usage: python DBCommandLineTool.py getkeylist """
    # No arguments: simply dump every key row.
    print(database_object.get_key_list())
if __name__ == '__main__':
cli() | 0.419648 | 0.055669 |
from datetime import datetime
from app.models.model import *
from flask_login import UserMixin,AnonymousUserMixin
from flask import current_app
from werkzeug.security import generate_password_hash, check_password_hash
from app.includes import file
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer

# Extra table-name prefix for this module's tables, appended after db.PREFIX.
PREFIX = ""
class User(UserMixin, db.Model):
    """User account model.

    NOTE(review): ``password`` is declared both as a Column and (below) as a
    write-blocked property; the property shadows the column attribute on the
    class, and its setter writes ``password_hash``, which is not a declared
    column.  ``add()`` additionally hashes before assigning to the hashing
    setter (double hash), while ``verify_password`` reads ``self.password``,
    which the property getter makes unreadable.  These inconsistencies predate
    this review -- confirm the intended storage column against the real schema
    before refactoring.
    """
    __tablename__ = db.PREFIX + PREFIX + "user"
    __table_args__ = {
        "mysql_engine": "InnoDB",
        "mysql_charset": "utf8"
    }
    id = db.Column(db.Integer, primary_key=True, nullable=False)
    username = db.Column(db.String(255), unique=True, nullable=False, index=True, default="")
    nickname = db.Column(db.String(255), nullable=False, default="")
    password = db.Column(db.String(255), default="")
    avatar = db.Column(db.String(255), default="")
    # True once the user has confirmed their e-mail address (see confirm()).
    confirmed = db.Column(db.Boolean, default=False)
    email = db.Column(db.String(64), unique=True, index=True)
    updatetime = db.Column(db.DateTime, default=datetime.now, nullable=False)
    timestamp = db.Column(db.DateTime, default=datetime.now, nullable=False)
    books = db.relationship("Book", backref="user", lazy="dynamic")
    role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))

    def __init__(self, **kwargs):
        """Assign a default role: Administrator for FLASKY_ADMIN, else the default role."""
        super(User, self).__init__(**kwargs)
        if self.role is None:
            if self.email == current_app.config['FLASKY_ADMIN']:
                self.role = Role.query.filter_by(name='Administrator').first()
            if self.role is None:
                self.role = Role.query.filter_by(default=True).first()

    @property
    def password(self):
        # Write-only attribute: plain passwords are never stored or exposed.
        raise AttributeError('password is not a readable attribute')

    @password.setter
    def password(self, password):
        self.password_hash = generate_password_hash(password)

    @staticmethod
    def add(username, password):
        """Create a user with a hashed password; returns None if the name is taken."""
        user = User.query.filter_by(username=username).first()
        if user is not None:
            return
        user = User()
        user.username = username
        user.nickname = username
        user.password = generate_password_hash(password)
        user.avatar = file.new_avatar()
        db.session.add(user)
        db.session.commit()
        return user

    @staticmethod
    def get(id):
        return User.query.filter_by(id=id).first()

    @staticmethod
    def getbyname(username):
        return User.query.filter_by(username=username).first()

    @staticmethod
    def page(page, per_page):
        return User.query.paginate(page,
                                   per_page=per_page, error_out=False)

    def setting(self, nickname):
        self.nickname = nickname

    def change_password(self, password):
        # Restored from redacted source: hash the new password before storing.
        self.password = generate_password_hash(password)

    def verify_password(self, password):
        return check_password_hash(self.password, password)

    def page_book(self, page, per_page):
        """Paginate this user's books, newest published first."""
        from .book import Book
        books = Book.query.filter_by(user_id=self.id)\
            .options(db.Load(Book).undefer("brief"))\
            .order_by(Book.publish_timestamp.desc())\
            .paginate(page, per_page=per_page, error_out=False)
        return books

    def page_draft(self, page, per_page):
        """Paginate books edited after their last publication (drafts)."""
        from .book import Book
        books = Book.query.filter_by(user_id=self.id)\
            .filter(Book.updatetime > Book.publish_timestamp)\
            .options(db.Load(Book).undefer("brief"))\
            .order_by(Book.publish_timestamp.desc())\
            .paginate(page, per_page=per_page, error_out=False)
        return books

    def count_book(self):
        return self.books.count()

    def count_draft(self):
        """Number of books edited after their last publication."""
        from .book import Book
        num = Book.query.filter_by(user_id=self.id)\
            .filter(Book.updatetime > Book.publish_timestamp)\
            .count()
        return num

    def _20px_avatar(self):
        """URL path of the 20x20 thumbnail of this user's avatar."""
        image_path = current_app.config["AVATAR_PATH"]
        return "/".join([image_path, "20_20_{}".format(self.avatar)])

    def _50px_avatar(self):
        """URL path of the 50x50 thumbnail of this user's avatar."""
        image_path = current_app.config["AVATAR_PATH"]
        return "/".join([image_path, "50_50_{}".format(self.avatar)])

    def origin_avatar(self):
        """URL path of the full-size avatar image."""
        image_path = current_app.config["AVATAR_PATH"]
        return "/".join([image_path, self.avatar])

    def can(self, perm):
        return self.role is not None and self.role.has_permission(perm)

    def is_administrator(self):
        return self.can(Permission.ADMIN)

    def generate_confirmation_token(self, expiration=3600):
        s = Serializer(current_app.config['SECRET_KEY'], expiration)
        return s.dumps({'confirm': self.id}).decode('utf-8')

    def confirm(self, token):
        """Validate an e-mail confirmation token and mark the account confirmed."""
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token.encode('utf-8'))
        except Exception:
            # Bad signature / expired token: treat as not confirmed.
            return False
        if data.get('confirm') != self.id:
            return False
        self.confirmed = True
        db.session.add(self)
        return True

    def generate_reset_token(self, expiration=3600):
        s = Serializer(current_app.config['SECRET_KEY'], expiration)
        return s.dumps({'reset': self.id}).decode('utf-8')

    @staticmethod
    def reset_password(token, new_password):
        """Reset the password for the user encoded in *token*."""
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token.encode('utf-8'))
        except Exception:
            return False
        user = User.query.get(data.get('reset'))
        if user is None:
            return False
        # Restored from redacted source: assign the plain new password; the
        # password setter performs the hashing.
        user.password = new_password
        db.session.add(user)
        return True

    def generate_email_change_token(self, new_email, expiration=3600):
        s = Serializer(current_app.config['SECRET_KEY'], expiration)
        return s.dumps(
            {'change_email': self.id, 'new_email': new_email}).decode('utf-8')

    def change_email(self, token):
        """Validate an e-mail-change token and apply the new address."""
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token.encode('utf-8'))
        except Exception:
            return False
        if data.get('change_email') != self.id:
            return False
        new_email = data.get('new_email')
        if new_email is None:
            return False
        if self.query.filter_by(email=new_email).first() is not None:
            return False
        self.email = new_email
        # NOTE(review): gravatar_hash()/avatar_hash are not defined on this
        # class in this file -- confirm they exist elsewhere or this raises.
        self.avatar_hash = self.gravatar_hash()
        db.session.add(self)
        return True
class AnonymousUser(AnonymousUserMixin):
    """Stand-in for unauthenticated visitors: never has any permission."""

    def can(self, permissions):
        # Anonymous visitors hold no permissions at all.
        return False

    def is_administrator(self):
        return False
login_manager.anonymous_user = AnonymousUser
@login_manager.user_loader
def load_user(id):
if current_app.start:
return User.query.get(int(id))
return | app/models/user.py | from datetime import datetime
from app.models.model import *
from flask_login import UserMixin,AnonymousUserMixin
from flask import current_app
from werkzeug.security import generate_password_hash, check_password_hash
from app.includes import file
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
PREFIX = ""
class User(UserMixin, db.Model):
""" user table """
__tablename__ = db.PREFIX + PREFIX + "user"
__table_args__ = {
"mysql_engine": "InnoDB",
"mysql_charset": "utf8"
}
id = db.Column(db.Integer, primary_key = True, nullable=False)
username = db.Column(db.String(255), unique=True, nullable=False, index=True, default="")
nickname = db.Column(db.String(255), nullable = False, default="")
password = db.Column(db.String(255), default="")
avatar = db.Column(db.String(255), default="")
confirmed = db.Column(db.Boolean, default=False)
email = db.Column(db.String(64), unique=True, index=True)
updatetime = db.Column(db.DateTime, default = datetime.now, nullable=False)
timestamp = db.Column(db.DateTime, default = datetime.now, nullable=False)
books = db.relationship("Book", backref="user", lazy="dynamic")
role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))
def __init__(self, **kwargs):
super(User, self).__init__(**kwargs)
if self.role is None:
if self.email == current_app.config['FLASKY_ADMIN']:
self.role = Role.query.filter_by(name='Administrator').first()
if self.role is None:
self.role = Role.query.filter_by(default=True).first()
@property
def password(self):
raise AttributeError('password is not a readable attribute')
@password.setter
def password(self, password):
self.password_hash = generate_password_hash(password)
@staticmethod
def add(username, password):
user = User.query.filter_by(username=username).first()
if user is not None:
return
user = User()
user.username = username
user.nickname = username
user.password = generate_password_hash(password)
user.avatar = file.new_avatar()
db.session.add(user)
db.session.commit()
return user
@staticmethod
def get(id):
return User.query.filter_by(id=id).first()
@staticmethod
def getbyname(username):
return User.query.filter_by(username=username).first()
@staticmethod
def page(page, per_page):
return User.query.paginate(page,
per_page=per_page, error_out = False)
def setting(self, nickname):
self.nickname = nickname
def change_password(self, password):
self.password = <PASSWORD>_password_hash(password)
def verify_password(self, password):
return check_password_hash(self.password, password)
def page_book(self, page, per_page):
from .book import Book
books = Book.query.filter_by(user_id=self.id)\
.options(db.Load(Book).undefer("brief"))\
.order_by(Book.publish_timestamp.desc())\
.paginate(page, per_page=per_page, error_out=False)
return books
def page_draft(self, page, per_page):
from .book import Book
books = Book.query.filter_by(user_id=self.id)\
.filter(Book.updatetime>Book.publish_timestamp)\
.options(db.Load(Book).undefer("brief"))\
.order_by(Book.publish_timestamp.desc())\
.paginate(page, per_page=per_page, error_out=False)
return books
def count_book(self):
return self.books.count()
def count_draft(self):
from .book import Book
num = Book.query.filter_by(user_id=self.id)\
.filter(Book.updatetime>Book.publish_timestamp)\
.count()
return num
def _20px_avatar(self):
image_path = current_app.config["AVATAR_PATH"]
return "/".join([image_path, "20_20_{}".format(self.avatar)])
def _50px_avatar(self):
image_path = current_app.config["AVATAR_PATH"]
return "/".join([image_path, "50_50_{}".format(self.avatar)])
def origin_avatar(self):
image_path = current_app.config["AVATAR_PATH"]
return "/".join([image_path, self.avatar])
def can(self, perm):
return self.role is not None and self.role.has_permission(perm)
def is_administrator(self):
return self.can(Permission.ADMIN)
def generate_confirmation_token(self, expiration=3600):
s = Serializer(current_app.config['SECRET_KEY'], expiration)
return s.dumps({'confirm': self.id}).decode('utf-8')
def confirm(self, token):
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token.encode('utf-8'))
except:
return False
if data.get('confirm') != self.id:
return False
self.confirmed = True
db.session.add(self)
return True
def generate_reset_token(self, expiration=3600):
s = Serializer(current_app.config['SECRET_KEY'], expiration)
return s.dumps({'reset': self.id}).decode('utf-8')
@staticmethod
def reset_password(token, new_password):
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token.encode('utf-8'))
except:
return False
user = User.query.get(data.get('reset'))
if user is None:
return False
user.password = <PASSWORD>
db.session.add(user)
return True
def generate_email_change_token(self, new_email, expiration=3600):
s = Serializer(current_app.config['SECRET_KEY'], expiration)
return s.dumps(
{'change_email': self.id, 'new_email': new_email}).decode('utf-8')
def change_email(self, token):
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token.encode('utf-8'))
except:
return False
if data.get('change_email') != self.id:
return False
new_email = data.get('new_email')
if new_email is None:
return False
if self.query.filter_by(email=new_email).first() is not None:
return False
self.email = new_email
self.avatar_hash = self.gravatar_hash()
db.session.add(self)
return True
class AnonymousUser(AnonymousUserMixin):
def can(self, permissions):
return False
def is_administrator(self):
return False
login_manager.anonymous_user = AnonymousUser
@login_manager.user_loader
def load_user(id):
if current_app.start:
return User.query.get(int(id))
return | 0.453746 | 0.06256 |
import re
# Recipe-engine / depot_tools modules injected onto the `api` object.
DEPS = [
    'depot_tools/bot_update',
    'depot_tools/gclient',
    'recipe_engine/json',
    'recipe_engine/path',
    'recipe_engine/properties',
    'recipe_engine/python',
    'recipe_engine/raw_io',
    'recipe_engine/step',
]

# End-to-end scenarios.  Each entry launches the "v8" recipe in a subprocess
# (via run_recipe.py) with `properties`, expects a return code in `ok_ret`,
# and checks the captured stdout against each verifier's regular expression.
TESTS = [
    {
        'name': 'Test auto-bisect on tester',
        'properties': {
            # Buildbot-style properties describing one V8 tester build whose
            # failing test should trigger the auto-bisect flow.
            'workdir': '/b/build/slave/linux',
            'repository': 'https://chromium.googlesource.com/v8/v8',
            'buildername': 'V8 Linux - nosnap',
            'parent_buildnumber': 9423,
            'recipe': 'v8',
            'mastername':
                'client.v8',
            'buildbotURL': 'http://build.chromium.org/p/client.v8/',
            'project': 'v8',
            'parent_buildername': 'V8 Linux - nosnap builder',
            'git_revision': 'c08e952566c3923f8fcbd693dae05f8eae73938b',
            'parent_got_revision': 'c08e952566c3923f8fcbd693dae05f8eae73938b',
            'parent_got_swarming_client_revision':
                'df99a00d96fae932bae824dccba13156bf7eddd0',
            'buildnumber': 5472,
            'bot_id': 'slave4-c3',
            'swarm_hashes': {
                'bot_default': '3726ca899b099c077b9551f7163c05ea0f160a7b',
                'mozilla': 'ba5f8a4aeee89b1fe88c764416ee9875584a10d3',
                'simdjs': '55aa4085d018aaf24dc2bc07421515e23cd8a006',
            },
            'blamelist': ['<EMAIL>', '<EMAIL>'],
            'branch': 'master',
            'parent_got_revision_cp': 'refs/heads/master@{#32376}',
            'requestedAt': 1448632553,
            'revision': '<KEY>',
            # Simulated commit range for the bisect to search through.
            'override_changes': [
                {'revision': '469675ee3f137970158305957a76615d33ff253c'},
                {'revision': 'd290f204938295bfecc5c8e645ccfcff6e80ddb8'},
                {'revision': '<KEY>'},
            ],
            'bisect_duration_factor': 0.5,
            'testfilter': [
                'cctest/test-serialize/ContextDeserialization',
            ],
        },
        # The inner recipe is expected to exit 1 (bisect found culprits).
        'ok_ret': [1],
        'verifiers': [
            {
                'name': 'verify suspects',
                'regexp': r'Suspecting multiple commits(?:.|\s)*'
                          r'd290f204(?:.|\s)*c08e9525',
            },
        ],
    },
]
def RunSteps(api):
    """Check out the build repo and run every end-to-end scenario in TESTS."""
    api.gclient.set_config('build')
    api.bot_update.ensure_checkout()
    for test in TESTS:
        try:
            # Run the target recipe as a subprocess step; stdout is captured
            # (not streamed) so it can be matched by the verifiers below.
            api.python(
                name=test['name'],
                script=api.path['checkout'].join(
                    'scripts', 'tools', 'run_recipe.py'),
                args=[
                    'v8',
                    '--properties-file',
                    api.json.input(test['properties'])
                ],
                ok_ret=test['ok_ret'],
                stdout=api.raw_io.output_text(),
            )
        finally:
            # Runs whether or not the step failed, so diagnostics are always
            # attached to the step result.
            result = api.step.active_result
            # Make consumed output visible again.
            result.presentation.logs['stdout'] = result.stdout.splitlines()
            # Show return code to ease debugging.
            result.presentation.logs['retcode'] = [str(result.retcode)]
        # Assert invariants.
        for verifier in test['verifiers']:
            if not re.search(verifier['regexp'], result.stdout):
                result.presentation.status = api.step.FAILURE
                result.presentation.logs[verifier['name']] = [
                    'Regular expression "%s" did not match.' % verifier['regexp']]
                # Make the overall build fail.
                raise api.step.StepFailure('Verifier did not match.')
def GenTests(api):
yield (
api.test('v8-auto-bisect-end-to-end-pass') +
api.properties.generic(
mastername='chromium.tools.build',
buildername='v8-linux-end-to-end',
) +
api.override_step_data(
'Test auto-bisect on tester',
api.raw_io.stream_output(
'Suspecting multiple commits@@\n@@\n@@d290f204@@@\n@@@c08e9525',
stream='stdout',
),
retcode=1,
)
)
yield (
api.test('v8-auto-bisect-end-to-end-fail') +
api.properties.generic(
mastername='chromium.tools.build',
buildername='v8-linux-end-to-end',
) +
api.override_step_data(
'Test auto-bisect on tester',
api.raw_io.stream_output(
'Suspecting multiple commits\ndeadbeef\ndeadbeef',
stream='stdout',
),
retcode=1,
)
) | scripts/slave/recipes/v8/infra_end_to_end.py |
import re
DEPS = [
'depot_tools/bot_update',
'depot_tools/gclient',
'recipe_engine/json',
'recipe_engine/path',
'recipe_engine/properties',
'recipe_engine/python',
'recipe_engine/raw_io',
'recipe_engine/step',
]
TESTS = [
{
'name': 'Test auto-bisect on tester',
'properties': {
'workdir': '/b/build/slave/linux',
'repository': 'https://chromium.googlesource.com/v8/v8',
'buildername': 'V8 Linux - nosnap',
'parent_buildnumber': 9423,
'recipe': 'v8',
'mastername':
'client.v8',
'buildbotURL': 'http://build.chromium.org/p/client.v8/',
'project': 'v8',
'parent_buildername': 'V8 Linux - nosnap builder',
'git_revision': 'c08e952566c3923f8fcbd693dae05f8eae73938b',
'parent_got_revision': 'c08e952566c3923f8fcbd693dae05f8eae73938b',
'parent_got_swarming_client_revision':
'df99a00d96fae932bae824dccba13156bf7eddd0',
'buildnumber': 5472,
'bot_id': 'slave4-c3',
'swarm_hashes': {
'bot_default': '3726ca899b099c077b9551f7163c05ea0f160a7b',
'mozilla': 'ba5f8a4aeee89b1fe88c764416ee9875584a10d3',
'simdjs': '55aa4085d018aaf24dc2bc07421515e23cd8a006',
},
'blamelist': ['<EMAIL>', '<EMAIL>'],
'branch': 'master',
'parent_got_revision_cp': 'refs/heads/master@{#32376}',
'requestedAt': 1448632553,
'revision': '<KEY>',
'override_changes': [
{'revision': '469675ee3f137970158305957a76615d33ff253c'},
{'revision': 'd290f204938295bfecc5c8e645ccfcff6e80ddb8'},
{'revision': '<KEY>'},
],
'bisect_duration_factor': 0.5,
'testfilter': [
'cctest/test-serialize/ContextDeserialization',
],
},
'ok_ret': [1],
'verifiers': [
{
'name': 'verify suspects',
'regexp': r'Suspecting multiple commits(?:.|\s)*'
r'd290f204(?:.|\s)*c08e9525',
},
],
},
]
def RunSteps(api):
api.gclient.set_config('build')
api.bot_update.ensure_checkout()
for test in TESTS:
try:
api.python(
name=test['name'],
script=api.path['checkout'].join(
'scripts', 'tools', 'run_recipe.py'),
args=[
'v8',
'--properties-file',
api.json.input(test['properties'])
],
ok_ret=test['ok_ret'],
stdout=api.raw_io.output_text(),
)
finally:
result = api.step.active_result
# Make consumed output visible again.
result.presentation.logs['stdout'] = result.stdout.splitlines()
# Show return code to ease debugging.
result.presentation.logs['retcode'] = [str(result.retcode)]
# Assert invariants.
for verifier in test['verifiers']:
if not re.search(verifier['regexp'], result.stdout):
result.presentation.status = api.step.FAILURE
result.presentation.logs[verifier['name']] = [
'Regular expression "%s" did not match.' % verifier['regexp']]
# Make the overall build fail.
raise api.step.StepFailure('Verifier did not match.')
def GenTests(api):
yield (
api.test('v8-auto-bisect-end-to-end-pass') +
api.properties.generic(
mastername='chromium.tools.build',
buildername='v8-linux-end-to-end',
) +
api.override_step_data(
'Test auto-bisect on tester',
api.raw_io.stream_output(
'Suspecting multiple commits@@\n@@\n@@d290f204@@@\n@@@c08e9525',
stream='stdout',
),
retcode=1,
)
)
yield (
api.test('v8-auto-bisect-end-to-end-fail') +
api.properties.generic(
mastername='chromium.tools.build',
buildername='v8-linux-end-to-end',
) +
api.override_step_data(
'Test auto-bisect on tester',
api.raw_io.stream_output(
'Suspecting multiple commits\ndeadbeef\ndeadbeef',
stream='stdout',
),
retcode=1,
)
) | 0.363195 | 0.135775 |
from .grammars import Language
java15 = Language("Java 1.5","""
goal ::= compilation_unit
literal ::= "INTEGER_LITERAL"
| "FLOATING_POINT_LITERAL"
| "BOOLEAN_LITERAL"
| "CHARACTER_LITERAL"
| "STRING_LITERAL"
| "NULL_LITERAL"
type ::= primitive_type
| reference_type
primitive_type ::=
numeric_type
| "BOOLEAN"
numeric_type::= integral_type
| floating_point_type
integral_type ::=
"BYTE"
| "SHORT"
| "INT"
| "LONG"
| "CHAR"
floating_point_type ::=
"FLOAT"
| "DOUBLE"
reference_type ::=
class_or_interface_type
| array_type
type_variable ::=
"IDENTIFIER"
class_or_interface ::=
name
| class_or_interface "LT" type_argument_list_1 "DOT" name
class_or_interface_type ::=
class_or_interface
| class_or_interface "LT" type_argument_list_1
class_type ::= class_or_interface_type
interface_type ::= class_or_interface_type
array_type ::= primitive_type dims
| name dims
| class_or_interface "LT" type_argument_list_1 "DOT" name dims
| class_or_interface "LT" type_argument_list_1 dims
type_arguments_opt ::= type_arguments |
type_arguments ::=
"LT" type_argument_list_1
wildcard ::= "QUESTION"
| "QUESTION" "EXTENDS" reference_type
| "QUESTION" "SUPER" reference_type
wildcard_1 ::= "QUESTION" "GT"
| "QUESTION" "EXTENDS" reference_type_1
| "QUESTION" "SUPER" reference_type_1
wildcard_2 ::= "QUESTION" "RSHIFT"
| "QUESTION" "EXTENDS" reference_type_2
| "QUESTION" "SUPER" reference_type_2
wildcard_3 ::= "QUESTION" "URSHIFT"
| "QUESTION" "EXTENDS" reference_type_3
| "QUESTION" "SUPER" reference_type_3
reference_type_1 ::=
reference_type "GT"
| class_or_interface "LT" type_argument_list_2
reference_type_2 ::=
reference_type "RSHIFT"
| class_or_interface "LT" type_argument_list_3
reference_type_3 ::=
reference_type "URSHIFT"
type_argument_list ::=
type_argument
| type_argument_list "COMMA" type_argument
type_argument_list_1 ::=
type_argument_1
| type_argument_list "COMMA" type_argument_1
type_argument_list_2 ::=
type_argument_2
| type_argument_list "COMMA" type_argument_2
type_argument_list_3 ::=
type_argument_3
| type_argument_list "COMMA" type_argument_3
type_argument ::=
reference_type
| wildcard
type_argument_1 ::=
reference_type_1
| wildcard_1
type_argument_2 ::=
reference_type_2
| wildcard_2
type_argument_3 ::=
reference_type_3
| wildcard_3
name ::= simple_name
| qualified_name
simple_name ::= "IDENTIFIER"
qualified_name ::=
name "DOT" "IDENTIFIER"
compilation_unit ::=
package_declaration_opt
import_declarations_opt
type_declarations_opt
package_declaration_opt ::= package_declaration |
import_declarations_opt ::= import_declarations |
type_declarations_opt ::= type_declarations |
import_declarations ::=
import_declaration
| import_declarations import_declaration
type_declarations ::=
type_declaration
| type_declarations type_declaration
package_declaration ::=
"PACKAGE" name "SEMICOLON"
import_declaration ::=
single_type_import_declaration
| type_import_on_demand_declaration
| static_single_type_import_declaration
| static_type_import_on_demand_declaration
single_type_import_declaration ::=
"IMPORT" name "SEMICOLON"
static_single_type_import_declaration ::=
"IMPORT" "STATIC" name "SEMICOLON"
type_import_on_demand_declaration ::=
"IMPORT" name "DOT" "MULT" "SEMICOLON"
static_type_import_on_demand_declaration ::=
"IMPORT" "STATIC" name "DOT" "MULT" "SEMICOLON"
type_declaration ::=
class_declaration
| enum_declaration
| interface_declaration
| "SEMICOLON"
modifiers_opt::=
| modifiers
modifiers ::= modifier
| modifiers modifier
modifier ::= "PUBLIC" | "PROTECTED" | "PRIVATE"
| "STATIC"
| "ABSTRACT" | "FINAL" | "NATIVE" | "SYNCHRONIZED" | "TRANSIENT" | "VOLATILE"
| "STRICTFP"
class_declaration ::=
modifiers_opt "CLASS" "IDENTIFIER" type_parameters_opt
super_opt interfaces_opt class_body
super ::= "EXTENDS" class_type
super_opt ::=
| super
interfaces ::= "IMPLEMENTS" interface_type_list
interfaces_opt::=
| interfaces
interface_type_list ::=
interface_type
| interface_type_list "COMMA" interface_type
class_body ::= "LBRACE" class_body_declarations_opt "RBRACE"
class_body_opt ::=
| class_body
class_body_declarations_opt ::=
| class_body_declarations
class_body_declarations ::=
class_body_declaration
| class_body_declarations class_body_declaration
class_body_declaration ::=
class_member_declaration
| static_initializer
| constructor_declaration
| block
class_member_declaration ::=
field_declaration
| method_declaration
| modifiers_opt "CLASS" "IDENTIFIER" type_parameters_opt super_opt interfaces_opt class_body
| enum_declaration
| interface_declaration
| "SEMICOLON"
enum_declaration ::=
modifiers_opt "ENUM" "IDENTIFIER" interfaces_opt enum_body
enum_body ::=
"LBRACE" enum_constants_opt enum_body_declarations_opt "RBRACE"
enum_constants_opt ::=
| enum_constants
enum_constants ::=
enum_constant
| enum_constants "COMMA" enum_constant
enum_constant ::=
"IDENTIFIER" enum_arguments_opt
| "IDENTIFIER" enum_arguments_opt class_body
enum_arguments_opt ::=
| "LPAREN" argument_list_opt "RPAREN"
enum_body_declarations_opt ::=
| "SEMICOLON" class_body_declarations_opt
field_declaration ::=
modifiers_opt type variable_declarators "SEMICOLON"
variable_declarators ::=
variable_declarator
| variable_declarators "COMMA" variable_declarator
variable_declarator ::=
variable_declarator_id
| variable_declarator_id "EQ" variable_initializer
variable_declarator_id ::=
"IDENTIFIER"
| variable_declarator_id "LBRACK" "RBRACK"
variable_initializer ::=
expression
| array_initializer
method_declaration ::=
method_header method_body
method_header ::=
modifiers_opt type method_declarator throws_opt
| modifiers_opt "LT" type_parameter_list_1 type method_declarator throws_opt
| modifiers_opt "VOID" method_declarator throws_opt
| modifiers_opt "LT" type_parameter_list_1 "VOID" method_declarator throws_opt
method_declarator ::=
"IDENTIFIER" "LPAREN" formal_parameter_list_opt "RPAREN"
| method_declarator "LBRACK" "RBRACK"
formal_parameter_list_opt ::=
| formal_parameter_list
formal_parameter_list ::=
formal_parameter
| formal_parameter_list "COMMA" formal_parameter
formal_parameter ::=
type variable_declarator_id
| "FINAL" type variable_declarator_id
| type "ELLIPSIS" "IDENTIFIER"
| "FINAL" type "ELLIPSIS" "IDENTIFIER"
throws_opt ::=
| throws
throws ::= "THROWS" class_type_list
class_type_list ::=
class_type
| class_type_list "COMMA" class_type
method_body ::= block
| "SEMICOLON"
static_initializer ::=
"STATIC" block
constructor_declaration ::=
modifiers_opt constructor_declarator
throws_opt constructor_body
| modifiers_opt "LT" type_parameter_list_1 constructor_declarator
throws_opt constructor_body
constructor_declarator ::=
simple_name "LPAREN" formal_parameter_list_opt "RPAREN"
constructor_body ::=
"LBRACE" explicit_constructor_invocation
block_statements "RBRACE"
| "LBRACE" explicit_constructor_invocation "RBRACE"
| "LBRACE" block_statements "RBRACE"
| "LBRACE" "RBRACE"
explicit_constructor_invocation ::=
"THIS" "LPAREN" argument_list_opt "RPAREN" "SEMICOLON"
| type_arguments "THIS" "LPAREN" argument_list_opt "RPAREN" "SEMICOLON"
| "SUPER" "LPAREN" argument_list_opt "RPAREN" "SEMICOLON"
| type_arguments "SUPER" "LPAREN" argument_list_opt "RPAREN" "SEMICOLON"
| primary "DOT" "SUPER" "LPAREN" argument_list_opt "RPAREN" "SEMICOLON"
| primary "DOT" type_arguments "SUPER"
"LPAREN" argument_list_opt "RPAREN" "SEMICOLON"
| name "DOT" "SUPER" "LPAREN" argument_list_opt "RPAREN" "SEMICOLON"
| name "DOT" type_arguments "SUPER" "LPAREN" argument_list_opt "RPAREN" "SEMICOLON"
interface_declaration ::=
modifiers_opt "INTERFACE" "IDENTIFIER" type_parameters_opt
extends_interfaces_opt interface_body
extends_interfaces_opt ::=
| extends_interfaces
extends_interfaces ::=
"EXTENDS" interface_type
| extends_interfaces "COMMA" interface_type
interface_body ::=
"LBRACE" interface_member_declarations_opt "RBRACE"
interface_member_declarations_opt ::=
| interface_member_declarations
interface_member_declarations ::=
interface_member_declaration
| interface_member_declarations interface_member_declaration
interface_member_declaration ::=
constant_declaration
| abstract_method_declaration
| class_declaration
| enum_declaration
| interface_declaration
| "SEMICOLON"
constant_declaration ::=
field_declaration
abstract_method_declaration ::=
method_header "SEMICOLON"
array_initializer ::=
"LBRACE" variable_initializers "COMMA" "RBRACE"
| "LBRACE" variable_initializers "RBRACE"
| "LBRACE" "COMMA" "RBRACE"
| "LBRACE" "RBRACE"
variable_initializers ::=
variable_initializer
| variable_initializers "COMMA" variable_initializer
block ::= "LBRACE" block_statements_opt "RBRACE"
block_statements_opt ::=
| block_statements
block_statements ::=
block_statement
| block_statements block_statement
block_statement ::=
local_variable_declaration_statement
| statement
| class_declaration
| enum_declaration
| interface_declaration
local_variable_declaration_statement ::=
local_variable_declaration "SEMICOLON"
local_variable_declaration ::=
type variable_declarators
| "FINAL" type variable_declarators
statement ::= statement_without_trailing_substatement
| labeled_statement
| if_then_statement
| if_then_else_statement
| while_statement
| for_statement
| foreach_statement
statement_no_short_if ::=
statement_without_trailing_substatement
| labeled_statement_no_short_if
| if_then_else_statement_no_short_if
| while_statement_no_short_if
| for_statement_no_short_if
| foreach_statement_no_short_if
statement_without_trailing_substatement ::=
block
| empty_statement
| expression_statement
| switch_statement
| do_statement
| break_statement
| continue_statement
| return_statement
| synchronized_statement
| throw_statement
| try_statement
| assert_statement
empty_statement ::=
"SEMICOLON"
labeled_statement ::=
"IDENTIFIER" "COLON" statement
labeled_statement_no_short_if ::=
"IDENTIFIER" "COLON" statement_no_short_if
expression_statement ::=
statement_expression "SEMICOLON"
statement_expression ::=
assignment
| preincrement_expression
| predecrement_expression
| postincrement_expression
| postdecrement_expression
| method_invocation
| class_instance_creation_expression
if_then_statement ::=
"IF" "LPAREN" expression "RPAREN" statement
if_then_else_statement ::=
"IF" "LPAREN" expression "RPAREN" statement_no_short_if
"ELSE" statement
if_then_else_statement_no_short_if ::=
"IF" "LPAREN" expression "RPAREN" statement_no_short_if
"ELSE" statement_no_short_if
switch_statement ::=
"SWITCH" "LPAREN" expression "RPAREN" switch_block
switch_block ::=
"LBRACE" switch_block_statement_groups switch_labels "RBRACE"
| "LBRACE" switch_block_statement_groups "RBRACE"
| "LBRACE" switch_labels "RBRACE"
| "LBRACE" "RBRACE"
switch_block_statement_groups ::=
switch_block_statement_group
| switch_block_statement_groups switch_block_statement_group
switch_block_statement_group ::=
switch_labels block_statements
switch_labels ::=
switch_label
| switch_labels switch_label
switch_label ::=
"CASE" constant_expression "COLON"
| "DEFAULT" "COLON"
while_statement ::=
"WHILE" "LPAREN" expression "RPAREN" statement
while_statement_no_short_if ::=
"WHILE" "LPAREN" expression "RPAREN" statement_no_short_if
do_statement ::=
"DO" statement "WHILE" "LPAREN" expression "RPAREN" "SEMICOLON"
foreach_statement ::=
"FOR" "LPAREN" type variable_declarator_id "COLON" expression "RPAREN"
statement
| "FOR" "IDENTIFIER" "LPAREN" type variable_declarator_id "IDENTIFIER"
expression "RPAREN" statement
foreach_statement_no_short_if ::=
"FOR" "LPAREN" type variable_declarator_id "COLON" expression "RPAREN"
statement_no_short_if
| "FOR" "IDENTIFIER" "LPAREN" type variable_declarator_id "IDENTIFIER"
expression "RPAREN" statement_no_short_if
for_statement ::=
"FOR" "LPAREN" for_init_opt "SEMICOLON" expression_opt "SEMICOLON"
for_update_opt "RPAREN" statement
for_statement_no_short_if ::=
"FOR" "LPAREN" for_init_opt "SEMICOLON" expression_opt "SEMICOLON"
for_update_opt "RPAREN" statement_no_short_if
for_init_opt ::=
| for_init
for_init ::= statement_expression_list
| local_variable_declaration
for_update_opt ::=
| for_update
for_update ::= statement_expression_list
statement_expression_list ::=
statement_expression
| statement_expression_list "COMMA" statement_expression
identifier_opt ::=
| "IDENTIFIER"
break_statement ::=
"BREAK" identifier_opt "SEMICOLON"
continue_statement ::=
"CONTINUE" identifier_opt "SEMICOLON"
return_statement ::=
"RETURN" expression_opt "SEMICOLON"
throw_statement ::=
"THROW" expression "SEMICOLON"
synchronized_statement ::=
"SYNCHRONIZED" "LPAREN" expression "RPAREN" block
try_statement ::=
"TRY" block catches
| "TRY" block catches_opt finally
catches_opt ::=
| catches
catches ::= catch_clause
| catches catch_clause
catch_clause ::=
"CATCH" "LPAREN" formal_parameter "RPAREN" block
finally ::= "FINALLY" block
assert_statement ::=
"ASSERT" expression "SEMICOLON"
| "ASSERT" expression "COLON" expression "SEMICOLON"
primary ::= primary_no_new_array
| array_creation_init
| array_creation_uninit
primary_no_new_array ::=
literal
| "THIS"
| "LPAREN" name "RPAREN"
| "LPAREN" expression_nn "RPAREN"
| class_instance_creation_expression
| field_access
| method_invocation
| array_access
| name "DOT" "THIS"
| "VOID" "DOT" "CLASS"
| primitive_type "DOT" "CLASS"
| primitive_type dims "DOT" "CLASS"
| name "DOT" "CLASS"
| name dims "DOT" "CLASS"
class_instance_creation_expression ::=
"NEW" class_or_interface_type "LPAREN" argument_list_opt "RPAREN" class_body_opt
| "NEW" type_arguments class_or_interface_type "LPAREN" argument_list_opt "RPAREN" class_body_opt
| primary "DOT" "NEW" type_arguments_opt "IDENTIFIER" type_arguments_opt
"LPAREN" argument_list_opt "RPAREN" class_body_opt
| name "DOT" "NEW" type_arguments_opt "IDENTIFIER" type_arguments_opt
"LPAREN" argument_list_opt "RPAREN" class_body_opt
argument_list_opt ::=
| argument_list
argument_list ::=
expression
| argument_list "COMMA" expression
array_creation_uninit ::=
"NEW" primitive_type dim_exprs dims_opt
| "NEW" class_or_interface_type dim_exprs dims_opt
array_creation_init ::=
"NEW" primitive_type dims array_initializer
| "NEW" class_or_interface_type dims array_initializer
dim_exprs ::= dim_expr
| dim_exprs dim_expr
dim_expr ::= "LBRACK" expression "RBRACK"
dims_opt ::=
| dims
dims ::= "LBRACK" "RBRACK"
| dims "LBRACK" "RBRACK"
field_access ::=
primary "DOT" "IDENTIFIER"
| "SUPER" "DOT" "IDENTIFIER"
| name "DOT" "SUPER" "DOT" "IDENTIFIER"
method_invocation ::=
name "LPAREN" argument_list_opt "RPAREN"
| primary "DOT" "IDENTIFIER" "LPAREN" argument_list_opt "RPAREN"
| primary "DOT" type_arguments "IDENTIFIER" "LPAREN" argument_list_opt "RPAREN"
| name "DOT" type_arguments "IDENTIFIER" "LPAREN" argument_list_opt "RPAREN"
| "SUPER" "DOT" "IDENTIFIER" "LPAREN" argument_list_opt "RPAREN"
| "SUPER" "DOT" type_arguments "IDENTIFIER" "LPAREN" argument_list_opt "RPAREN"
| name "DOT" "SUPER" "DOT" "IDENTIFIER" "LPAREN" argument_list_opt "RPAREN"
| name "DOT" "SUPER" "DOT" type_arguments "IDENTIFIER" "LPAREN" argument_list_opt "RPAREN"
array_access ::=
name "LBRACK" expression "RBRACK"
| primary_no_new_array "LBRACK" expression "RBRACK"
| array_creation_init "LBRACK" expression "RBRACK"
postfix_expression ::=
primary
| name
| postincrement_expression
| postdecrement_expression
postincrement_expression ::=
postfix_expression "PLUSPLUS"
postdecrement_expression ::=
postfix_expression "MINUSMINUS"
unary_expression ::=
preincrement_expression
| predecrement_expression
| "PLUS" unary_expression
| "MINUS" unary_expression
| unary_expression_not_plus_minus
preincrement_expression ::=
"PLUSPLUS" unary_expression
predecrement_expression ::=
"MINUSMINUS" unary_expression
unary_expression_not_plus_minus ::=
postfix_expression
| "COMP" unary_expression
| "NOT" unary_expression
| cast_expression
cast_expression ::=
"LPAREN" primitive_type dims_opt "RPAREN" unary_expression
| "LPAREN" name "RPAREN" unary_expression_not_plus_minus
| "LPAREN" name dims "RPAREN" unary_expression_not_plus_minus
| "LPAREN" name "LT" type_argument_list_1 dims_opt "RPAREN"
unary_expression_not_plus_minus
| "LPAREN" name "LT" type_argument_list_1 "DOT"
class_or_interface_type dims_opt "RPAREN"
unary_expression_not_plus_minus
multiplicative_expression ::=
unary_expression
| multiplicative_expression "MULT" unary_expression
| multiplicative_expression "DIV" unary_expression
| multiplicative_expression "MOD" unary_expression
additive_expression ::=
multiplicative_expression
| additive_expression "PLUS" multiplicative_expression
| additive_expression "MINUS" multiplicative_expression
shift_expression ::=
additive_expression
| shift_expression "LSHIFT" additive_expression
| shift_expression "RSHIFT" additive_expression
| shift_expression "URSHIFT" additive_expression
relational_expression ::=
shift_expression
| relational_expression "LT" shift_expression
| relational_expression "GT" shift_expression
| relational_expression "LTEQ" shift_expression
| relational_expression "GTEQ" shift_expression
instanceof_expression ::=
relational_expression
| instanceof_expression "INSTANCEOF" reference_type
equality_expression ::=
instanceof_expression
| equality_expression "EQEQ" instanceof_expression
| equality_expression "NOTEQ" instanceof_expression
and_expression ::=
equality_expression
| and_expression "AND" equality_expression
exclusive_or_expression ::=
and_expression
| exclusive_or_expression "XOR" and_expression
inclusive_or_expression ::=
exclusive_or_expression
| inclusive_or_expression "OR" exclusive_or_expression
conditional_and_expression ::=
inclusive_or_expression
| conditional_and_expression "ANDAND" inclusive_or_expression
conditional_or_expression ::=
conditional_and_expression
| conditional_or_expression "OROR" conditional_and_expression
conditional_expression ::=
conditional_or_expression
| conditional_or_expression "QUESTION" expression "COLON" conditional_expression
assignment_expression ::=
conditional_expression
| assignment
assignment ::= postfix_expression assignment_operator assignment_expression
assignment_operator ::=
"EQ"
| "MULTEQ"
| "DIVEQ"
| "MODEQ"
| "PLUSEQ"
| "MINUSEQ"
| "LSHIFTEQ"
| "RSHIFTEQ"
| "URSHIFTEQ"
| "ANDEQ"
| "XOREQ"
| "OREQ"
expression_opt ::=
| expression
expression ::= assignment_expression
constant_expression ::=
expression
type_parameters_opt ::= type_parameters |
type_parameters ::=
"LT" type_parameter_list_1
type_parameter_list ::=
type_parameter_list "COMMA" type_parameter
| type_parameter
type_parameter_list_1 ::=
type_parameter_1
| type_parameter_list "COMMA" type_parameter_1
type_parameter ::=
type_variable type_bound_opt
type_parameter_1 ::=
type_variable "GT"
| type_variable type_bound_1
type_bound_opt ::= type_bound |
type_bound ::=
"EXTENDS" reference_type additional_bound_list_opt
type_bound_1 ::=
"EXTENDS" reference_type_1
| "EXTENDS" reference_type additional_bound_list_1
additional_bound_list_opt ::= additional_bound_list |
additional_bound_list ::=
additional_bound additional_bound_list
| additional_bound
additional_bound_list_1 ::=
additional_bound additional_bound_list_1
| additional_bound_1
additional_bound ::=
"AND" interface_type
additional_bound_1 ::=
"AND" reference_type_1
postfix_expression_nn ::=
primary
| postincrement_expression
| postdecrement_expression
unary_expression_nn ::=
preincrement_expression
| predecrement_expression
| "PLUS" unary_expression
| "MINUS" unary_expression
| unary_expression_not_plus_minus_nn
unary_expression_not_plus_minus_nn ::=
postfix_expression_nn
| "COMP" unary_expression
| "NOT" unary_expression
| cast_expression
multiplicative_expression_nn ::=
unary_expression_nn
| name "MULT" unary_expression
| multiplicative_expression_nn "MULT" unary_expression
| name "DIV" unary_expression
| multiplicative_expression_nn "DIV" unary_expression
| name "MOD" unary_expression
| multiplicative_expression_nn "MOD" unary_expression
additive_expression_nn ::=
multiplicative_expression_nn
| name "PLUS" multiplicative_expression
| additive_expression_nn "PLUS" multiplicative_expression
| name "MINUS" multiplicative_expression
| additive_expression_nn "MINUS" multiplicative_expression
shift_expression_nn ::=
additive_expression_nn
| name "LSHIFT" additive_expression
| shift_expression_nn "LSHIFT" additive_expression
| name "RSHIFT" additive_expression
| shift_expression_nn "RSHIFT" additive_expression
| name "URSHIFT" additive_expression
| shift_expression_nn "URSHIFT" additive_expression
relational_expression_nn ::=
shift_expression_nn
| name "LT" shift_expression
| shift_expression_nn "LT" shift_expression
| name "GT" shift_expression
| shift_expression_nn "GT" shift_expression
| name "LTEQ" shift_expression
| relational_expression_nn "LTEQ" shift_expression
| name "GTEQ" shift_expression
| relational_expression_nn "GTEQ" shift_expression
instanceof_expression_nn ::=
relational_expression_nn
| name "INSTANCEOF" reference_type
| instanceof_expression_nn "INSTANCEOF" reference_type
equality_expression_nn ::=
instanceof_expression_nn
| name "EQEQ" instanceof_expression
| equality_expression_nn "EQEQ" instanceof_expression
| name "NOTEQ" instanceof_expression
| equality_expression_nn "NOTEQ" instanceof_expression
and_expression_nn ::=
equality_expression_nn
| name "AND" equality_expression
| and_expression_nn "AND" equality_expression
exclusive_or_expression_nn ::=
and_expression_nn
| name "XOR" and_expression
| exclusive_or_expression_nn "XOR" and_expression
inclusive_or_expression_nn ::=
exclusive_or_expression_nn
| name "OR" exclusive_or_expression
| inclusive_or_expression_nn "OR" exclusive_or_expression
conditional_and_expression_nn ::=
inclusive_or_expression_nn
| name "ANDAND" inclusive_or_expression
| conditional_and_expression_nn "ANDAND" inclusive_or_expression
conditional_or_expression_nn ::=
conditional_and_expression_nn
| name "OROR" conditional_and_expression
| conditional_or_expression_nn "OROR" conditional_and_expression
conditional_expression_nn ::=
conditional_or_expression_nn
| name "QUESTION" expression "COLON" conditional_expression
| conditional_or_expression_nn "QUESTION" expression
"COLON" conditional_expression
assignment_expression_nn ::=
conditional_expression_nn
| assignment
expression_nn ::= assignment_expression_nn
"""
,
"""
"//[^\\r\\n]*":<ws>
"\"(\\\\.|[^\\\\"])*\"":STRING_LITERAL
"\'[^\']*\'":CHARACTER_LITERAL
"boolean":BOOLEAN
"byte":BYTE
"short":SHORT
"int":INT
"long":LONG
"char":CHAR
"float":FLOAT
"double":DOUBLE
"\[":LBRACK
"\]":RBRACK
"\.":DOT
";":SEMICOLON
"\*":MULT
",":COMMA
"{":LBRACE
"}":RBRACE
"=":EQ
"\(":LPAREN
"\)":RPAREN
":":COLON
"package":PACKAGE
"import":IMPORT
"public":PUBLIC
"protected":PROTECTED
"private":PRIVATE
"static":STATIC
"abstract":ABSTRACT
"final":FINAL
"native":NATIVE
"synchronized":SYNCHRONIZED
"transient":TRANSIENT
"volatile":VOLATILE
"class":CLASS
"extends":EXTENDS
"implements":IMPLEMENTS
"void":VOID
"throws":THROWS
"this":THIS
"super":SUPER
"interface":INTERFACE
"if":IF
"else":ELSE
"switch":SWITCH
"case":CASE
"default":DEFAULT
"do":DO
"while":WHILE
"for":FOR
"break":BREAK
"continue":CONTINUE
"return":RETURN
"throw":THROW
"try":TRY
"catch":CATCH
"finally":FINALLY
"assert":ASSERT
"new":NEW
"\+\+":PLUSPLUS
"\-\-":MINUSMINUS
"\+":PLUS
"\-":MINUS
"~":COMP
"!":NOT
"\/":DIV
"\%":MOD
"<<":LSHIFT
">>":RSHIFT
">>>":URSHIFT
"\<\<=":LSHIFTEQ
"\>\>=":RSHIFTEQ
"\>\>\>=":URSHIFTEQ
"\<=":LTEQ
"\>=":GTEQ
"\<":LT
"\>":GT
"instanceof":INSTANCEOF
"==":EQEQ
"!=":NOTEQ
"&&":ANDAND
"\|\|":OROR
"&":AND
"\^":XOR
"\|":OR
"\?":QUESTION
"\*=":MULTEQ
"\/=":DIVEQ
"%=":MODEQ
"\+=":PLUSEQ
"-=":MINUSEQ
"&=":ANDEQ
"\^=":XOREQ
"\|=":OREQ
"0x[0-9A-Fa-f]+|[0-9]+":INTEGER_LITERAL
"[0-9]+\.[0-9]+([eE][0-9]+)?[fFdD]?|[0-9]+[eE][0-9]+[fFdD]?":FLOATING_POINT_LITERAL
"(true|false)":BOOLEAN_LITERAL
"null":NULL_LITERAL
"[a-zA-Z_][a-zA-Z0-9_]*":IDENTIFIER
"const":CONST
"goto":GOTO
"strictfp":STRICTFP
"ellipsis":ELLIPSIS
"enum":ENUM
"[ \\t]+":<ws>
"[\\n\\r]":<return>
""",
"Java"
) | lib/eco/grammars/java15.py |
from .grammars import Language
java15 = Language("Java 1.5","""
goal ::= compilation_unit
literal ::= "INTEGER_LITERAL"
| "FLOATING_POINT_LITERAL"
| "BOOLEAN_LITERAL"
| "CHARACTER_LITERAL"
| "STRING_LITERAL"
| "NULL_LITERAL"
type ::= primitive_type
| reference_type
primitive_type ::=
numeric_type
| "BOOLEAN"
numeric_type::= integral_type
| floating_point_type
integral_type ::=
"BYTE"
| "SHORT"
| "INT"
| "LONG"
| "CHAR"
floating_point_type ::=
"FLOAT"
| "DOUBLE"
reference_type ::=
class_or_interface_type
| array_type
type_variable ::=
"IDENTIFIER"
class_or_interface ::=
name
| class_or_interface "LT" type_argument_list_1 "DOT" name
class_or_interface_type ::=
class_or_interface
| class_or_interface "LT" type_argument_list_1
class_type ::= class_or_interface_type
interface_type ::= class_or_interface_type
array_type ::= primitive_type dims
| name dims
| class_or_interface "LT" type_argument_list_1 "DOT" name dims
| class_or_interface "LT" type_argument_list_1 dims
type_arguments_opt ::= type_arguments |
type_arguments ::=
"LT" type_argument_list_1
wildcard ::= "QUESTION"
| "QUESTION" "EXTENDS" reference_type
| "QUESTION" "SUPER" reference_type
wildcard_1 ::= "QUESTION" "GT"
| "QUESTION" "EXTENDS" reference_type_1
| "QUESTION" "SUPER" reference_type_1
wildcard_2 ::= "QUESTION" "RSHIFT"
| "QUESTION" "EXTENDS" reference_type_2
| "QUESTION" "SUPER" reference_type_2
wildcard_3 ::= "QUESTION" "URSHIFT"
| "QUESTION" "EXTENDS" reference_type_3
| "QUESTION" "SUPER" reference_type_3
reference_type_1 ::=
reference_type "GT"
| class_or_interface "LT" type_argument_list_2
reference_type_2 ::=
reference_type "RSHIFT"
| class_or_interface "LT" type_argument_list_3
reference_type_3 ::=
reference_type "URSHIFT"
type_argument_list ::=
type_argument
| type_argument_list "COMMA" type_argument
type_argument_list_1 ::=
type_argument_1
| type_argument_list "COMMA" type_argument_1
type_argument_list_2 ::=
type_argument_2
| type_argument_list "COMMA" type_argument_2
type_argument_list_3 ::=
type_argument_3
| type_argument_list "COMMA" type_argument_3
type_argument ::=
reference_type
| wildcard
type_argument_1 ::=
reference_type_1
| wildcard_1
type_argument_2 ::=
reference_type_2
| wildcard_2
type_argument_3 ::=
reference_type_3
| wildcard_3
name ::= simple_name
| qualified_name
simple_name ::= "IDENTIFIER"
qualified_name ::=
name "DOT" "IDENTIFIER"
compilation_unit ::=
package_declaration_opt
import_declarations_opt
type_declarations_opt
package_declaration_opt ::= package_declaration |
import_declarations_opt ::= import_declarations |
type_declarations_opt ::= type_declarations |
import_declarations ::=
import_declaration
| import_declarations import_declaration
type_declarations ::=
type_declaration
| type_declarations type_declaration
package_declaration ::=
"PACKAGE" name "SEMICOLON"
import_declaration ::=
single_type_import_declaration
| type_import_on_demand_declaration
| static_single_type_import_declaration
| static_type_import_on_demand_declaration
single_type_import_declaration ::=
"IMPORT" name "SEMICOLON"
static_single_type_import_declaration ::=
"IMPORT" "STATIC" name "SEMICOLON"
type_import_on_demand_declaration ::=
"IMPORT" name "DOT" "MULT" "SEMICOLON"
static_type_import_on_demand_declaration ::=
"IMPORT" "STATIC" name "DOT" "MULT" "SEMICOLON"
type_declaration ::=
class_declaration
| enum_declaration
| interface_declaration
| "SEMICOLON"
modifiers_opt::=
| modifiers
modifiers ::= modifier
| modifiers modifier
modifier ::= "PUBLIC" | "PROTECTED" | "PRIVATE"
| "STATIC"
| "ABSTRACT" | "FINAL" | "NATIVE" | "SYNCHRONIZED" | "TRANSIENT" | "VOLATILE"
| "STRICTFP"
class_declaration ::=
modifiers_opt "CLASS" "IDENTIFIER" type_parameters_opt
super_opt interfaces_opt class_body
super ::= "EXTENDS" class_type
super_opt ::=
| super
interfaces ::= "IMPLEMENTS" interface_type_list
interfaces_opt::=
| interfaces
interface_type_list ::=
interface_type
| interface_type_list "COMMA" interface_type
class_body ::= "LBRACE" class_body_declarations_opt "RBRACE"
class_body_opt ::=
| class_body
class_body_declarations_opt ::=
| class_body_declarations
class_body_declarations ::=
class_body_declaration
| class_body_declarations class_body_declaration
class_body_declaration ::=
class_member_declaration
| static_initializer
| constructor_declaration
| block
class_member_declaration ::=
field_declaration
| method_declaration
| modifiers_opt "CLASS" "IDENTIFIER" type_parameters_opt super_opt interfaces_opt class_body
| enum_declaration
| interface_declaration
| "SEMICOLON"
enum_declaration ::=
modifiers_opt "ENUM" "IDENTIFIER" interfaces_opt enum_body
enum_body ::=
"LBRACE" enum_constants_opt enum_body_declarations_opt "RBRACE"
enum_constants_opt ::=
| enum_constants
enum_constants ::=
enum_constant
| enum_constants "COMMA" enum_constant
enum_constant ::=
"IDENTIFIER" enum_arguments_opt
| "IDENTIFIER" enum_arguments_opt class_body
enum_arguments_opt ::=
| "LPAREN" argument_list_opt "RPAREN"
enum_body_declarations_opt ::=
| "SEMICOLON" class_body_declarations_opt
field_declaration ::=
modifiers_opt type variable_declarators "SEMICOLON"
variable_declarators ::=
variable_declarator
| variable_declarators "COMMA" variable_declarator
variable_declarator ::=
variable_declarator_id
| variable_declarator_id "EQ" variable_initializer
variable_declarator_id ::=
"IDENTIFIER"
| variable_declarator_id "LBRACK" "RBRACK"
variable_initializer ::=
expression
| array_initializer
method_declaration ::=
method_header method_body
method_header ::=
modifiers_opt type method_declarator throws_opt
| modifiers_opt "LT" type_parameter_list_1 type method_declarator throws_opt
| modifiers_opt "VOID" method_declarator throws_opt
| modifiers_opt "LT" type_parameter_list_1 "VOID" method_declarator throws_opt
method_declarator ::=
"IDENTIFIER" "LPAREN" formal_parameter_list_opt "RPAREN"
| method_declarator "LBRACK" "RBRACK"
formal_parameter_list_opt ::=
| formal_parameter_list
formal_parameter_list ::=
formal_parameter
| formal_parameter_list "COMMA" formal_parameter
formal_parameter ::=
type variable_declarator_id
| "FINAL" type variable_declarator_id
| type "ELLIPSIS" "IDENTIFIER"
| "FINAL" type "ELLIPSIS" "IDENTIFIER"
throws_opt ::=
| throws
throws ::= "THROWS" class_type_list
class_type_list ::=
class_type
| class_type_list "COMMA" class_type
method_body ::= block
| "SEMICOLON"
static_initializer ::=
"STATIC" block
constructor_declaration ::=
modifiers_opt constructor_declarator
throws_opt constructor_body
| modifiers_opt "LT" type_parameter_list_1 constructor_declarator
throws_opt constructor_body
constructor_declarator ::=
simple_name "LPAREN" formal_parameter_list_opt "RPAREN"
constructor_body ::=
"LBRACE" explicit_constructor_invocation
block_statements "RBRACE"
| "LBRACE" explicit_constructor_invocation "RBRACE"
| "LBRACE" block_statements "RBRACE"
| "LBRACE" "RBRACE"
explicit_constructor_invocation ::=
"THIS" "LPAREN" argument_list_opt "RPAREN" "SEMICOLON"
| type_arguments "THIS" "LPAREN" argument_list_opt "RPAREN" "SEMICOLON"
| "SUPER" "LPAREN" argument_list_opt "RPAREN" "SEMICOLON"
| type_arguments "SUPER" "LPAREN" argument_list_opt "RPAREN" "SEMICOLON"
| primary "DOT" "SUPER" "LPAREN" argument_list_opt "RPAREN" "SEMICOLON"
| primary "DOT" type_arguments "SUPER"
"LPAREN" argument_list_opt "RPAREN" "SEMICOLON"
| name "DOT" "SUPER" "LPAREN" argument_list_opt "RPAREN" "SEMICOLON"
| name "DOT" type_arguments "SUPER" "LPAREN" argument_list_opt "RPAREN" "SEMICOLON"
interface_declaration ::=
modifiers_opt "INTERFACE" "IDENTIFIER" type_parameters_opt
extends_interfaces_opt interface_body
extends_interfaces_opt ::=
| extends_interfaces
extends_interfaces ::=
"EXTENDS" interface_type
| extends_interfaces "COMMA" interface_type
interface_body ::=
"LBRACE" interface_member_declarations_opt "RBRACE"
interface_member_declarations_opt ::=
| interface_member_declarations
interface_member_declarations ::=
interface_member_declaration
| interface_member_declarations interface_member_declaration
interface_member_declaration ::=
constant_declaration
| abstract_method_declaration
| class_declaration
| enum_declaration
| interface_declaration
| "SEMICOLON"
constant_declaration ::=
field_declaration
abstract_method_declaration ::=
method_header "SEMICOLON"
array_initializer ::=
"LBRACE" variable_initializers "COMMA" "RBRACE"
| "LBRACE" variable_initializers "RBRACE"
| "LBRACE" "COMMA" "RBRACE"
| "LBRACE" "RBRACE"
variable_initializers ::=
variable_initializer
| variable_initializers "COMMA" variable_initializer
block ::= "LBRACE" block_statements_opt "RBRACE"
block_statements_opt ::=
| block_statements
block_statements ::=
block_statement
| block_statements block_statement
block_statement ::=
local_variable_declaration_statement
| statement
| class_declaration
| enum_declaration
| interface_declaration
local_variable_declaration_statement ::=
local_variable_declaration "SEMICOLON"
local_variable_declaration ::=
type variable_declarators
| "FINAL" type variable_declarators
statement ::= statement_without_trailing_substatement
| labeled_statement
| if_then_statement
| if_then_else_statement
| while_statement
| for_statement
| foreach_statement
statement_no_short_if ::=
statement_without_trailing_substatement
| labeled_statement_no_short_if
| if_then_else_statement_no_short_if
| while_statement_no_short_if
| for_statement_no_short_if
| foreach_statement_no_short_if
statement_without_trailing_substatement ::=
block
| empty_statement
| expression_statement
| switch_statement
| do_statement
| break_statement
| continue_statement
| return_statement
| synchronized_statement
| throw_statement
| try_statement
| assert_statement
empty_statement ::=
"SEMICOLON"
labeled_statement ::=
"IDENTIFIER" "COLON" statement
labeled_statement_no_short_if ::=
"IDENTIFIER" "COLON" statement_no_short_if
expression_statement ::=
statement_expression "SEMICOLON"
statement_expression ::=
assignment
| preincrement_expression
| predecrement_expression
| postincrement_expression
| postdecrement_expression
| method_invocation
| class_instance_creation_expression
if_then_statement ::=
"IF" "LPAREN" expression "RPAREN" statement
if_then_else_statement ::=
"IF" "LPAREN" expression "RPAREN" statement_no_short_if
"ELSE" statement
if_then_else_statement_no_short_if ::=
"IF" "LPAREN" expression "RPAREN" statement_no_short_if
"ELSE" statement_no_short_if
switch_statement ::=
"SWITCH" "LPAREN" expression "RPAREN" switch_block
switch_block ::=
"LBRACE" switch_block_statement_groups switch_labels "RBRACE"
| "LBRACE" switch_block_statement_groups "RBRACE"
| "LBRACE" switch_labels "RBRACE"
| "LBRACE" "RBRACE"
switch_block_statement_groups ::=
switch_block_statement_group
| switch_block_statement_groups switch_block_statement_group
switch_block_statement_group ::=
switch_labels block_statements
switch_labels ::=
switch_label
| switch_labels switch_label
switch_label ::=
"CASE" constant_expression "COLON"
| "DEFAULT" "COLON"
while_statement ::=
"WHILE" "LPAREN" expression "RPAREN" statement
while_statement_no_short_if ::=
"WHILE" "LPAREN" expression "RPAREN" statement_no_short_if
do_statement ::=
"DO" statement "WHILE" "LPAREN" expression "RPAREN" "SEMICOLON"
foreach_statement ::=
"FOR" "LPAREN" type variable_declarator_id "COLON" expression "RPAREN"
statement
| "FOR" "IDENTIFIER" "LPAREN" type variable_declarator_id "IDENTIFIER"
expression "RPAREN" statement
foreach_statement_no_short_if ::=
"FOR" "LPAREN" type variable_declarator_id "COLON" expression "RPAREN"
statement_no_short_if
| "FOR" "IDENTIFIER" "LPAREN" type variable_declarator_id "IDENTIFIER"
expression "RPAREN" statement_no_short_if
for_statement ::=
"FOR" "LPAREN" for_init_opt "SEMICOLON" expression_opt "SEMICOLON"
for_update_opt "RPAREN" statement
for_statement_no_short_if ::=
"FOR" "LPAREN" for_init_opt "SEMICOLON" expression_opt "SEMICOLON"
for_update_opt "RPAREN" statement_no_short_if
for_init_opt ::=
| for_init
for_init ::= statement_expression_list
| local_variable_declaration
for_update_opt ::=
| for_update
for_update ::= statement_expression_list
statement_expression_list ::=
statement_expression
| statement_expression_list "COMMA" statement_expression
identifier_opt ::=
| "IDENTIFIER"
break_statement ::=
"BREAK" identifier_opt "SEMICOLON"
continue_statement ::=
"CONTINUE" identifier_opt "SEMICOLON"
return_statement ::=
"RETURN" expression_opt "SEMICOLON"
throw_statement ::=
"THROW" expression "SEMICOLON"
synchronized_statement ::=
"SYNCHRONIZED" "LPAREN" expression "RPAREN" block
try_statement ::=
"TRY" block catches
| "TRY" block catches_opt finally
catches_opt ::=
| catches
catches ::= catch_clause
| catches catch_clause
catch_clause ::=
"CATCH" "LPAREN" formal_parameter "RPAREN" block
finally ::= "FINALLY" block
assert_statement ::=
"ASSERT" expression "SEMICOLON"
| "ASSERT" expression "COLON" expression "SEMICOLON"
primary ::= primary_no_new_array
| array_creation_init
| array_creation_uninit
primary_no_new_array ::=
literal
| "THIS"
| "LPAREN" name "RPAREN"
| "LPAREN" expression_nn "RPAREN"
| class_instance_creation_expression
| field_access
| method_invocation
| array_access
| name "DOT" "THIS"
| "VOID" "DOT" "CLASS"
| primitive_type "DOT" "CLASS"
| primitive_type dims "DOT" "CLASS"
| name "DOT" "CLASS"
| name dims "DOT" "CLASS"
class_instance_creation_expression ::=
"NEW" class_or_interface_type "LPAREN" argument_list_opt "RPAREN" class_body_opt
| "NEW" type_arguments class_or_interface_type "LPAREN" argument_list_opt "RPAREN" class_body_opt
| primary "DOT" "NEW" type_arguments_opt "IDENTIFIER" type_arguments_opt
"LPAREN" argument_list_opt "RPAREN" class_body_opt
| name "DOT" "NEW" type_arguments_opt "IDENTIFIER" type_arguments_opt
"LPAREN" argument_list_opt "RPAREN" class_body_opt
argument_list_opt ::=
| argument_list
argument_list ::=
expression
| argument_list "COMMA" expression
array_creation_uninit ::=
"NEW" primitive_type dim_exprs dims_opt
| "NEW" class_or_interface_type dim_exprs dims_opt
array_creation_init ::=
"NEW" primitive_type dims array_initializer
| "NEW" class_or_interface_type dims array_initializer
dim_exprs ::= dim_expr
| dim_exprs dim_expr
dim_expr ::= "LBRACK" expression "RBRACK"
dims_opt ::=
| dims
dims ::= "LBRACK" "RBRACK"
| dims "LBRACK" "RBRACK"
field_access ::=
primary "DOT" "IDENTIFIER"
| "SUPER" "DOT" "IDENTIFIER"
| name "DOT" "SUPER" "DOT" "IDENTIFIER"
method_invocation ::=
name "LPAREN" argument_list_opt "RPAREN"
| primary "DOT" "IDENTIFIER" "LPAREN" argument_list_opt "RPAREN"
| primary "DOT" type_arguments "IDENTIFIER" "LPAREN" argument_list_opt "RPAREN"
| name "DOT" type_arguments "IDENTIFIER" "LPAREN" argument_list_opt "RPAREN"
| "SUPER" "DOT" "IDENTIFIER" "LPAREN" argument_list_opt "RPAREN"
| "SUPER" "DOT" type_arguments "IDENTIFIER" "LPAREN" argument_list_opt "RPAREN"
| name "DOT" "SUPER" "DOT" "IDENTIFIER" "LPAREN" argument_list_opt "RPAREN"
| name "DOT" "SUPER" "DOT" type_arguments "IDENTIFIER" "LPAREN" argument_list_opt "RPAREN"
array_access ::=
name "LBRACK" expression "RBRACK"
| primary_no_new_array "LBRACK" expression "RBRACK"
| array_creation_init "LBRACK" expression "RBRACK"
postfix_expression ::=
primary
| name
| postincrement_expression
| postdecrement_expression
postincrement_expression ::=
postfix_expression "PLUSPLUS"
postdecrement_expression ::=
postfix_expression "MINUSMINUS"
unary_expression ::=
preincrement_expression
| predecrement_expression
| "PLUS" unary_expression
| "MINUS" unary_expression
| unary_expression_not_plus_minus
preincrement_expression ::=
"PLUSPLUS" unary_expression
predecrement_expression ::=
"MINUSMINUS" unary_expression
unary_expression_not_plus_minus ::=
postfix_expression
| "COMP" unary_expression
| "NOT" unary_expression
| cast_expression
cast_expression ::=
"LPAREN" primitive_type dims_opt "RPAREN" unary_expression
| "LPAREN" name "RPAREN" unary_expression_not_plus_minus
| "LPAREN" name dims "RPAREN" unary_expression_not_plus_minus
| "LPAREN" name "LT" type_argument_list_1 dims_opt "RPAREN"
unary_expression_not_plus_minus
| "LPAREN" name "LT" type_argument_list_1 "DOT"
class_or_interface_type dims_opt "RPAREN"
unary_expression_not_plus_minus
multiplicative_expression ::=
unary_expression
| multiplicative_expression "MULT" unary_expression
| multiplicative_expression "DIV" unary_expression
| multiplicative_expression "MOD" unary_expression
additive_expression ::=
multiplicative_expression
| additive_expression "PLUS" multiplicative_expression
| additive_expression "MINUS" multiplicative_expression
shift_expression ::=
additive_expression
| shift_expression "LSHIFT" additive_expression
| shift_expression "RSHIFT" additive_expression
| shift_expression "URSHIFT" additive_expression
relational_expression ::=
shift_expression
| relational_expression "LT" shift_expression
| relational_expression "GT" shift_expression
| relational_expression "LTEQ" shift_expression
| relational_expression "GTEQ" shift_expression
instanceof_expression ::=
relational_expression
| instanceof_expression "INSTANCEOF" reference_type
equality_expression ::=
instanceof_expression
| equality_expression "EQEQ" instanceof_expression
| equality_expression "NOTEQ" instanceof_expression
and_expression ::=
equality_expression
| and_expression "AND" equality_expression
exclusive_or_expression ::=
and_expression
| exclusive_or_expression "XOR" and_expression
inclusive_or_expression ::=
exclusive_or_expression
| inclusive_or_expression "OR" exclusive_or_expression
conditional_and_expression ::=
inclusive_or_expression
| conditional_and_expression "ANDAND" inclusive_or_expression
conditional_or_expression ::=
conditional_and_expression
| conditional_or_expression "OROR" conditional_and_expression
conditional_expression ::=
conditional_or_expression
| conditional_or_expression "QUESTION" expression "COLON" conditional_expression
assignment_expression ::=
conditional_expression
| assignment
assignment ::= postfix_expression assignment_operator assignment_expression
assignment_operator ::=
"EQ"
| "MULTEQ"
| "DIVEQ"
| "MODEQ"
| "PLUSEQ"
| "MINUSEQ"
| "LSHIFTEQ"
| "RSHIFTEQ"
| "URSHIFTEQ"
| "ANDEQ"
| "XOREQ"
| "OREQ"
expression_opt ::=
| expression
expression ::= assignment_expression
constant_expression ::=
expression
type_parameters_opt ::= type_parameters |
type_parameters ::=
"LT" type_parameter_list_1
type_parameter_list ::=
type_parameter_list "COMMA" type_parameter
| type_parameter
type_parameter_list_1 ::=
type_parameter_1
| type_parameter_list "COMMA" type_parameter_1
type_parameter ::=
type_variable type_bound_opt
type_parameter_1 ::=
type_variable "GT"
| type_variable type_bound_1
type_bound_opt ::= type_bound |
type_bound ::=
"EXTENDS" reference_type additional_bound_list_opt
type_bound_1 ::=
"EXTENDS" reference_type_1
| "EXTENDS" reference_type additional_bound_list_1
additional_bound_list_opt ::= additional_bound_list |
additional_bound_list ::=
additional_bound additional_bound_list
| additional_bound
additional_bound_list_1 ::=
additional_bound additional_bound_list_1
| additional_bound_1
additional_bound ::=
"AND" interface_type
additional_bound_1 ::=
"AND" reference_type_1
postfix_expression_nn ::=
primary
| postincrement_expression
| postdecrement_expression
unary_expression_nn ::=
preincrement_expression
| predecrement_expression
| "PLUS" unary_expression
| "MINUS" unary_expression
| unary_expression_not_plus_minus_nn
unary_expression_not_plus_minus_nn ::=
postfix_expression_nn
| "COMP" unary_expression
| "NOT" unary_expression
| cast_expression
multiplicative_expression_nn ::=
unary_expression_nn
| name "MULT" unary_expression
| multiplicative_expression_nn "MULT" unary_expression
| name "DIV" unary_expression
| multiplicative_expression_nn "DIV" unary_expression
| name "MOD" unary_expression
| multiplicative_expression_nn "MOD" unary_expression
additive_expression_nn ::=
multiplicative_expression_nn
| name "PLUS" multiplicative_expression
| additive_expression_nn "PLUS" multiplicative_expression
| name "MINUS" multiplicative_expression
| additive_expression_nn "MINUS" multiplicative_expression
shift_expression_nn ::=
additive_expression_nn
| name "LSHIFT" additive_expression
| shift_expression_nn "LSHIFT" additive_expression
| name "RSHIFT" additive_expression
| shift_expression_nn "RSHIFT" additive_expression
| name "URSHIFT" additive_expression
| shift_expression_nn "URSHIFT" additive_expression
relational_expression_nn ::=
shift_expression_nn
| name "LT" shift_expression
| shift_expression_nn "LT" shift_expression
| name "GT" shift_expression
| shift_expression_nn "GT" shift_expression
| name "LTEQ" shift_expression
| relational_expression_nn "LTEQ" shift_expression
| name "GTEQ" shift_expression
| relational_expression_nn "GTEQ" shift_expression
instanceof_expression_nn ::=
relational_expression_nn
| name "INSTANCEOF" reference_type
| instanceof_expression_nn "INSTANCEOF" reference_type
equality_expression_nn ::=
instanceof_expression_nn
| name "EQEQ" instanceof_expression
| equality_expression_nn "EQEQ" instanceof_expression
| name "NOTEQ" instanceof_expression
| equality_expression_nn "NOTEQ" instanceof_expression
and_expression_nn ::=
equality_expression_nn
| name "AND" equality_expression
| and_expression_nn "AND" equality_expression
exclusive_or_expression_nn ::=
and_expression_nn
| name "XOR" and_expression
| exclusive_or_expression_nn "XOR" and_expression
inclusive_or_expression_nn ::=
exclusive_or_expression_nn
| name "OR" exclusive_or_expression
| inclusive_or_expression_nn "OR" exclusive_or_expression
conditional_and_expression_nn ::=
inclusive_or_expression_nn
| name "ANDAND" inclusive_or_expression
| conditional_and_expression_nn "ANDAND" inclusive_or_expression
conditional_or_expression_nn ::=
conditional_and_expression_nn
| name "OROR" conditional_and_expression
| conditional_or_expression_nn "OROR" conditional_and_expression
conditional_expression_nn ::=
conditional_or_expression_nn
| name "QUESTION" expression "COLON" conditional_expression
| conditional_or_expression_nn "QUESTION" expression
"COLON" conditional_expression
assignment_expression_nn ::=
conditional_expression_nn
| assignment
expression_nn ::= assignment_expression_nn
"""
,
"""
"//[^\\r\\n]*":<ws>
"\"(\\\\.|[^\\\\"])*\"":STRING_LITERAL
"\'[^\']*\'":CHARACTER_LITERAL
"boolean":BOOLEAN
"byte":BYTE
"short":SHORT
"int":INT
"long":LONG
"char":CHAR
"float":FLOAT
"double":DOUBLE
"\[":LBRACK
"\]":RBRACK
"\.":DOT
";":SEMICOLON
"\*":MULT
",":COMMA
"{":LBRACE
"}":RBRACE
"=":EQ
"\(":LPAREN
"\)":RPAREN
":":COLON
"package":PACKAGE
"import":IMPORT
"public":PUBLIC
"protected":PROTECTED
"private":PRIVATE
"static":STATIC
"abstract":ABSTRACT
"final":FINAL
"native":NATIVE
"synchronized":SYNCHRONIZED
"transient":TRANSIENT
"volatile":VOLATILE
"class":CLASS
"extends":EXTENDS
"implements":IMPLEMENTS
"void":VOID
"throws":THROWS
"this":THIS
"super":SUPER
"interface":INTERFACE
"if":IF
"else":ELSE
"switch":SWITCH
"case":CASE
"default":DEFAULT
"do":DO
"while":WHILE
"for":FOR
"break":BREAK
"continue":CONTINUE
"return":RETURN
"throw":THROW
"try":TRY
"catch":CATCH
"finally":FINALLY
"assert":ASSERT
"new":NEW
"\+\+":PLUSPLUS
"\-\-":MINUSMINUS
"\+":PLUS
"\-":MINUS
"~":COMP
"!":NOT
"\/":DIV
"\%":MOD
"<<":LSHIFT
">>":RSHIFT
">>>":URSHIFT
"\<\<=":LSHIFTEQ
"\>\>=":RSHIFTEQ
"\>\>\>=":URSHIFTEQ
"\<=":LTEQ
"\>=":GTEQ
"\<":LT
"\>":GT
"instanceof":INSTANCEOF
"==":EQEQ
"!=":NOTEQ
"&&":ANDAND
"\|\|":OROR
"&":AND
"\^":XOR
"\|":OR
"\?":QUESTION
"\*=":MULTEQ
"\/=":DIVEQ
"%=":MODEQ
"\+=":PLUSEQ
"-=":MINUSEQ
"&=":ANDEQ
"\^=":XOREQ
"\|=":OREQ
"0x[0-9A-Fa-f]+|[0-9]+":INTEGER_LITERAL
"[0-9]+\.[0-9]+([eE][0-9]+)?[fFdD]?|[0-9]+[eE][0-9]+[fFdD]?":FLOATING_POINT_LITERAL
"(true|false)":BOOLEAN_LITERAL
"null":NULL_LITERAL
"[a-zA-Z_][a-zA-Z0-9_]*":IDENTIFIER
"const":CONST
"goto":GOTO
"strictfp":STRICTFP
"ellipsis":ELLIPSIS
"enum":ENUM
"[ \\t]+":<ws>
"[\\n\\r]":<return>
""",
"Java"
) | 0.507812 | 0.051966 |
import torch
import torch.nn as nn
from abc import ABC
class BaseNet(nn.Module, ABC):
def __init__(self, num_state, seed):
super(BaseNet, self).__init__()
# set seed
torch.manual_seed(seed)
self.num_hidden = 256
self.base = nn.Sequential(
nn.Linear(in_features=num_state, out_features=self.num_hidden),
nn.SELU(),
nn.Linear(in_features=self.num_hidden, out_features=self.num_hidden),
nn.SELU(),
nn.Linear(in_features=self.num_hidden, out_features=self.num_hidden),
nn.SELU()
)
for param in self.parameters():
if len(param.shape) == 2:
torch.nn.init.kaiming_normal_(param, mode='fan_in', nonlinearity='linear')
def forward(self, state):
if len(state.shape) == 1:
state = state.unsqueeze(dim=0)
return self.base(state)
class BC(BaseNet):
def __init__(self, num_state, num_actions, seed):
super(BC, self).__init__(num_state, seed)
self.out = nn.Linear(in_features=self.num_hidden, out_features=num_actions)
for param in self.out.parameters():
if len(param.shape) > 1:
torch.nn.init.kaiming_normal_(param, mode='fan_in', nonlinearity='linear')
def forward(self, state):
state = super(BC, self).forward(state)
return self.out(state)
class Embedding(nn.Module):
def __init__(self, num_state, num_embedding, seed):
super(Embedding, self).__init__()
# set seed
torch.manual_seed(seed)
self.num_hidden = 256
self.net = nn.Sequential(
nn.Linear(in_features=num_state, out_features=self.num_hidden),
nn.SELU(),
nn.Linear(in_features=self.num_hidden, out_features=self.num_hidden),
nn.SELU(),
nn.Linear(in_features=self.num_hidden, out_features=num_embedding))
for param in self.parameters():
if len(param.shape) > 1:
torch.nn.init.kaiming_normal_(param, mode='fan_in', nonlinearity='linear')
def embed(self, state):
return self.net(state)
def forward(self, states):
(s1, s2) = states
embedding_1 = self.embed(s1)
embedding_2 = self.embed(s2)
# calculate cosine similarities between embeddings -> (-1, 1)
out = torch.diag(embedding_1 @ embedding_2.T, diagonal=0) / (torch.linalg.norm(embedding_1, dim=1) *
torch.linalg.norm(embedding_2, dim=1))
# change output range to (0, 1) with sigmoid to be applicable to bcelosswithlogits
return (out + 1.) / 2.
class AutoEncoder(nn.Module):
def __init__(self, num_state, num_embedding, seed):
super(AutoEncoder, self).__init__()
# set seed
torch.manual_seed(seed)
self.num_hidden = 256
self.embedding = nn.Sequential(
nn.Linear(in_features=num_state, out_features=self.num_hidden),
nn.SELU(),
nn.Linear(in_features=self.num_hidden, out_features=num_embedding))
self.out = nn.Sequential(
nn.Linear(in_features=num_embedding, out_features=self.num_hidden),
nn.SELU(),
nn.Linear(in_features=self.num_hidden, out_features=num_state))
for param in self.parameters():
if len(param.shape) > 1:
torch.nn.init.kaiming_normal_(param, mode='fan_in', nonlinearity='linear')
def embed(self, state):
return self.embedding(state)
def forward(self, state):
embedding = self.embed(state)
return self.out(embedding) | source/offline_ds_evaluation/networks.py | import torch
import torch.nn as nn
from abc import ABC
class BaseNet(nn.Module, ABC):
def __init__(self, num_state, seed):
super(BaseNet, self).__init__()
# set seed
torch.manual_seed(seed)
self.num_hidden = 256
self.base = nn.Sequential(
nn.Linear(in_features=num_state, out_features=self.num_hidden),
nn.SELU(),
nn.Linear(in_features=self.num_hidden, out_features=self.num_hidden),
nn.SELU(),
nn.Linear(in_features=self.num_hidden, out_features=self.num_hidden),
nn.SELU()
)
for param in self.parameters():
if len(param.shape) == 2:
torch.nn.init.kaiming_normal_(param, mode='fan_in', nonlinearity='linear')
def forward(self, state):
if len(state.shape) == 1:
state = state.unsqueeze(dim=0)
return self.base(state)
class BC(BaseNet):
def __init__(self, num_state, num_actions, seed):
super(BC, self).__init__(num_state, seed)
self.out = nn.Linear(in_features=self.num_hidden, out_features=num_actions)
for param in self.out.parameters():
if len(param.shape) > 1:
torch.nn.init.kaiming_normal_(param, mode='fan_in', nonlinearity='linear')
def forward(self, state):
state = super(BC, self).forward(state)
return self.out(state)
class Embedding(nn.Module):
def __init__(self, num_state, num_embedding, seed):
super(Embedding, self).__init__()
# set seed
torch.manual_seed(seed)
self.num_hidden = 256
self.net = nn.Sequential(
nn.Linear(in_features=num_state, out_features=self.num_hidden),
nn.SELU(),
nn.Linear(in_features=self.num_hidden, out_features=self.num_hidden),
nn.SELU(),
nn.Linear(in_features=self.num_hidden, out_features=num_embedding))
for param in self.parameters():
if len(param.shape) > 1:
torch.nn.init.kaiming_normal_(param, mode='fan_in', nonlinearity='linear')
def embed(self, state):
return self.net(state)
def forward(self, states):
(s1, s2) = states
embedding_1 = self.embed(s1)
embedding_2 = self.embed(s2)
# calculate cosine similarities between embeddings -> (-1, 1)
out = torch.diag(embedding_1 @ embedding_2.T, diagonal=0) / (torch.linalg.norm(embedding_1, dim=1) *
torch.linalg.norm(embedding_2, dim=1))
# change output range to (0, 1) with sigmoid to be applicable to bcelosswithlogits
return (out + 1.) / 2.
class AutoEncoder(nn.Module):
def __init__(self, num_state, num_embedding, seed):
super(AutoEncoder, self).__init__()
# set seed
torch.manual_seed(seed)
self.num_hidden = 256
self.embedding = nn.Sequential(
nn.Linear(in_features=num_state, out_features=self.num_hidden),
nn.SELU(),
nn.Linear(in_features=self.num_hidden, out_features=num_embedding))
self.out = nn.Sequential(
nn.Linear(in_features=num_embedding, out_features=self.num_hidden),
nn.SELU(),
nn.Linear(in_features=self.num_hidden, out_features=num_state))
for param in self.parameters():
if len(param.shape) > 1:
torch.nn.init.kaiming_normal_(param, mode='fan_in', nonlinearity='linear')
def embed(self, state):
return self.embedding(state)
def forward(self, state):
embedding = self.embed(state)
return self.out(embedding) | 0.938322 | 0.423518 |
import torch
from torch.nn.utils.rnn import pad_sequence
from transformer import (
Transformer,
add_eos,
add_sos,
decoder_padding_mask,
encoder_padding_mask,
generate_square_subsequent_mask,
)
def test_encoder_padding_mask():
supervisions = {
"sequence_idx": torch.tensor([0, 1, 2]),
"start_frame": torch.tensor([0, 0, 0]),
"num_frames": torch.tensor([18, 7, 13]),
}
max_len = ((18 - 1) // 2 - 1) // 2
mask = encoder_padding_mask(max_len, supervisions)
expected_mask = torch.tensor(
[
[False, False, False], # ((18 - 1)//2 - 1)//2 = 3,
[False, True, True], # ((7 - 1)//2 - 1)//2 = 1,
[False, False, True], # ((13 - 1)//2 - 1)//2 = 2,
]
)
assert torch.all(torch.eq(mask, expected_mask))
def test_transformer():
num_features = 40
num_classes = 87
model = Transformer(num_features=num_features, num_classes=num_classes)
N = 31
for T in range(7, 30):
x = torch.rand(N, T, num_features)
y, _, _ = model(x)
assert y.shape == (N, (((T - 1) // 2) - 1) // 2, num_classes)
def test_generate_square_subsequent_mask():
s = 5
mask = generate_square_subsequent_mask(s)
inf = float("inf")
expected_mask = torch.tensor(
[
[0.0, -inf, -inf, -inf, -inf],
[0.0, 0.0, -inf, -inf, -inf],
[0.0, 0.0, 0.0, -inf, -inf],
[0.0, 0.0, 0.0, 0.0, -inf],
[0.0, 0.0, 0.0, 0.0, 0.0],
]
)
assert torch.all(torch.eq(mask, expected_mask))
def test_decoder_padding_mask():
x = [torch.tensor([1, 2]), torch.tensor([3]), torch.tensor([2, 5, 8])]
y = pad_sequence(x, batch_first=True, padding_value=-1)
mask = decoder_padding_mask(y, ignore_id=-1)
expected_mask = torch.tensor(
[
[False, False, True],
[False, True, True],
[False, False, False],
]
)
assert torch.all(torch.eq(mask, expected_mask))
def test_add_sos():
x = [[1, 2], [3], [2, 5, 8]]
y = add_sos(x, sos_id=0)
expected_y = [[0, 1, 2], [0, 3], [0, 2, 5, 8]]
assert y == expected_y
def test_add_eos():
x = [[1, 2], [3], [2, 5, 8]]
y = add_eos(x, eos_id=0)
expected_y = [[1, 2, 0], [3, 0], [2, 5, 8, 0]]
assert y == expected_y | egs/librispeech/ASR/conformer_ctc/test_transformer.py |
import torch
from torch.nn.utils.rnn import pad_sequence
from transformer import (
Transformer,
add_eos,
add_sos,
decoder_padding_mask,
encoder_padding_mask,
generate_square_subsequent_mask,
)
def test_encoder_padding_mask():
supervisions = {
"sequence_idx": torch.tensor([0, 1, 2]),
"start_frame": torch.tensor([0, 0, 0]),
"num_frames": torch.tensor([18, 7, 13]),
}
max_len = ((18 - 1) // 2 - 1) // 2
mask = encoder_padding_mask(max_len, supervisions)
expected_mask = torch.tensor(
[
[False, False, False], # ((18 - 1)//2 - 1)//2 = 3,
[False, True, True], # ((7 - 1)//2 - 1)//2 = 1,
[False, False, True], # ((13 - 1)//2 - 1)//2 = 2,
]
)
assert torch.all(torch.eq(mask, expected_mask))
def test_transformer():
num_features = 40
num_classes = 87
model = Transformer(num_features=num_features, num_classes=num_classes)
N = 31
for T in range(7, 30):
x = torch.rand(N, T, num_features)
y, _, _ = model(x)
assert y.shape == (N, (((T - 1) // 2) - 1) // 2, num_classes)
def test_generate_square_subsequent_mask():
s = 5
mask = generate_square_subsequent_mask(s)
inf = float("inf")
expected_mask = torch.tensor(
[
[0.0, -inf, -inf, -inf, -inf],
[0.0, 0.0, -inf, -inf, -inf],
[0.0, 0.0, 0.0, -inf, -inf],
[0.0, 0.0, 0.0, 0.0, -inf],
[0.0, 0.0, 0.0, 0.0, 0.0],
]
)
assert torch.all(torch.eq(mask, expected_mask))
def test_decoder_padding_mask():
x = [torch.tensor([1, 2]), torch.tensor([3]), torch.tensor([2, 5, 8])]
y = pad_sequence(x, batch_first=True, padding_value=-1)
mask = decoder_padding_mask(y, ignore_id=-1)
expected_mask = torch.tensor(
[
[False, False, True],
[False, True, True],
[False, False, False],
]
)
assert torch.all(torch.eq(mask, expected_mask))
def test_add_sos():
x = [[1, 2], [3], [2, 5, 8]]
y = add_sos(x, sos_id=0)
expected_y = [[0, 1, 2], [0, 3], [0, 2, 5, 8]]
assert y == expected_y
def test_add_eos():
x = [[1, 2], [3], [2, 5, 8]]
y = add_eos(x, eos_id=0)
expected_y = [[1, 2, 0], [3, 0], [2, 5, 8, 0]]
assert y == expected_y | 0.703549 | 0.669421 |
import argparse
import json
import pprint
import requests
import sys
import urllib
import random
import os
API_KEY = os.environ["DINECISION_API_KEY"]
from urllib.error import HTTPError
from urllib.parse import quote
from urllib.parse import urlencode
# API constants, you shouldn't have to change these.
API_HOST = 'https://api.yelp.com'
SEARCH_PATH = '/v3/businesses/search'
BUSINESS_PATH = '/v3/businesses/' # Business ID will come after slash.
SEARCH_LIMIT = 5
def yelprequest(host, path, api_key, url_params=None):
url_params = url_params or {}
url = '{0}{1}'.format(host, quote(path.encode('utf8')))
headers = {
'Authorization': 'Bearer %s' % api_key,
}
print(u'Querying {0} ...'.format(url))
response = requests.request('GET', url, headers=headers, params=url_params)
return response.json()
def main():
location_input = input("Please enter the area you want to search for (e.g. 3 Times Square, New York City): ")
rating_input = input("Do you care about ratings (e.g. 4 or 4.5): ")
price_input = input("Do you care about price (e.g. 1 is the lowest, 4 is the highest): ")
url_params = {
'location': location_input.replace(' ', '+'),
'radius': 500,
'is_closed': "false",
'rating': rating_input,
'limit': SEARCH_LIMIT,
'categories': "restaurants, All",
'price': price_input
}
result = yelprequest(API_HOST, SEARCH_PATH, API_KEY, url_params)
business_list = result["businesses"]
random_business = random.choice(business_list)
print("Please go to " + random_business["name"] + " !")
Show_more = input("Do you want to learn more about it (y/n): ")
if Show_more == "y":
print(random_business["name"] + ", located at " + str(random_business["location"]['display_address'][0]) + ", " + str(random_business["location"]['state']) + " " + str(random_business["location"]['zip_code']))
else:
print("enjoy!")
if __name__ == '__main__':
main() | app/DineCision.py | import argparse
import json
import pprint
import requests
import sys
import urllib
import random
import os
API_KEY = os.environ["DINECISION_API_KEY"]
from urllib.error import HTTPError
from urllib.parse import quote
from urllib.parse import urlencode
# API constants, you shouldn't have to change these.
API_HOST = 'https://api.yelp.com'
SEARCH_PATH = '/v3/businesses/search'
BUSINESS_PATH = '/v3/businesses/' # Business ID will come after slash.
SEARCH_LIMIT = 5
def yelprequest(host, path, api_key, url_params=None):
url_params = url_params or {}
url = '{0}{1}'.format(host, quote(path.encode('utf8')))
headers = {
'Authorization': 'Bearer %s' % api_key,
}
print(u'Querying {0} ...'.format(url))
response = requests.request('GET', url, headers=headers, params=url_params)
return response.json()
def main():
location_input = input("Please enter the area you want to search for (e.g. 3 Times Square, New York City): ")
rating_input = input("Do you care about ratings (e.g. 4 or 4.5): ")
price_input = input("Do you care about price (e.g. 1 is the lowest, 4 is the highest): ")
url_params = {
'location': location_input.replace(' ', '+'),
'radius': 500,
'is_closed': "false",
'rating': rating_input,
'limit': SEARCH_LIMIT,
'categories': "restaurants, All",
'price': price_input
}
result = yelprequest(API_HOST, SEARCH_PATH, API_KEY, url_params)
business_list = result["businesses"]
random_business = random.choice(business_list)
print("Please go to " + random_business["name"] + " !")
Show_more = input("Do you want to learn more about it (y/n): ")
if Show_more == "y":
print(random_business["name"] + ", located at " + str(random_business["location"]['display_address'][0]) + ", " + str(random_business["location"]['state']) + " " + str(random_business["location"]['zip_code']))
else:
print("enjoy!")
if __name__ == '__main__':
main() | 0.134776 | 0.066995 |
from __future__ import annotations
from typing import Optional, TYPE_CHECKING, Union
# noinspection PyPackageRequirements
from pyspark.sql.types import StructType, DataType
from spark_auto_mapper_fhir.fhir_types.boolean import FhirBoolean
from spark_auto_mapper_fhir.fhir_types.date import FhirDate
from spark_auto_mapper_fhir.fhir_types.list import FhirList
from spark_auto_mapper_fhir.complex_types.meta import Meta
from spark_auto_mapper_fhir.extensions.extension_base import ExtensionBase
from spark_auto_mapper_fhir.fhir_types.id import FhirId
from spark_auto_mapper_fhir.fhir_types.uri import FhirUri
from spark_auto_mapper_fhir.base_types.fhir_resource_base import FhirResourceBase
from spark_fhir_schemas.r4.resources.relatedperson import RelatedPersonSchema
if TYPE_CHECKING:
pass
# id_ (id)
# meta (Meta)
# implicitRules (uri)
# language (CommonLanguages)
from spark_auto_mapper_fhir.value_sets.common_languages import CommonLanguagesCode
# text (Narrative)
from spark_auto_mapper_fhir.complex_types.narrative import Narrative
# contained (ResourceContainer)
from spark_auto_mapper_fhir.complex_types.resource_container import (
ResourceContainer,
)
# extension (Extension)
# modifierExtension (Extension)
# identifier (Identifier)
from spark_auto_mapper_fhir.complex_types.identifier import Identifier
# active (boolean)
# patient (Reference)
from spark_auto_mapper_fhir.complex_types.reference import Reference
# Imports for References for patient
from spark_auto_mapper_fhir.resources.patient import Patient
# relationship (CodeableConcept)
from spark_auto_mapper_fhir.complex_types.codeable_concept import CodeableConcept
# Import for CodeableConcept for relationship
from spark_auto_mapper_fhir.value_sets.patient_relationship_type import (
PatientRelationshipTypeCode,
)
# End Import for CodeableConcept for relationship
# name (HumanName)
from spark_auto_mapper_fhir.complex_types.human_name import HumanName
# telecom (ContactPoint)
from spark_auto_mapper_fhir.complex_types.contact_point import ContactPoint
# gender (AdministrativeGender)
from spark_auto_mapper_fhir.value_sets.administrative_gender import (
AdministrativeGenderCode,
)
# birthDate (date)
# address (Address)
from spark_auto_mapper_fhir.complex_types.address import Address
# photo (Attachment)
from spark_auto_mapper_fhir.complex_types.attachment import Attachment
# period (Period)
from spark_auto_mapper_fhir.complex_types.period import Period
# communication (RelatedPerson.Communication)
from spark_auto_mapper_fhir.backbone_elements.related_person_communication import (
RelatedPersonCommunication,
)
# This file is auto-generated by generate_classes so do not edit manually
# noinspection PyPep8Naming
class RelatedPerson(FhirResourceBase):
"""
RelatedPerson
relatedperson.xsd
Information about a person that is involved in the care for a patient, but who
is not the target of healthcare, nor has a formal responsibility in the care
process.
If the element is present, it must have either a @value, an @id, or extensions
"""
# noinspection PyPep8Naming
def __init__(
self,
*,
id_: Optional[FhirId] = None,
meta: Optional[Meta] = None,
implicitRules: Optional[FhirUri] = None,
language: Optional[CommonLanguagesCode] = None,
text: Optional[Narrative] = None,
contained: Optional[FhirList[ResourceContainer]] = None,
extension: Optional[FhirList[ExtensionBase]] = None,
modifierExtension: Optional[FhirList[ExtensionBase]] = None,
identifier: Optional[FhirList[Identifier]] = None,
active: Optional[FhirBoolean] = None,
patient: Reference[Patient],
relationship: Optional[
FhirList[CodeableConcept[PatientRelationshipTypeCode]]
] = None,
name: Optional[FhirList[HumanName]] = None,
telecom: Optional[FhirList[ContactPoint]] = None,
gender: Optional[AdministrativeGenderCode] = None,
birthDate: Optional[FhirDate] = None,
address: Optional[FhirList[Address]] = None,
photo: Optional[FhirList[Attachment]] = None,
period: Optional[Period] = None,
communication: Optional[FhirList[RelatedPersonCommunication]] = None,
) -> None:
"""
Information about a person that is involved in the care for a patient, but who
is not the target of healthcare, nor has a formal responsibility in the care
process.
If the element is present, it must have either a @value, an @id, or extensions
:param id_: The logical id of the resource, as used in the URL for the resource. Once
assigned, this value never changes.
:param meta: The metadata about the resource. This is content that is maintained by the
infrastructure. Changes to the content might not always be associated with
version changes to the resource.
:param implicitRules: A reference to a set of rules that were followed when the resource was
constructed, and which must be understood when processing the content. Often,
this is a reference to an implementation guide that defines the special rules
along with other profiles etc.
:param language: The base language in which the resource is written.
:param text: A human-readable narrative that contains a summary of the resource and can be
used to represent the content of the resource to a human. The narrative need
not encode all the structured data, but is required to contain sufficient
detail to make it "clinically safe" for a human to just read the narrative.
Resource definitions may define what content should be represented in the
narrative to ensure clinical safety.
:param contained: These resources do not have an independent existence apart from the resource
that contains them - they cannot be identified independently, and nor can they
have their own independent transaction scope.
:param extension: May be used to represent additional information that is not part of the basic
definition of the resource. To make the use of extensions safe and manageable,
there is a strict set of governance applied to the definition and use of
extensions. Though any implementer can define an extension, there is a set of
requirements that SHALL be met as part of the definition of the extension.
:param modifierExtension: May be used to represent additional information that is not part of the basic
definition of the resource and that modifies the understanding of the element
that contains it and/or the understanding of the containing element's
descendants. Usually modifier elements provide negation or qualification. To
make the use of extensions safe and manageable, there is a strict set of
governance applied to the definition and use of extensions. Though any
implementer is allowed to define an extension, there is a set of requirements
that SHALL be met as part of the definition of the extension. Applications
processing a resource are required to check for modifier extensions.
Modifier extensions SHALL NOT change the meaning of any elements on Resource
or DomainResource (including cannot change the meaning of modifierExtension
itself).
:param identifier: Identifier for a person within a particular scope.
:param active: Whether this related person record is in active use.
:param patient: The patient this person is related to.
:param relationship: The nature of the relationship between a patient and the related person.
:param name: A name associated with the person.
:param telecom: A contact detail for the person, e.g. a telephone number or an email address.
:param gender: Administrative Gender - the gender that the person is considered to have for
administration and record keeping purposes.
:param birthDate: The date on which the related person was born.
:param address: Address where the related person can be contacted or visited.
:param photo: Image of the person.
:param period: The period of time during which this relationship is or was active. If there
are no dates defined, then the interval is unknown.
:param communication: A language which may be used to communicate with about the patient's health.
"""
super().__init__(
resourceType="RelatedPerson",
id_=id_,
meta=meta,
implicitRules=implicitRules,
language=language,
text=text,
contained=contained,
extension=extension,
modifierExtension=modifierExtension,
identifier=identifier,
active=active,
patient=patient,
relationship=relationship,
name=name,
telecom=telecom,
gender=gender,
birthDate=birthDate,
address=address,
photo=photo,
period=period,
communication=communication,
)
def get_schema(
self, include_extension: bool
) -> Optional[Union[StructType, DataType]]:
return RelatedPersonSchema.get_schema(include_extension=include_extension) | spark_auto_mapper_fhir/resources/related_person.py | from __future__ import annotations
from typing import Optional, TYPE_CHECKING, Union
# noinspection PyPackageRequirements
from pyspark.sql.types import StructType, DataType
from spark_auto_mapper_fhir.fhir_types.boolean import FhirBoolean
from spark_auto_mapper_fhir.fhir_types.date import FhirDate
from spark_auto_mapper_fhir.fhir_types.list import FhirList
from spark_auto_mapper_fhir.complex_types.meta import Meta
from spark_auto_mapper_fhir.extensions.extension_base import ExtensionBase
from spark_auto_mapper_fhir.fhir_types.id import FhirId
from spark_auto_mapper_fhir.fhir_types.uri import FhirUri
from spark_auto_mapper_fhir.base_types.fhir_resource_base import FhirResourceBase
from spark_fhir_schemas.r4.resources.relatedperson import RelatedPersonSchema
if TYPE_CHECKING:
pass
# id_ (id)
# meta (Meta)
# implicitRules (uri)
# language (CommonLanguages)
from spark_auto_mapper_fhir.value_sets.common_languages import CommonLanguagesCode
# text (Narrative)
from spark_auto_mapper_fhir.complex_types.narrative import Narrative
# contained (ResourceContainer)
from spark_auto_mapper_fhir.complex_types.resource_container import (
ResourceContainer,
)
# extension (Extension)
# modifierExtension (Extension)
# identifier (Identifier)
from spark_auto_mapper_fhir.complex_types.identifier import Identifier
# active (boolean)
# patient (Reference)
from spark_auto_mapper_fhir.complex_types.reference import Reference
# Imports for References for patient
from spark_auto_mapper_fhir.resources.patient import Patient
# relationship (CodeableConcept)
from spark_auto_mapper_fhir.complex_types.codeable_concept import CodeableConcept
# Import for CodeableConcept for relationship
from spark_auto_mapper_fhir.value_sets.patient_relationship_type import (
PatientRelationshipTypeCode,
)
# End Import for CodeableConcept for relationship
# name (HumanName)
from spark_auto_mapper_fhir.complex_types.human_name import HumanName
# telecom (ContactPoint)
from spark_auto_mapper_fhir.complex_types.contact_point import ContactPoint
# gender (AdministrativeGender)
from spark_auto_mapper_fhir.value_sets.administrative_gender import (
AdministrativeGenderCode,
)
# birthDate (date)
# address (Address)
from spark_auto_mapper_fhir.complex_types.address import Address
# photo (Attachment)
from spark_auto_mapper_fhir.complex_types.attachment import Attachment
# period (Period)
from spark_auto_mapper_fhir.complex_types.period import Period
# communication (RelatedPerson.Communication)
from spark_auto_mapper_fhir.backbone_elements.related_person_communication import (
RelatedPersonCommunication,
)
# This file is auto-generated by generate_classes so do not edit manually
# noinspection PyPep8Naming
class RelatedPerson(FhirResourceBase):
    """
    RelatedPerson
    relatedperson.xsd
    Information about a person that is involved in the care for a patient, but who
    is not the target of healthcare, nor has a formal responsibility in the care
    process.
    If the element is present, it must have either a @value, an @id, or extensions
    """
    # All parameters are keyword-only; ``patient`` is the only element without
    # a default and is therefore required by the FHIR schema.
    # noinspection PyPep8Naming
    def __init__(
        self,
        *,
        id_: Optional[FhirId] = None,
        meta: Optional[Meta] = None,
        implicitRules: Optional[FhirUri] = None,
        language: Optional[CommonLanguagesCode] = None,
        text: Optional[Narrative] = None,
        contained: Optional[FhirList[ResourceContainer]] = None,
        extension: Optional[FhirList[ExtensionBase]] = None,
        modifierExtension: Optional[FhirList[ExtensionBase]] = None,
        identifier: Optional[FhirList[Identifier]] = None,
        active: Optional[FhirBoolean] = None,
        patient: Reference[Patient],  # required: no default value
        relationship: Optional[
            FhirList[CodeableConcept[PatientRelationshipTypeCode]]
        ] = None,
        name: Optional[FhirList[HumanName]] = None,
        telecom: Optional[FhirList[ContactPoint]] = None,
        gender: Optional[AdministrativeGenderCode] = None,
        birthDate: Optional[FhirDate] = None,
        address: Optional[FhirList[Address]] = None,
        photo: Optional[FhirList[Attachment]] = None,
        period: Optional[Period] = None,
        communication: Optional[FhirList[RelatedPersonCommunication]] = None,
    ) -> None:
        """
        Information about a person that is involved in the care for a patient, but who
        is not the target of healthcare, nor has a formal responsibility in the care
        process.
        If the element is present, it must have either a @value, an @id, or extensions
        :param id_: The logical id of the resource, as used in the URL for the resource. Once
        assigned, this value never changes.
        :param meta: The metadata about the resource. This is content that is maintained by the
        infrastructure. Changes to the content might not always be associated with
        version changes to the resource.
        :param implicitRules: A reference to a set of rules that were followed when the resource was
        constructed, and which must be understood when processing the content. Often,
        this is a reference to an implementation guide that defines the special rules
        along with other profiles etc.
        :param language: The base language in which the resource is written.
        :param text: A human-readable narrative that contains a summary of the resource and can be
        used to represent the content of the resource to a human. The narrative need
        not encode all the structured data, but is required to contain sufficient
        detail to make it "clinically safe" for a human to just read the narrative.
        Resource definitions may define what content should be represented in the
        narrative to ensure clinical safety.
        :param contained: These resources do not have an independent existence apart from the resource
        that contains them - they cannot be identified independently, and nor can they
        have their own independent transaction scope.
        :param extension: May be used to represent additional information that is not part of the basic
        definition of the resource. To make the use of extensions safe and manageable,
        there is a strict set of governance applied to the definition and use of
        extensions. Though any implementer can define an extension, there is a set of
        requirements that SHALL be met as part of the definition of the extension.
        :param modifierExtension: May be used to represent additional information that is not part of the basic
        definition of the resource and that modifies the understanding of the element
        that contains it and/or the understanding of the containing element's
        descendants. Usually modifier elements provide negation or qualification. To
        make the use of extensions safe and manageable, there is a strict set of
        governance applied to the definition and use of extensions. Though any
        implementer is allowed to define an extension, there is a set of requirements
        that SHALL be met as part of the definition of the extension. Applications
        processing a resource are required to check for modifier extensions.
        Modifier extensions SHALL NOT change the meaning of any elements on Resource
        or DomainResource (including cannot change the meaning of modifierExtension
        itself).
        :param identifier: Identifier for a person within a particular scope.
        :param active: Whether this related person record is in active use.
        :param patient: The patient this person is related to.
        :param relationship: The nature of the relationship between a patient and the related person.
        :param name: A name associated with the person.
        :param telecom: A contact detail for the person, e.g. a telephone number or an email address.
        :param gender: Administrative Gender - the gender that the person is considered to have for
        administration and record keeping purposes.
        :param birthDate: The date on which the related person was born.
        :param address: Address where the related person can be contacted or visited.
        :param photo: Image of the person.
        :param period: The period of time during which this relationship is or was active. If there
        are no dates defined, then the interval is unknown.
        :param communication: A language which may be used to communicate with about the patient's health.
        """
        # Pure pass-through: every argument is forwarded unchanged to the
        # FhirResourceBase constructor with the fixed resourceType.
        super().__init__(
            resourceType="RelatedPerson",
            id_=id_,
            meta=meta,
            implicitRules=implicitRules,
            language=language,
            text=text,
            contained=contained,
            extension=extension,
            modifierExtension=modifierExtension,
            identifier=identifier,
            active=active,
            patient=patient,
            relationship=relationship,
            name=name,
            telecom=telecom,
            gender=gender,
            birthDate=birthDate,
            address=address,
            photo=photo,
            period=period,
            communication=communication,
        )
def get_schema(
self, include_extension: bool
) -> Optional[Union[StructType, DataType]]:
return RelatedPersonSchema.get_schema(include_extension=include_extension) | 0.90185 | 0.277173 |
from weld.grizzly.core.indexes.base import Index
class ColumnIndex(Index):
    """
    An index used for columns in a Grizzly DataFrame.
    Each index value is a Python object. For operations between two DataFrames
    with the same ColumnIndex, the result will also have the same index. For
    operations between two DataFrames with different ColumnIndex, the output
    will have a join of the two ColumnIndex, sorted by the index values.
    Two ColumnIndex are equal if their index names are equal and have the same
    order.
    Parameters
    ----------
    columns : iterable
        column names.
    slots : iterable of int or None
        slots associated with each column. If provided, the length must be
        len(columns). This is used for underlying data access only; index
        equality depends only on the column names and ordering.
    Examples
    --------
    >>> ColumnIndex(["name", "age"])
    ColumnIndex(['name', 'age'], [0, 1])
    >>> ColumnIndex(["name", "age"], slots=[1, 0])
    ColumnIndex(['name', 'age'], [1, 0])
    >>> ColumnIndex(["name", "age"], slots=[1, 2])
    Traceback (most recent call last):
    ...
    ValueError: slots must be contiguous starting at 0
    """
    def __init__(self, columns, slots=None):
        if not isinstance(columns, list):
            columns = list(columns)
        if slots is not None:
            assert len(columns) == len(slots)
            sorted_slots = sorted(slots)
            # Make sure each slot is occupied/there are no "holes":
            # slots must be a permutation of 0..len(columns)-1.
            if not sorted_slots == list(range(len(slots))):
                raise ValueError("slots must be contiguous starting at 0")
        else:
            slots = range(len(columns))
        # The original column order.
        self.columns = columns
        # The mapping from columns to slots.
        self.index = dict(zip(columns, slots))
    def __iter__(self):
        """
        Iterates over columns in the order in which they appear in a DataFrame.
        Examples
        --------
        >>> x = ColumnIndex(["name", "age"], slots=[1, 0])
        >>> [name for name in x]
        ['name', 'age']
        """
        for col in self.columns:
            yield col
    def zip(self, other):
        """
        Zips this index with 'other', returning an iterator of `(name,
        slot_in_self, slot_in_other)`. The slot may be `None` if the name does
        not appear in either column.
        The result columns are ordered in a way consistent with how DataFrame
        columns should be ordered (i.e., same order as `self` if `self ==
        other`, and sorted by the union of columns from `self` and `other`
        otherwise).
        Examples
        --------
        >>> a = ColumnIndex(["name", "age"])
        >>> b = ColumnIndex(["name", "age"])
        >>> list(a.zip(b))
        [('name', 0, 0), ('age', 1, 1)]
        >>> b = ColumnIndex(["income", "age", "name"])
        >>> list(a.zip(b))
        [('age', 1, 1), ('income', None, 0), ('name', 0, 2)]
        """
        if self == other:
            # Equal indexes: keep self's column order; every name is in both.
            for name in self.columns:
                yield (name, self.index[name], other.index[name])
        else:
            # Differing indexes: sorted union; .get() yields None for
            # names missing from one side.
            columns = sorted(list(set(self.columns).union(other.columns)))
            for name in columns:
                yield (name, self.index.get(name), other.index.get(name))
    def __getitem__(self, key):
        """
        Get the slot for a particular column name.
        Examples
        --------
        >>> a = ColumnIndex(["name", "age"])
        >>> a["age"]
        1
        """
        return self.index[key]
    def append(self, key):
        """
        Add a new column to the index. The slot is set to be `len(columns) - 1`.
        Examples
        --------
        >>> a = ColumnIndex(["name", "age"])
        >>> a.append("income")
        >>> a["income"]
        2
        """
        # Slot is computed before the append, so it ends up len(columns) - 1.
        self.index[key] = len(self.columns)
        self.columns.append(key)
    def __eq__(self, other):
        """
        Compare equality depending on column names.
        Examples
        --------
        >>> a = ColumnIndex(["name", "age"])
        >>> a == ColumnIndex(["name", "age"])
        True
        >>> a == ColumnIndex(["age", "name"])
        False
        >>> a == ColumnIndex(["name", "age", "income"])
        False
        """
        return isinstance(other, ColumnIndex) and self.columns == other.columns
    def __str__(self):
        return repr(self)
def __repr__(self):
return "ColumnIndex({}, {})".format(self.columns, [self.index[col] for col in self.columns]) | weld-python/weld/grizzly/core/indexes/column.py | from weld.grizzly.core.indexes.base import Index
class ColumnIndex(Index):
    """Column index for a Grizzly DataFrame.

    Maps each column name to the integer "slot" where its underlying data
    lives. Operations between DataFrames with equal ColumnIndex keep that
    index; otherwise the result uses the sorted union of both name sets.
    Two ColumnIndex compare equal exactly when their column names appear
    in the same order (slots do not affect equality).

    Parameters
    ----------
    columns : iterable
        Column names, in DataFrame order.
    slots : iterable of int, optional
        Slot for each column; must be a permutation of
        ``range(len(columns))``. Defaults to ``0..len(columns)-1``.

    Examples
    --------
    >>> ColumnIndex(["name", "age"])
    ColumnIndex(['name', 'age'], [0, 1])
    >>> ColumnIndex(["name", "age"], slots=[1, 2])
    Traceback (most recent call last):
    ...
    ValueError: slots must be contiguous starting at 0
    """
    def __init__(self, columns, slots=None):
        column_list = columns if isinstance(columns, list) else list(columns)
        if slots is None:
            slots = range(len(column_list))
        else:
            assert len(column_list) == len(slots)
            # Every slot must be used exactly once, with no gaps.
            if sorted(slots) != list(range(len(slots))):
                raise ValueError("slots must be contiguous starting at 0")
        # Original column order, plus the name -> slot mapping.
        self.columns = column_list
        self.index = dict(zip(column_list, slots))
    def __iter__(self):
        """Yield column names in DataFrame order."""
        return iter(self.columns)
    def zip(self, other):
        """Yield ``(name, slot_in_self, slot_in_other)`` triples.

        A slot is ``None`` when the name is missing from that index.
        Order follows ``self`` when the two indexes are equal, otherwise
        the sorted union of both name sets.
        """
        if self == other:
            for name in self.columns:
                yield (name, self.index[name], other.index[name])
        else:
            every_name = sorted(set(self.columns) | set(other.columns))
            for name in every_name:
                yield (name, self.index.get(name), other.index.get(name))
    def __getitem__(self, key):
        """Return the slot for column ``key`` (raises KeyError if absent)."""
        return self.index[key]
    def append(self, key):
        """Add a new column; its slot becomes ``len(columns) - 1``."""
        # Slot computed before appending, so it equals the new last position.
        self.index[key] = len(self.columns)
        self.columns.append(key)
    def __eq__(self, other):
        """Equal iff ``other`` is a ColumnIndex with the same name order."""
        if not isinstance(other, ColumnIndex):
            return False
        return self.columns == other.columns
    def __str__(self):
        return repr(self)
def __repr__(self):
return "ColumnIndex({}, {})".format(self.columns, [self.index[col] for col in self.columns]) | 0.901271 | 0.865963 |
def merge_sorted_arrays(arrays):
    """Merge k individually sorted arrays into one sorted list.

    A binary min-heap keeps one cursor node per non-empty sub-array, keyed
    on that sub-array's current element, so the merge runs in O(n log k)
    time for n total elements with O(k) extra space.

    :param arrays: list of sorted lists; empty sub-lists contribute nothing.
    :return: a single sorted list containing every element; when ``arrays``
        itself is empty the input object is returned unchanged.
    """
    if not len(arrays):
        return arrays
    # O(K) Time & Space: seed the heap with each sub-array's first element.
    def create_min_heap_from_first_element(arrays):
        min_heap = ModifiedMinHeap()
        for i in range(len(arrays)):
            # Skip empty sub-arrays: indexing arrays[i][0] would raise.
            if not arrays[i]:
                continue
            # node_config = [initial_element, sub_array_idx,
            #                initial_idx, sub_array_length]
            node_config = [arrays[i][0], i, 0, len(arrays[i])]
            min_heap.add_node(node_config)
        if min_heap.heap:
            min_heap.head = min_heap.heap[0]
        return min_heap
    # O(n log k): repeatedly emit the heap minimum and advance its cursor.
    def merge_and_sort(arrays, min_heap):
        merged_array = []
        while min_heap.head is not None:
            head = min_heap.head
            merged_array.append(head.value)
            head.idx += 1
            if head.idx < head.limit:
                # Same sub-array still has values: update the root node in
                # place and restore the heap property.
                head.value = arrays[head.sub_array_idx][head.idx]
                min_heap.sift_down(0)  # BUGFIX: was min_heap.siftDown(0)
                min_heap.head = min_heap.heap[0]
            else:
                # Sub-array exhausted: drop its node from the heap.
                min_heap.remove_head()  # BUGFIX: was min_heap.removeHead()
        return merged_array
    class ModifiedMinHeap:
        """Min-heap of per-sub-array cursors; minimum is exposed as ``head``."""
        class MinHeapNode:
            # One cursor: current value, owning sub-array index, position
            # within that sub-array, and the sub-array's length.
            def __init__(self, config):
                value, sub_array_idx, idx, limit = config
                self.value = value
                self.sub_array_idx = sub_array_idx
                self.idx = idx
                self.limit = limit
        def __init__(self):
            self.heap = []
            self.head = None  # node holding the smallest value, or None
        def add_node(self, node_config):
            # Append at the bottom, then bubble up to keep the heap ordered.
            node = self.MinHeapNode(node_config)
            self.heap.append(node)
            self.sift_up(-1)
        def sift_down(self, start_index):
            # Bubble the node at start_index down below any smaller child.
            heap = self.heap
            child_one_index = 2 * start_index + 1
            child_two_index = 2 * start_index + 2
            while child_one_index < len(heap):
                if child_two_index < len(heap):
                    if heap[child_one_index].value <= heap[child_two_index].value and \
                            heap[start_index].value > heap[child_one_index].value:
                        new_index = child_one_index
                    elif heap[child_one_index].value > heap[child_two_index].value and \
                            heap[start_index].value > heap[child_two_index].value:
                        new_index = child_two_index
                    else:
                        break
                else:
                    if heap[start_index].value > heap[child_one_index].value:
                        new_index = child_one_index
                    else:
                        break
                self.swap(start_index, new_index, heap)
                start_index = new_index
                child_one_index = 2 * start_index + 1
                child_two_index = 2 * start_index + 2
        def remove_head(self):
            # Swap root with last node, drop it, and re-heapify.
            if self.head is not None:
                if len(self.heap) > 1:
                    self.swap(0, len(self.heap) - 1, self.heap)
                    self.heap.pop()
                    self.sift_down(0)
                    self.head = self.heap[0]
                else:
                    self.head = None
                    self.heap.pop()
        def sift_up(self, idx):
            # Bubble the node at idx up above any larger parent.
            if idx < 0:
                idx = len(self.heap) + idx
            while idx > 0:
                parent_idx = (idx - 1) // 2
                if self.heap[idx].value < self.heap[parent_idx].value:
                    self.swap(idx, parent_idx, self.heap)
                    idx = parent_idx
                else:
                    break
        def swap(self, i, j, array):
            array[i], array[j] = array[j], array[i]
    search_heap = create_min_heap_from_first_element(arrays)
    merged_sorted_array = merge_and_sort(arrays, search_heap)
    return merged_sorted_array
def merge_sorted_arrays(arrays):
    """Merge k individually sorted arrays into one sorted list.

    A binary min-heap keeps one cursor node per non-empty sub-array, keyed
    on that sub-array's current element, so the merge runs in O(n log k)
    time for n total elements with O(k) extra space.

    :param arrays: list of sorted lists; empty sub-lists contribute nothing.
    :return: a single sorted list containing every element; when ``arrays``
        itself is empty the input object is returned unchanged.
    """
    if not len(arrays):
        return arrays
    # O(K) Time & Space: seed the heap with each sub-array's first element.
    def create_min_heap_from_first_element(arrays):
        min_heap = ModifiedMinHeap()
        for i in range(len(arrays)):
            # Skip empty sub-arrays: indexing arrays[i][0] would raise.
            if not arrays[i]:
                continue
            # node_config = [initial_element, sub_array_idx,
            #                initial_idx, sub_array_length]
            node_config = [arrays[i][0], i, 0, len(arrays[i])]
            min_heap.add_node(node_config)
        if min_heap.heap:
            min_heap.head = min_heap.heap[0]
        return min_heap
    # O(n log k): repeatedly emit the heap minimum and advance its cursor.
    def merge_and_sort(arrays, min_heap):
        merged_array = []
        while min_heap.head is not None:
            head = min_heap.head
            merged_array.append(head.value)
            head.idx += 1
            if head.idx < head.limit:
                # Same sub-array still has values: update the root node in
                # place and restore the heap property.
                head.value = arrays[head.sub_array_idx][head.idx]
                min_heap.sift_down(0)  # BUGFIX: was min_heap.siftDown(0)
                min_heap.head = min_heap.heap[0]
            else:
                # Sub-array exhausted: drop its node from the heap.
                min_heap.remove_head()  # BUGFIX: was min_heap.removeHead()
        return merged_array
    class ModifiedMinHeap:
        """Min-heap of per-sub-array cursors; minimum is exposed as ``head``."""
        class MinHeapNode:
            # One cursor: current value, owning sub-array index, position
            # within that sub-array, and the sub-array's length.
            def __init__(self, config):
                value, sub_array_idx, idx, limit = config
                self.value = value
                self.sub_array_idx = sub_array_idx
                self.idx = idx
                self.limit = limit
        def __init__(self):
            self.heap = []
            self.head = None  # node holding the smallest value, or None
        def add_node(self, node_config):
            # Append at the bottom, then bubble up to keep the heap ordered.
            node = self.MinHeapNode(node_config)
            self.heap.append(node)
            self.sift_up(-1)
        def sift_down(self, start_index):
            # Bubble the node at start_index down below any smaller child.
            heap = self.heap
            child_one_index = 2 * start_index + 1
            child_two_index = 2 * start_index + 2
            while child_one_index < len(heap):
                if child_two_index < len(heap):
                    if heap[child_one_index].value <= heap[child_two_index].value and \
                            heap[start_index].value > heap[child_one_index].value:
                        new_index = child_one_index
                    elif heap[child_one_index].value > heap[child_two_index].value and \
                            heap[start_index].value > heap[child_two_index].value:
                        new_index = child_two_index
                    else:
                        break
                else:
                    if heap[start_index].value > heap[child_one_index].value:
                        new_index = child_one_index
                    else:
                        break
                self.swap(start_index, new_index, heap)
                start_index = new_index
                child_one_index = 2 * start_index + 1
                child_two_index = 2 * start_index + 2
        def remove_head(self):
            # Swap root with last node, drop it, and re-heapify.
            if self.head is not None:
                if len(self.heap) > 1:
                    self.swap(0, len(self.heap) - 1, self.heap)
                    self.heap.pop()
                    self.sift_down(0)
                    self.head = self.heap[0]
                else:
                    self.head = None
                    self.heap.pop()
        def sift_up(self, idx):
            # Bubble the node at idx up above any larger parent.
            if idx < 0:
                idx = len(self.heap) + idx
            while idx > 0:
                parent_idx = (idx - 1) // 2
                if self.heap[idx].value < self.heap[parent_idx].value:
                    self.swap(idx, parent_idx, self.heap)
                    idx = parent_idx
                else:
                    break
        def swap(self, i, j, array):
            array[i], array[j] = array[j], array[i]
    search_heap = create_min_heap_from_first_element(arrays)
    merged_sorted_array = merge_and_sort(arrays, search_heap)
    return merged_sorted_array
import os
import sys
import logging
# Root logging config plus a module-level logger at INFO.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("tetrautils")
logger.setLevel(logging.INFO)
# Directory (sibling of this script) where test result files are kept.
TEST_RESULTS_DIR = os.path.join(
    os.path.dirname(os.path.abspath(__file__)), "test_results"
)
# Static catalogue of Mender QA test suites: Tetra suite id, display name,
# CI job name, and the results file that job produces.
MENDER_QA_TEST_SUITES = [
    {
        "id": 1,
        "name": "test_accep_qemux86_64_uefi_grub",
        "job": "test_accep_qemux86_64_uefi_grub",
        "results_file": "results_accep_qemux86_64_uefi_grub",
    },
    {
        "id": 2,
        "name": "test_accep_vexpress_qemu",
        "job": "test_accep_vexpress_qemu",
        "results_file": "results_accep_vexpress_qemu",
    },
    {
        "id": 3,
        "name": "test_accep_qemux86_64_bios_grub",
        "job": "test_accep_qemux86_64_bios_grub",
        "results_file": "results_accep_qemux86_64_bios_grub",
    },
    {
        "id": 4,
        "name": "test_accep_qemux86_64_bios_grub_gpt",
        "job": "test_accep_qemux86_64_bios_grub_gpt",
        "results_file": "results_accep_qemux86_64_bios_grub_gpt",
    },
    {
        "id": 5,
        "name": "test_accep_vexpress_qemu_uboot_uefi_grub",
        "job": "test_accep_vexpress_qemu_uboot_uefi_grub",
        "results_file": "results_accep_vexpress_qemu_uboot_uefi_grub",
    },
    {
        "id": 6,
        "name": "test_accep_vexpress_qemu_flash",
        "job": "test_accep_vexpress_qemu_flash",
        "results_file": "results_accep_vexpress_qemu_flash",
    },
    {
        "id": 7,
        "name": "test_backend_integration_open",
        "job": "test_backend_integration_open_source",
        "results_file": "results_backend_integration_open",
    },
    {
        "id": 8,
        "name": "test_backend_integration_enterprise",
        "job": "test_backend_integration_enterprise",
        "results_file": "results_backend_integration_enterprise",
    },
    {
        "id": 9,
        "name": "test_full_integration",
        "job": "test_full_integration_open_source",
        "results_file": "results_full_integration",
    },
    {
        "id": 10,
        "name": "test_full_integration_enterprise",
        "job": "test_full_integration_enterprise",
        # NOTE(review): same results_file as suite 9 — confirm this should
        # not be "results_full_integration_enterprise".
        "results_file": "results_full_integration",
    },
]
# Tetra REST API endpoints, rooted at TETRA_API_HOST (default: localhost).
_TETRA_API_HOST = os.getenv("TETRA_API_HOST", "http://localhost")
_TETRA_API_BASE_URL = "{}/api/".format(_TETRA_API_HOST)
TETRA_API_PROJECTS_URL = _TETRA_API_BASE_URL + "projects"
TETRA_API_BUILDS_URL_FMT = _TETRA_API_BASE_URL + "projects/{project_id}/builds"
TETRA_API_RESULTS_URL_FMT = (
    _TETRA_API_BASE_URL + "projects/{project_id}/builds/{build_id}/results"
)
def get_tetra_credentials():
    """Return the (user, password) pair for the Tetra API from the environment.

    Reads TETRA_USER / TETRA_PASSWORD. Missing values are returned as None
    (after logging a warning) rather than raising, so callers must check.
    """
    user = os.getenv("TETRA_USER")
    password = os.getenv("TETRA_PASSWORD")
    if user is None or password is None:
        logger.warning("TETRA_USER or TETRA_PASSWORD not found in user environment")
    return user, password | scripts/common.py |
import os
import sys
import logging
# Root logging config plus a module-level logger at INFO.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("tetrautils")
logger.setLevel(logging.INFO)
# Directory (sibling of this script) where test result files are kept.
TEST_RESULTS_DIR = os.path.join(
    os.path.dirname(os.path.abspath(__file__)), "test_results"
)
# Static catalogue of Mender QA test suites: Tetra suite id, display name,
# CI job name, and the results file that job produces.
MENDER_QA_TEST_SUITES = [
    {
        "id": 1,
        "name": "test_accep_qemux86_64_uefi_grub",
        "job": "test_accep_qemux86_64_uefi_grub",
        "results_file": "results_accep_qemux86_64_uefi_grub",
    },
    {
        "id": 2,
        "name": "test_accep_vexpress_qemu",
        "job": "test_accep_vexpress_qemu",
        "results_file": "results_accep_vexpress_qemu",
    },
    {
        "id": 3,
        "name": "test_accep_qemux86_64_bios_grub",
        "job": "test_accep_qemux86_64_bios_grub",
        "results_file": "results_accep_qemux86_64_bios_grub",
    },
    {
        "id": 4,
        "name": "test_accep_qemux86_64_bios_grub_gpt",
        "job": "test_accep_qemux86_64_bios_grub_gpt",
        "results_file": "results_accep_qemux86_64_bios_grub_gpt",
    },
    {
        "id": 5,
        "name": "test_accep_vexpress_qemu_uboot_uefi_grub",
        "job": "test_accep_vexpress_qemu_uboot_uefi_grub",
        "results_file": "results_accep_vexpress_qemu_uboot_uefi_grub",
    },
    {
        "id": 6,
        "name": "test_accep_vexpress_qemu_flash",
        "job": "test_accep_vexpress_qemu_flash",
        "results_file": "results_accep_vexpress_qemu_flash",
    },
    {
        "id": 7,
        "name": "test_backend_integration_open",
        "job": "test_backend_integration_open_source",
        "results_file": "results_backend_integration_open",
    },
    {
        "id": 8,
        "name": "test_backend_integration_enterprise",
        "job": "test_backend_integration_enterprise",
        "results_file": "results_backend_integration_enterprise",
    },
    {
        "id": 9,
        "name": "test_full_integration",
        "job": "test_full_integration_open_source",
        "results_file": "results_full_integration",
    },
    {
        "id": 10,
        "name": "test_full_integration_enterprise",
        "job": "test_full_integration_enterprise",
        # NOTE(review): same results_file as suite 9 — confirm this should
        # not be "results_full_integration_enterprise".
        "results_file": "results_full_integration",
    },
]
# Tetra REST API endpoints, rooted at TETRA_API_HOST (default: localhost).
_TETRA_API_HOST = os.getenv("TETRA_API_HOST", "http://localhost")
_TETRA_API_BASE_URL = "{}/api/".format(_TETRA_API_HOST)
TETRA_API_PROJECTS_URL = _TETRA_API_BASE_URL + "projects"
TETRA_API_BUILDS_URL_FMT = _TETRA_API_BASE_URL + "projects/{project_id}/builds"
TETRA_API_RESULTS_URL_FMT = (
    _TETRA_API_BASE_URL + "projects/{project_id}/builds/{build_id}/results"
)
def get_tetra_credentials():
    """Return the (user, password) pair for the Tetra API from the environment.

    Reads TETRA_USER / TETRA_PASSWORD. Missing values are returned as None
    (after logging a warning) rather than raising, so callers must check.
    """
    user = os.getenv("TETRA_USER")
    password = os.getenv("TETRA_PASSWORD")
    if user is None or password is None:
        logger.warning("TETRA_USER or TETRA_PASSWORD not found in user environment")
    return user, password | 0.204263 | 0.196942
from typing import TYPE_CHECKING, Dict, Tuple
if TYPE_CHECKING:
from core.cell import Cell
class Distances:
    """Distance map from a root cell to every cell linked to it.

    Starting from ``root``, each reachable cell maps to its distance from
    the root: for root -> A -> B the map holds root: 0, A: 1, B: 2.

    TODO: building the structure should probably live here rather than in
    the cell class.
    """
    def __init__(self, root: "Cell") -> None:
        self.root: "Cell" = root
        # root is always present, at distance zero.
        self.cells: Dict["Cell", int] = {root: 0}
    def __getitem__(self, key: "Cell") -> int:
        return self.cells[key]
    def __setitem__(self, key: "Cell", val: int) -> None:
        self.cells[key] = val
    def __contains__(self, key: "Cell") -> bool:
        return key in self.cells
    def get_path_to(self, goal: "Cell") -> "Distances":
        """Trace the shortest path from the root to ``goal``.

        Walks backwards from ``goal``, always stepping to a linked
        neighbour strictly closer to the root (simplified Dijkstra
        backtrack), and returns the path as a new distance map.
        Assumes ``goal`` is reachable from the root.
        """
        breadcrumbs = Distances(self.root)
        current = goal
        breadcrumbs[current] = self.cells[current]
        while current is not self.root:
            for linked in current.links:
                # Any linked cell strictly closer to the root lies on a
                # shortest path; take the first one found.
                if self.cells[linked] < self.cells[current]:
                    breadcrumbs[linked] = self.cells[linked]
                    current = linked
                    break
        return breadcrumbs
    @property
    def max(self) -> Tuple["Cell", int]:
        """The (cell, distance) pair farthest from the root."""
        farthest_cell, farthest_distance = self.root, 0
        for cell, distance in self.cells.items():
            if distance > farthest_distance:
                farthest_cell, farthest_distance = cell, distance
        return (farthest_cell, farthest_distance)
def get_cells(self):
return self.cells.keys() | core/distances.py | from typing import TYPE_CHECKING, Dict, Tuple
if TYPE_CHECKING:
from core.cell import Cell
class Distances:
    """Gives distances for all cells linked to a starting cell, called root.
    This datastructure starts at a `root` cell and gives the distance
    from all cells linked to the root to the root. So, root -> A -> B
    results in:
    cells[root] = 0
    cells[A] = 1
    cells[B] = 2
    TODO: Building the distances structure should probably happen here, and not in cell.
    """
    def __init__(self, root: "Cell") -> None:
        self.root: "Cell" = root
        self.cells: Dict["Cell", int] = {}
        # The root is always present, at distance zero.
        self.cells[root] = 0
    def __getitem__(self, key: "Cell") -> int:
        return self.cells[key]
    def __setitem__(self, key: "Cell", val: int) -> None:
        self.cells[key] = val
    def __contains__(self, key: "Cell") -> bool:
        return key in self.cells
    def get_path_to(self, goal: "Cell") -> "Distances":
        """Finds the shortest path from root to goal
        Uses simplified dijkstra to find the shortest path from root to goal, and returns
        this as a distance map, that can be handed of to a grid. Described on page 42.
        NOTE(review): assumes `goal` is reachable from the root; an
        unreachable goal raises KeyError or never terminates — confirm
        callers guarantee reachability.
        """
        current = goal
        breadcrumbs = Distances(self.root)
        breadcrumbs[current] = self.cells[current]
        while current is not self.root:
            # Step to any linked neighbour strictly closer to the root;
            # such a neighbour always lies on a shortest path.
            for neighbor in current.links:
                if self.cells[neighbor] < self.cells[current]:
                    breadcrumbs[neighbor] = self.cells[neighbor]
                    current = neighbor
                    break
        return breadcrumbs
    @property
    def max(self) -> Tuple["Cell", int]:
        """Returns the cell, and how far away it is, furthest away from the root."""
        max_distance = 0
        max_cell = self.root
        for cell, distance in self.cells.items():
            if distance > max_distance:
                max_cell = cell
                max_distance = distance
        return (max_cell, max_distance)
def get_cells(self):
return self.cells.keys() | 0.820793 | 0.69633 |
from pyfiler.setup_worker import SetupWorker
from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer
import os
import json
import argparse
import time
class MyHandler(FileSystemEventHandler):
    """Watchdog handler that routes files out of a directory by name prefix."""
    def __init__(self, watch_dir, config_obj):
        # watch_dir: the monitored directory; config_obj: mapping of
        # filename prefix -> destination directory.
        self.watch_dir = watch_dir
        self.config_obj = config_obj
    def on_modified(self, event):
        """Triggered when a file is saved or moved into the watched dir.

        The triggering event itself is ignored; the whole directory is
        rescanned and every matching file is moved to its routed location.
        """
        for entry in os.listdir(self.watch_dir):
            entry = str(entry)
            for prefix, destination in self.config_obj.items():
                prefix = str(prefix)
                if not entry.startswith(prefix):
                    continue
                srcpath = self.watch_dir + '/' + entry
                dstpath = destination + '/' + entry
                os.rename(srcpath, dstpath)
                print("[*] successfully renamed:")
                print("[*] src: " + srcpath)
                print("[*] dest: " + dstpath)
def main(args):
    """Set up routing dirs, then watch the source directory until Ctrl-C.

    :param args: parsed CLI namespace (watch_dir, dry, interval, ...).
    """
    # Source directory to watch over; falls back to a hard-coded path.
    # NOTE(review): consider making the default configurable.
    if args.watch_dir is not None:
        watch_dir = args.watch_dir
    else:
        watch_dir = "/Users/cameron.merrick/Downloads/test"
    # Load the prefix -> destination-directory routing table.
    with open("/Users/cameron.merrick/code/pyfiler/pyfiler/data/config.json", "r") as read_file:
        config_obj = json.load(read_file)
    # Scan the filesystem and create the dirs the config requires.
    setupworker = SetupWorker(config_obj)
    needed_dirs = setupworker.exec_setup_process(dry_run=args.dry)  # defaults to true to be safe
    setupworker.create_missing_dirs(needed_dirs)
    print("[*] made it through the main method successfully.")
    # Set up the watchdog worker to watch the directory for changes
    # (newly downloaded files).
    myhandler = MyHandler(watch_dir, config_obj)
    observer = Observer()
    observer.schedule(myhandler, watch_dir, recursive=True)
    observer.start()
    # Keep the process alive so the observer thread can keep running.
    try:
        while True:
            # Poll watch_dir every `interval` seconds (default 10).
            time.sleep(int(args.interval))
    except KeyboardInterrupt:
        observer.stop()
        # BUGFIX: was `observer.join` (bare attribute access, a no-op) —
        # the observer thread was never actually joined on shutdown.
        observer.join()
if __name__ == '__main__':
    """the start of the program."""  # NOTE: bare string, not a docstring; it is a no-op statement
    # create the argparser and create some optional arguments
    parser = argparse.ArgumentParser(description='main method to invoke pyfiler', add_help=True)
    parser.add_argument('-w', '--watch_dir', type=str, help='specify a directory to monitor', metavar='')
    parser.add_argument('-d', '--dry', action='store_true', help='toggle the script to run in dry_run mode')
    parser.add_argument('-i', '--interval', type=int, default=10, help='interval (seconds) between poll requests by the watcher', metavar='')
    # add an output volume control mutually-exclusive argument group
    # NOTE(review): --verbose/--quiet are parsed but not consumed by main() —
    # confirm whether the flags are still intended.
    me_group = parser.add_mutually_exclusive_group()
    me_group.add_argument('-v', '--verbose', action='store_true', help='toggle verbose outputs to stdout')
    me_group.add_argument('-q', '--quiet', action='store_true', help='redirect stdout messages to log file')
    # now collect all the arguments and pass the object to main()
    args = parser.parse_args()
main(args) | pyfiler/__main__.py | from pyfiler.setup_worker import SetupWorker
from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer
import os
import json
import argparse
import time
class MyHandler(FileSystemEventHandler):
    """subclass of FileSystemEventHandler, implements on_modified()."""
    def __init__(self, watch_dir, config_obj):
        # watch_dir: the monitored directory; config_obj: mapping of
        # filename prefix -> destination directory.
        self.watch_dir = watch_dir
        self.config_obj = config_obj
    def on_modified(self, event):
        """a method triggered when a file is saved or moved into the dir"""
        # NOTE: the triggering `event` is ignored; the whole directory is
        # rescanned and every matching file is moved on each modification.
        prefix_options = self.config_obj.keys()
        for new_file in os.listdir(self.watch_dir):
            new_file = str(new_file)
            for prefix in prefix_options:
                prefix = str(prefix)
                if new_file.startswith(prefix):
                    srcpath = self.watch_dir + '/' + new_file
                    dstpath = self.config_obj[prefix] + '/' + new_file
                    # Move the file into the directory routed for its prefix.
                    os.rename(srcpath, dstpath)
                    print("[*] successfully renamed:")
                    print("[*] src: " + srcpath)
                    print("[*] dest: " + dstpath)
def main(args):
    """Set up routing dirs, then watch the source directory until Ctrl-C.

    :param args: parsed CLI namespace (watch_dir, dry, interval, ...).
    """
    # Source directory to watch over; falls back to a hard-coded path.
    # NOTE(review): consider making the default configurable.
    if args.watch_dir is not None:
        watch_dir = args.watch_dir
    else:
        watch_dir = "/Users/cameron.merrick/Downloads/test"
    # Load the prefix -> destination-directory routing table.
    with open("/Users/cameron.merrick/code/pyfiler/pyfiler/data/config.json", "r") as read_file:
        config_obj = json.load(read_file)
    # Scan the filesystem and create the dirs the config requires.
    setupworker = SetupWorker(config_obj)
    needed_dirs = setupworker.exec_setup_process(dry_run=args.dry)  # defaults to true to be safe
    setupworker.create_missing_dirs(needed_dirs)
    print("[*] made it through the main method successfully.")
    # Set up the watchdog worker to watch the directory for changes
    # (newly downloaded files).
    myhandler = MyHandler(watch_dir, config_obj)
    observer = Observer()
    observer.schedule(myhandler, watch_dir, recursive=True)
    observer.start()
    # Keep the process alive so the observer thread can keep running.
    try:
        while True:
            # Poll watch_dir every `interval` seconds (default 10).
            time.sleep(int(args.interval))
    except KeyboardInterrupt:
        observer.stop()
        # BUGFIX: was `observer.join` (bare attribute access, a no-op) —
        # the observer thread was never actually joined on shutdown.
        observer.join()
if __name__ == '__main__':
    """the start of the program."""  # NOTE: bare string, not a docstring; it is a no-op statement
    # create the argparser and create some optional arguments
    parser = argparse.ArgumentParser(description='main method to invoke pyfiler', add_help=True)
    parser.add_argument('-w', '--watch_dir', type=str, help='specify a directory to monitor', metavar='')
    parser.add_argument('-d', '--dry', action='store_true', help='toggle the script to run in dry_run mode')
    parser.add_argument('-i', '--interval', type=int, default=10, help='interval (seconds) between poll requests by the watcher', metavar='')
    # add an output volume control mutually-exclusive argument group
    # NOTE(review): --verbose/--quiet are parsed but not consumed by main() —
    # confirm whether the flags are still intended.
    me_group = parser.add_mutually_exclusive_group()
    me_group.add_argument('-v', '--verbose', action='store_true', help='toggle verbose outputs to stdout')
    me_group.add_argument('-q', '--quiet', action='store_true', help='redirect stdout messages to log file')
    # now collect all the arguments and pass the object to main()
    args = parser.parse_args()
main(args) | 0.399577 | 0.076857 |
from __future__ import print_function
import os
import subprocess
import sys
import yaml
from bcbiovm.docker import manage, mounts
DEFAULT_IMAGE = "quay.io/bcbio/bcbio-vc"
def full(args, dockerconf):
    """Full installation of docker image and data.

    Runs up to three update steps driven by flags on ``args``: wrapper
    scripts, the docker image with third party tools, and reference
    biological data.  Finishes by saving the options used as defaults
    for subsequent upgrades.
    """
    updates = []
    # Fold previously saved defaults (genomes, aligners, image) into args.
    args = add_install_defaults(args)
    if args.wrapper:
        updates.append("wrapper scripts")
        upgrade_bcbio_vm()
    # Mount points exposing the host data directory inside the container.
    dmounts = mounts.prepare_system(args.datadir, dockerconf["biodata_dir"])
    if args.install_tools:
        updates.append("bcbio-nextgen code and third party tools")
        pull(args, dockerconf)
        _check_docker_image(args)
        # Ensure external galaxy configuration in sync when doing tool upgrade
        manage.run_bcbio_cmd(args.image, dmounts, ["upgrade"])
    if args.install_data:
        # Data installs require explicit genomes and aligners.
        if len(args.genomes) == 0:
            print("Data not installed, no genomes provided with `--genomes` flag")
            sys.exit(1)
        elif len(args.aligners) == 0:
            print("Data not installed, no aligners provided with `--aligners` flag")
            sys.exit(1)
        else:
            updates.append("biological data")
        if _check_docker_image(args, raise_error=False):
            # Image available locally: run the data upgrade in the container.
            manage.run_bcbio_cmd(args.image, dmounts, _get_cl(args))
        else:
            # No local image: fall back to a host-side bcbio data install,
            # faking the CLI attributes bcbio's installer expects.
            args.upgrade = False
            args.tools = False
            args.tooldir = False
            args.toolplus = False
            args.isolate = True
            args.distribution = None
            args.cwl = True
            print(args)
            from bcbio import install
            install.upgrade_bcbio(args)
    _save_install_defaults(args)
    if updates:
        print("\nbcbio-nextgen-vm updated with latest %s" % " and ".join(updates))
    else:
        print("\nNo update targets specified, need '--wrapper', '--tools' or '--data'\n"
              "See 'bcbio_vm.py upgrade -h' for more details.")
def _get_cl(args):
clargs = ["upgrade"]
if args.install_data:
clargs.append("--data")
for g in args.genomes:
clargs.extend(["--genomes", g])
for a in args.aligners:
clargs.extend(["--aligners", a])
for t in args.datatarget:
clargs.extend(["--datatarget", t])
return clargs
def upgrade_bcbio_vm():
    """Upgrade bcbio-nextgen-vm wrapper code via conda, when conda-managed."""
    # conda is expected to live next to the current interpreter.
    conda_bin = os.path.join(os.path.dirname(os.path.realpath(sys.executable)), "conda")
    if os.path.exists(conda_bin):
        packages = ["bcbio-nextgen-vm", "bcbio-nextgen", "cwltool", "arvados-cwl-runner",
                    "toil", "cromwell"]
        subprocess.check_call([conda_bin, "install", "-y",
                               "-c", "conda-forge", "-c", "bioconda"] + packages)
    else:
        print("Cannot update bcbio-nextgen-vm; not installed with conda")
def pull(args, dockerconf):
    """Pull down latest docker image."""
    print("Retrieving bcbio-nextgen docker image with code and tools")
    image = args.image
    assert image, "Unspecified image name for docker import"
    cmd = ["docker", "pull", image]
    subprocess.check_call(cmd)
def _save_install_defaults(args):
    """Save arguments passed to installation to be used on subsequent upgrades.

    Avoids needing to re-include genomes and aligners on command line.
    Merges this run's genomes/aligners (and any non-default image) into
    the install-params.yaml file under the data directory.
    """
    install_config = _get_config_file(args)
    if install_config is None:
        return
    # Start from the existing saved config when present and non-empty.
    if os.path.exists(install_config) and os.path.getsize(install_config) > 0:
        with open(install_config) as in_handle:
            cur_config = yaml.safe_load(in_handle)
    else:
        cur_config = {}
    # Union the saved lists with this run's values, preserving order.
    for attr in ["genomes", "aligners"]:
        if not cur_config.get(attr):
            cur_config[attr] = []
        for x in getattr(args, attr):
            if x not in cur_config[attr]:
                cur_config[attr].append(str(x))
    # Only persist explicitly customized images, not the default one.
    if args.image != DEFAULT_IMAGE and args.image:
        cur_config["image"] = args.image
    with open(install_config, "w") as out_handle:
        yaml.dump(cur_config, out_handle, default_flow_style=False, allow_unicode=False)
def _get_install_defaults(args):
    """Load previously saved install options; {} when nothing was saved."""
    config_file = _get_config_file(args)
    has_content = (config_file and os.path.exists(config_file)
                   and os.path.getsize(config_file) > 0)
    if not has_content:
        return {}
    with open(config_file) as in_handle:
        return yaml.safe_load(in_handle)
def _add_docker_defaults(args, default_args):
if not hasattr(args, "image") or not args.image:
if default_args.get("image") and not default_args.get("images") == "None":
args.image = default_args["image"]
else:
args.image = DEFAULT_IMAGE
return args
def add_install_defaults(args):
    """Merge previously saved installation defaults into command line arguments."""
    saved = _get_install_defaults(args)
    # Append any remembered genomes/aligners not already requested.
    for attr in ["genomes", "aligners"]:
        current = getattr(args, attr)
        for saved_val in saved.get(attr, []):
            if saved_val not in current:
                current.append(saved_val)
        setattr(args, attr, current)
    return _add_docker_defaults(args, saved)
def _check_docker_image(args, raise_error=True):
    """Ensure docker image exists in the local repository.

    Returns True when ``args.image`` (optionally ``name:tag``) appears in
    ``docker images``; otherwise raises ValueError, or returns False when
    ``raise_error`` is False (the original fell off the end and returned
    None, which worked only because None is falsy).
    """
    a_image = args.image
    a_tag = None
    # Only a ':' after the last '/' separates a tag; a plain split(":")
    # raised ValueError for registry-port images like host:5000/repo.
    sep = a_image.rfind(":")
    if sep > a_image.rfind("/"):
        a_image, a_tag = a_image[:sep], a_image[sep + 1:]
    for image in subprocess.check_output(["docker", "images"]).decode(errors="ignore").split("\n"):
        parts = image.split()
        if len(parts) > 1 and parts[0] == a_image:
            if not a_tag or parts[1] == a_tag:
                return True
    if raise_error:
        raise ValueError("Could not find docker image %s in local repository" % args.image)
    return False
def docker_image_arg(args):
    """Ensure args carries a docker image, pulling saved defaults when unset."""
    missing_image = not hasattr(args, "image") or not args.image
    if missing_image:
        args = _add_docker_defaults(args, _get_install_defaults(args))
    _check_docker_image(args)
    return args
def _get_config_file(args):
    """Return the install-params.yaml path under the data directory,
    creating the config directory when needed.
    """
    config_dir = os.path.join(args.datadir, "config")
    if not os.path.exists(config_dir):
        os.makedirs(config_dir)
    # NOTE(review): the trailing "| repo | import" text below is dataset-export
    # residue from the dump this file came from, not part of the function.
    return os.path.join(config_dir, "install-params.yaml") | bcbiovm/docker/install.py | from __future__ import print_function
import os
import subprocess
import sys
import yaml
from bcbiovm.docker import manage, mounts
DEFAULT_IMAGE = "quay.io/bcbio/bcbio-vc"
def full(args, dockerconf):
    """Full installation of docker image and data.

    Runs up to three update steps driven by flags on ``args``: wrapper
    scripts, the docker image with third party tools, and reference
    biological data.  Finishes by saving the options used as defaults
    for subsequent upgrades.
    """
    updates = []
    # Fold previously saved defaults (genomes, aligners, image) into args.
    args = add_install_defaults(args)
    if args.wrapper:
        updates.append("wrapper scripts")
        upgrade_bcbio_vm()
    # Mount points exposing the host data directory inside the container.
    dmounts = mounts.prepare_system(args.datadir, dockerconf["biodata_dir"])
    if args.install_tools:
        updates.append("bcbio-nextgen code and third party tools")
        pull(args, dockerconf)
        _check_docker_image(args)
        # Ensure external galaxy configuration in sync when doing tool upgrade
        manage.run_bcbio_cmd(args.image, dmounts, ["upgrade"])
    if args.install_data:
        # Data installs require explicit genomes and aligners.
        if len(args.genomes) == 0:
            print("Data not installed, no genomes provided with `--genomes` flag")
            sys.exit(1)
        elif len(args.aligners) == 0:
            print("Data not installed, no aligners provided with `--aligners` flag")
            sys.exit(1)
        else:
            updates.append("biological data")
        if _check_docker_image(args, raise_error=False):
            # Image available locally: run the data upgrade in the container.
            manage.run_bcbio_cmd(args.image, dmounts, _get_cl(args))
        else:
            # No local image: fall back to a host-side bcbio data install,
            # faking the CLI attributes bcbio's installer expects.
            args.upgrade = False
            args.tools = False
            args.tooldir = False
            args.toolplus = False
            args.isolate = True
            args.distribution = None
            args.cwl = True
            print(args)
            from bcbio import install
            install.upgrade_bcbio(args)
    _save_install_defaults(args)
    if updates:
        print("\nbcbio-nextgen-vm updated with latest %s" % " and ".join(updates))
    else:
        print("\nNo update targets specified, need '--wrapper', '--tools' or '--data'\n"
              "See 'bcbio_vm.py upgrade -h' for more details.")
def _get_cl(args):
clargs = ["upgrade"]
if args.install_data:
clargs.append("--data")
for g in args.genomes:
clargs.extend(["--genomes", g])
for a in args.aligners:
clargs.extend(["--aligners", a])
for t in args.datatarget:
clargs.extend(["--datatarget", t])
return clargs
def upgrade_bcbio_vm():
    """Upgrade bcbio-nextgen-vm wrapper code via conda, when conda-managed."""
    # conda is expected to live next to the current interpreter.
    conda_bin = os.path.join(os.path.dirname(os.path.realpath(sys.executable)), "conda")
    if os.path.exists(conda_bin):
        packages = ["bcbio-nextgen-vm", "bcbio-nextgen", "cwltool", "arvados-cwl-runner",
                    "toil", "cromwell"]
        subprocess.check_call([conda_bin, "install", "-y",
                               "-c", "conda-forge", "-c", "bioconda"] + packages)
    else:
        print("Cannot update bcbio-nextgen-vm; not installed with conda")
def pull(args, dockerconf):
    """Pull down latest docker image."""
    print("Retrieving bcbio-nextgen docker image with code and tools")
    image = args.image
    assert image, "Unspecified image name for docker import"
    cmd = ["docker", "pull", image]
    subprocess.check_call(cmd)
def _save_install_defaults(args):
    """Save arguments passed to installation to be used on subsequent upgrades.

    Avoids needing to re-include genomes and aligners on command line.
    Merges this run's genomes/aligners (and any non-default image) into
    the install-params.yaml file under the data directory.
    """
    install_config = _get_config_file(args)
    if install_config is None:
        return
    # Start from the existing saved config when present and non-empty.
    if os.path.exists(install_config) and os.path.getsize(install_config) > 0:
        with open(install_config) as in_handle:
            cur_config = yaml.safe_load(in_handle)
    else:
        cur_config = {}
    # Union the saved lists with this run's values, preserving order.
    for attr in ["genomes", "aligners"]:
        if not cur_config.get(attr):
            cur_config[attr] = []
        for x in getattr(args, attr):
            if x not in cur_config[attr]:
                cur_config[attr].append(str(x))
    # Only persist explicitly customized images, not the default one.
    if args.image != DEFAULT_IMAGE and args.image:
        cur_config["image"] = args.image
    with open(install_config, "w") as out_handle:
        yaml.dump(cur_config, out_handle, default_flow_style=False, allow_unicode=False)
def _get_install_defaults(args):
    """Load previously saved install options; {} when nothing was saved."""
    config_file = _get_config_file(args)
    has_content = (config_file and os.path.exists(config_file)
                   and os.path.getsize(config_file) > 0)
    if not has_content:
        return {}
    with open(config_file) as in_handle:
        return yaml.safe_load(in_handle)
def _add_docker_defaults(args, default_args):
if not hasattr(args, "image") or not args.image:
if default_args.get("image") and not default_args.get("images") == "None":
args.image = default_args["image"]
else:
args.image = DEFAULT_IMAGE
return args
def add_install_defaults(args):
    """Merge previously saved installation defaults into command line arguments."""
    saved = _get_install_defaults(args)
    # Append any remembered genomes/aligners not already requested.
    for attr in ["genomes", "aligners"]:
        current = getattr(args, attr)
        for saved_val in saved.get(attr, []):
            if saved_val not in current:
                current.append(saved_val)
        setattr(args, attr, current)
    return _add_docker_defaults(args, saved)
def _check_docker_image(args, raise_error=True):
    """Ensure docker image exists in the local repository.

    Returns True when ``args.image`` (optionally ``name:tag``) appears in
    ``docker images``; otherwise raises ValueError, or returns False when
    ``raise_error`` is False (the original fell off the end and returned
    None, which worked only because None is falsy).
    """
    a_image = args.image
    a_tag = None
    # Only a ':' after the last '/' separates a tag; a plain split(":")
    # raised ValueError for registry-port images like host:5000/repo.
    sep = a_image.rfind(":")
    if sep > a_image.rfind("/"):
        a_image, a_tag = a_image[:sep], a_image[sep + 1:]
    for image in subprocess.check_output(["docker", "images"]).decode(errors="ignore").split("\n"):
        parts = image.split()
        if len(parts) > 1 and parts[0] == a_image:
            if not a_tag or parts[1] == a_tag:
                return True
    if raise_error:
        raise ValueError("Could not find docker image %s in local repository" % args.image)
    return False
def docker_image_arg(args):
    """Ensure args carries a docker image, pulling saved defaults when unset."""
    missing_image = not hasattr(args, "image") or not args.image
    if missing_image:
        args = _add_docker_defaults(args, _get_install_defaults(args))
    _check_docker_image(args)
    return args
def _get_config_file(args):
    """Return the install-params.yaml path under the data directory,
    creating the config directory when needed.
    """
    config_dir = os.path.join(args.datadir, "config")
    if not os.path.exists(config_dir):
        os.makedirs(config_dir)
    # NOTE(review): the trailing "| float | float" text below is dataset-export
    # residue from the dump this file came from, not part of the function.
    return os.path.join(config_dir, "install-params.yaml") | 0.312475 | 0.12544
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import re
from nltk.corpus import stopwords
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.multiclass import OneVsRestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score ,confusion_matrix
# Code starts here
# load data
# NOTE(review): `path` is not defined in this file -- it is presumably
# injected by the hosting platform; confirm it points at the headlines CSV.
news = pd.read_csv(path)
# subset data: keep only the headline text and its category label
news = news[["TITLE", "CATEGORY"]]
# distribution of classes
dist = news["CATEGORY"].value_counts()
# display class distribution
print(dist)
print(news.head())
# display data
# Code ends here
# --------------
# Code starts here
# stopwords
stop = set(stopwords.words('english'))
# retain only alphabets (every non-letter becomes a space)
news['TITLE'] = news['TITLE'].apply(lambda x:re.sub("[^a-zA-Z]", " ",x))
# convert to lowercase and tokenize
news['TITLE'] = news['TITLE'].apply(lambda x:x.lower().split())
# remove stopwords
news['TITLE'] = news['TITLE'].apply(lambda x:[i for i in x if i not in stop])
# join list elements back into one cleaned string per headline
news['TITLE']=news['TITLE'].apply(lambda x: ' '.join(x))
# split into training and test sets (80/20, fixed seed for reproducibility)
X_train,X_test,y_train,y_test=train_test_split(news['TITLE'],news['CATEGORY'],test_size=0.2,random_state=3)
# Code ends here
# --------------
# Code starts here
# initialize count vectorizer
count_vectorizer = CountVectorizer()
# initialize tfidf vectorizer (uni- to tri-grams)
tfidf_vectorizer = TfidfVectorizer(ngram_range=(1,3))
# fit on the training data only, then apply the learned vocabulary to test.
# BUG FIX: the original called fit_transform on the test set too, refitting
# the vectorizers and producing a feature space incompatible with the models
# trained on the train matrices.
X_train_count = count_vectorizer.fit_transform(X_train)
X_test_count = count_vectorizer.transform(X_test)
# fit and transform with tfidf vectorizer (same train-only fitting)
X_train_tfidf = tfidf_vectorizer.fit_transform(X_train)
X_test_tfidf = tfidf_vectorizer.transform(X_test)
# Code ends here
# --------------
# Code starts here
# initialize multinomial naive bayes (one model per feature set)
nb_1=MultinomialNB()
nb_2=MultinomialNB()
# BUG FIX: Y_train/Y_test were undefined names (NameError); the train/test
# split above created lowercase y_train/y_test.
# fit on count vectorizer training data
nb_1.fit(X_train_count,y_train)
# fit on tfidf vectorizer training data
nb_2.fit(X_train_tfidf,y_train)
# accuracy with count vectorizer
acc_count_nb=accuracy_score(nb_1.predict(X_test_count), y_test)
# accuracy with tfidf vectorizer
acc_tfidf_nb=accuracy_score(nb_2.predict(X_test_tfidf), y_test)
# display accuracies
print(acc_count_nb)
print(acc_tfidf_nb)
# Code ends here
# --------------
import warnings
warnings.filterwarnings('ignore')
# initialize one-vs-rest logistic regression (one model per feature set)
logreg_1 = OneVsRestClassifier(LogisticRegression(random_state=10))
logreg_2 = OneVsRestClassifier(LogisticRegression(random_state=10))
# BUG FIX: Y_train/Y_test were undefined names (NameError); use the
# lowercase y_train/y_test produced by the train/test split.
# fit on count vectorizer training data
logreg_1.fit(X_train_count, y_train)
# fit on tfidf vectorizer training data
logreg_2.fit(X_train_tfidf, y_train)
# accuracy with count vectorizer
acc_count_logreg = accuracy_score(logreg_1.predict(X_test_count), y_test)
# accuracy with tfidf vectorizer
acc_tfidf_logreg = accuracy_score(logreg_2.predict(X_test_tfidf), y_test)
# display accuracies
print(acc_count_logreg)
print(acc_tfidf_logreg)
# Code ends here | NLP:-Classify-the-News-Articles/code.py | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import re
from nltk.corpus import stopwords
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.multiclass import OneVsRestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score ,confusion_matrix
# Code starts here
# load data
# NOTE(review): `path` is not defined in this file -- it is presumably
# injected by the hosting platform; confirm it points at the headlines CSV.
news = pd.read_csv(path)
# subset data: keep only the headline text and its category label
news = news[["TITLE", "CATEGORY"]]
# distribution of classes
dist = news["CATEGORY"].value_counts()
# display class distribution
print(dist)
print(news.head())
# display data
# Code ends here
# --------------
# Code starts here
# stopwords
stop = set(stopwords.words('english'))
# retain only alphabets (every non-letter becomes a space)
news['TITLE'] = news['TITLE'].apply(lambda x:re.sub("[^a-zA-Z]", " ",x))
# convert to lowercase and tokenize
news['TITLE'] = news['TITLE'].apply(lambda x:x.lower().split())
# remove stopwords
news['TITLE'] = news['TITLE'].apply(lambda x:[i for i in x if i not in stop])
# join list elements back into one cleaned string per headline
news['TITLE']=news['TITLE'].apply(lambda x: ' '.join(x))
# split into training and test sets (80/20, fixed seed for reproducibility)
X_train,X_test,y_train,y_test=train_test_split(news['TITLE'],news['CATEGORY'],test_size=0.2,random_state=3)
# Code ends here
# --------------
# Code starts here
# initialize count vectorizer
count_vectorizer = CountVectorizer()
# initialize tfidf vectorizer (uni- to tri-grams)
tfidf_vectorizer = TfidfVectorizer(ngram_range=(1,3))
# fit on the training data only, then apply the learned vocabulary to test.
# BUG FIX: the original called fit_transform on the test set too, refitting
# the vectorizers and producing a feature space incompatible with the models
# trained on the train matrices.
X_train_count = count_vectorizer.fit_transform(X_train)
X_test_count = count_vectorizer.transform(X_test)
# fit and transform with tfidf vectorizer (same train-only fitting)
X_train_tfidf = tfidf_vectorizer.fit_transform(X_train)
X_test_tfidf = tfidf_vectorizer.transform(X_test)
# Code ends here
# --------------
# Code starts here
# initialize multinomial naive bayes (one model per feature set)
nb_1=MultinomialNB()
nb_2=MultinomialNB()
# BUG FIX: Y_train/Y_test were undefined names (NameError); the train/test
# split above created lowercase y_train/y_test.
# fit on count vectorizer training data
nb_1.fit(X_train_count,y_train)
# fit on tfidf vectorizer training data
nb_2.fit(X_train_tfidf,y_train)
# accuracy with count vectorizer
acc_count_nb=accuracy_score(nb_1.predict(X_test_count), y_test)
# accuracy with tfidf vectorizer
acc_tfidf_nb=accuracy_score(nb_2.predict(X_test_tfidf), y_test)
# display accuracies
print(acc_count_nb)
print(acc_tfidf_nb)
# Code ends here
# --------------
import warnings
warnings.filterwarnings('ignore')
# initialize one-vs-rest logistic regression (one model per feature set)
logreg_1 = OneVsRestClassifier(LogisticRegression(random_state=10))
logreg_2 = OneVsRestClassifier(LogisticRegression(random_state=10))
# BUG FIX: Y_train/Y_test were undefined names (NameError); use the
# lowercase y_train/y_test produced by the train/test split.
# fit on count vectorizer training data
logreg_1.fit(X_train_count, y_train)
# fit on tfidf vectorizer training data
logreg_2.fit(X_train_tfidf, y_train)
# accuracy with count vectorizer
acc_count_logreg = accuracy_score(logreg_1.predict(X_test_count), y_test)
# accuracy with tfidf vectorizer
acc_tfidf_logreg = accuracy_score(logreg_2.predict(X_test_tfidf), y_test)
# display accuracies
print(acc_count_logreg)
print(acc_tfidf_logreg)
# Code ends here | 0.475849 | 0.433981 |
import sys, os, pdb
curr_path = os.getcwd();
sys.path.append(curr_path+'/..');
# Importing stuff from all folders in python path
import numpy as np
from focusfun import *
from refocus import *
from KSpaceFunctions import *
# TESTING CODE FOR FOCUS_DATA Below
import scipy.io as sio
from scipy.signal import hilbert, gausspulse
from scipy.interpolate import RectBivariateSpline
import matplotlib.pyplot as plt
# Methods of Recovery
#method = 'Adjoint';
method = 'Tikhonov';
# Pulse Definition
fc = 5.0e6; # Hz
fracBW = 0.7;
fs = 20e6; # Hz
# Create Pulse in Both Time and Frequency Domain
Nf = 1024; t = np.arange(-Nf,Nf+1)/fs; # (s) Time Vector centered about t=0
impResp = gausspulse(t, fc=fc, bw=fracBW); # Calculate Transmit Pulse
n = impResp.size; P_f = np.fft.fftshift(np.fft.fft(impResp));
f = np.mod(np.fft.fftshift(np.arange(n)*fs/n)+fs/2,fs)-fs/2;
P_f = (f/(f+fc/10))*np.abs(P_f);
P_f = P_f[f>0]; f = f[f>0]; # keep positive frequencies only
# Aperture Definition
c = 1540; # m/s (speed of sound; original comment mislabeled this m/usec)
LAMBDA = c/fc;
elemSpace = 0.15e-3; # m
Nelem = 96;
xpos = np.arange(-(Nelem-1)/2, 1+(Nelem-1)/2)*elemSpace;
tx_origin_x = np.arange(-0.00365, 0.00370, 0.00005); # Transmit Origin in [m]
focDepth = 0.020; # m
# Transmit Apodization
X_XDCR, TX_ORIGIN_X = np.meshgrid(xpos, tx_origin_x);
rect = lambda x: np.heaviside(x+1/2,1/2)-np.heaviside(x-1/2,1/2);
sigma_rect = 0.008; # [m]
tx_apod = rect((X_XDCR-TX_ORIGIN_X)/sigma_rect);
# Simulation Space and Time
Nx0 = 256; m = 2; n = 2; dov = 0.060; # m
x = np.arange(-(Nx0*m-1)/2,1+(Nx0*m-1)/2)*(elemSpace/m);
Nu1 = np.round(dov/(elemSpace/n));
z = (np.arange(Nu1))*(elemSpace/n);
t = np.arange(0,2,0.05)*np.abs(focDepth)/c;
## Ground-Truth Multistatic-Transmit Synthetic Aperture
# BUG FIX: np.complex was deprecated in NumPy 1.20 and removed in 1.24;
# the builtin `complex` is the documented replacement (same dtype).
multistatic_pwResp = np.zeros((x.size, f.size, Nelem), dtype=complex); # Pulse-Wave Frequency Response
multistatic_kspace = np.zeros((z.size, x.size, Nelem), dtype=complex); # K-Space Response
for elem_idx in np.arange(Nelem):
    single_element = np.zeros(Nelem);
    single_element[elem_idx] = 1; # Single Element Apodization
    # Pulse-Wave Frequency Response
    kx, multistatic_pwResp[:,:,elem_idx] = \
        pwResp(x, elemSpace, single_element, np.zeros(Nelem), P_f, f, c);
    # K-Space Response
    kz, multistatic_kspace[:,:,elem_idx] = \
        pwResp2kSpace(kx, f, multistatic_pwResp[:,:,elem_idx], z, c);
Kx, Kz = np.meshgrid(kx, kz); # K-Space Grid
K = np.sqrt(Kx**2 + Kz**2); # Radius in K-Space
## Transmit Pulse-Wave Frequency Response for Each Transmit Beam
tx_pwResp = np.zeros((x.size, f.size, tx_origin_x.size), dtype=complex);
tx_delays = np.zeros((tx_origin_x.size, Nelem), dtype=complex);
for tx_origin_x_idx in np.arange(tx_origin_x.size):
    # Calculating Transmit Delays for Each Transmit Beam
    if np.isinf(focDepth):
        tx_delays[tx_origin_x_idx, :] = np.zeros(xpos.shape);
    else:
        tx_delays[tx_origin_x_idx, :] = (np.sign(focDepth) * \
            np.sqrt((xpos-tx_origin_x[tx_origin_x_idx])**2+focDepth**2)-focDepth)/c;
    # Pulse-Wave Frequency Response for Each Transmit Beam
    kx, tx_pwResp[:,:,tx_origin_x_idx] = pwResp(x, elemSpace, \
        tx_apod[tx_origin_x_idx, :], tx_delays[tx_origin_x_idx, :], P_f, f, c);
# Calculate K-Space Response For Each Transmit Beam
tx_kspace = np.zeros((z.size, x.size, tx_origin_x.size), dtype=complex); # K-Space Response
for tx_origin_x_idx in np.arange(tx_origin_x.size): # K-Space Response
    _, tx_kspace[:,:,tx_origin_x_idx] = \
        pwResp2kSpace(kx, f, tx_pwResp[:,:,tx_origin_x_idx], z, c);
# Reconstruct Transmit Wavefield for Transmit Beam
tx_origin_x_idx = 74;
_, _, psf_t = kspace2wavefield(kx, kz, (Kz>0)*tx_kspace[:,:,tx_origin_x_idx], c, t);
# K-Space of a Single Transmit Beam
plt.figure(); imagesc(kx, kz, np.abs(tx_kspace[:,:,tx_origin_x_idx]), \
    (0, np.max(np.abs(tx_kspace[:,:,tx_origin_x_idx]))) );
plt.xlabel('lateral frequency [1/m]');
plt.ylabel('axial frequency [1/m]');
plt.title('K-Space of Selected Transmit Beam');
## Simulate Multistatic Synthetic Aperture Recovery Techniques
# Decode Multistatic data Using REFoCUS
if method == 'Adjoint':
    multistatic_recov_pwResp = \
        multistatic_recov(kx, f, tx_pwResp, tx_apod, tx_delays, Hinv_adjoint, lambda f: 1);
elif method == 'Tikhonov':
    multistatic_recov_pwResp = \
        multistatic_recov(kx, f, tx_pwResp, tx_apod, tx_delays, Hinv_tikhonov, 1e-3);
# Calculate K-Space Responses For Each Recovered Element
multistatic_recov_kspace = np.zeros((z.size, x.size, Nelem), dtype=complex); # K-Space Response
for elem_idx in np.arange(Nelem): # K-Space Response
    _, multistatic_recov_kspace[:,:,elem_idx] = \
        pwResp2kSpace(kx, f, multistatic_recov_pwResp[:,:,elem_idx], z, c);
## K-Space and Wavefield for Single Element Transmits
# K-Space of the true vs. recovered single-element responses, side by side
plt.figure(); plt.subplot(1,2,1);
imagesc(kx, kz, np.mean(np.abs(multistatic_kspace), axis=2), \
    (0,np.max(np.mean(np.abs(multistatic_kspace), axis=2))) );
plt.xlabel('lateral frequency [1/m]');
plt.ylabel('axial frequency [1/m]');
plt.title('K-Space of True Single Element Response');
plt.subplot(1,2,2);
imagesc(kx, kz, np.mean(np.abs(multistatic_recov_kspace), axis=2), \
    (0,np.max(np.mean(np.abs(multistatic_recov_kspace), axis=2))) );
plt.xlabel('lateral frequency [1/m]');
plt.ylabel('axial frequency [1/m]');
plt.title('K-Space of Recovered Single Element Response');
plt.show();
# Wavefield Due to Each Individual Transmit Element
elem_idx = 48;
_, _, psf_t_recon = kspace2wavefield(kx, kz, (Kz>0)*multistatic_recov_kspace[:,:,elem_idx], c, t);
_, _, psf_t_true = kspace2wavefield(kx, kz, (Kz>0)*multistatic_kspace[:,:,elem_idx], c, t);
## Plotting the Resulting Wavefield
# Normalization maxima, ignoring inf/NaN samples from the reconstruction
maxpsf_t_recon = np.max(np.abs(psf_t_recon[~np.isinf(psf_t_recon) & ~np.isnan(psf_t_recon)]));
maxpsf_t_true = np.max(np.abs(psf_t_true[~np.isinf(psf_t_true) & ~np.isnan(psf_t_true)]));
maxpsf_t = np.max(np.abs(psf_t[~np.isinf(psf_t) & ~np.isnan(psf_t)]));
# Endless animation loop cycling through the time samples
plt.figure(); tpause = 1e-9; kk = 1;
while True:
    plt.subplot(1,3,1);
    imagesc(x,z,np.real(psf_t_true[:,:,kk]),0.1*maxpsf_t_true*np.array([-1,1]));
    plt.ylabel('z Axial Distance (mm)');
    plt.xlabel('x Azimuthal Distance (mm)');
    plt.title('True Single Element Wavefield');
    plt.subplot(1,3,2);
    imagesc(x,z,np.real(psf_t_recon[:,:,kk]),0.1*maxpsf_t_recon*np.array([-1,1]));
    plt.ylabel('z Axial Distance (mm)');
    plt.xlabel('x Azimuthal Distance (mm)');
    plt.title('Recovered Single Element Wavefield');
    plt.subplot(1,3,3);
    imagesc(x,z,np.real(psf_t[:,:,kk]),0.1*maxpsf_t*np.array([-1,1]));
    plt.ylabel('z Axial Distance (mm)');
    plt.xlabel('x Azimuthal Distance (mm)');
    plt.title('Selected Transmit Beam');
    if kk == t.size-1:
        kk = 1;
    else:
        kk = kk + 1;
    plt.draw();
    plt.pause(tpause);
plt.clf(); | Python/kSpaceSimulations/KSpaceWalkingApertureFocusedTransmits.py | import sys, os, pdb
curr_path = os.getcwd();
sys.path.append(curr_path+'/..');
# Importing stuff from all folders in python path
import numpy as np
from focusfun import *
from refocus import *
from KSpaceFunctions import *
# TESTING CODE FOR FOCUS_DATA Below
import scipy.io as sio
from scipy.signal import hilbert, gausspulse
from scipy.interpolate import RectBivariateSpline
import matplotlib.pyplot as plt
# Methods of Recovery
#method = 'Adjoint';
method = 'Tikhonov';
# Pulse Definition
fc = 5.0e6; # Hz
fracBW = 0.7;
fs = 20e6; # Hz
# Create Pulse in Both Time and Frequency Domain
Nf = 1024; t = np.arange(-Nf,Nf+1)/fs; # (s) Time Vector centered about t=0
impResp = gausspulse(t, fc=fc, bw=fracBW); # Calculate Transmit Pulse
n = impResp.size; P_f = np.fft.fftshift(np.fft.fft(impResp));
f = np.mod(np.fft.fftshift(np.arange(n)*fs/n)+fs/2,fs)-fs/2;
P_f = (f/(f+fc/10))*np.abs(P_f);
P_f = P_f[f>0]; f = f[f>0]; # keep positive frequencies only
# Aperture Definition
c = 1540; # m/s (speed of sound; original comment mislabeled this m/usec)
LAMBDA = c/fc;
elemSpace = 0.15e-3; # m
Nelem = 96;
xpos = np.arange(-(Nelem-1)/2, 1+(Nelem-1)/2)*elemSpace;
tx_origin_x = np.arange(-0.00365, 0.00370, 0.00005); # Transmit Origin in [m]
focDepth = 0.020; # m
# Transmit Apodization
X_XDCR, TX_ORIGIN_X = np.meshgrid(xpos, tx_origin_x);
rect = lambda x: np.heaviside(x+1/2,1/2)-np.heaviside(x-1/2,1/2);
sigma_rect = 0.008; # [m]
tx_apod = rect((X_XDCR-TX_ORIGIN_X)/sigma_rect);
# Simulation Space and Time
Nx0 = 256; m = 2; n = 2; dov = 0.060; # m
x = np.arange(-(Nx0*m-1)/2,1+(Nx0*m-1)/2)*(elemSpace/m);
Nu1 = np.round(dov/(elemSpace/n));
z = (np.arange(Nu1))*(elemSpace/n);
t = np.arange(0,2,0.05)*np.abs(focDepth)/c;
## Ground-Truth Multistatic-Transmit Synthetic Aperture
# BUG FIX: np.complex was deprecated in NumPy 1.20 and removed in 1.24;
# the builtin `complex` is the documented replacement (same dtype).
multistatic_pwResp = np.zeros((x.size, f.size, Nelem), dtype=complex); # Pulse-Wave Frequency Response
multistatic_kspace = np.zeros((z.size, x.size, Nelem), dtype=complex); # K-Space Response
for elem_idx in np.arange(Nelem):
    single_element = np.zeros(Nelem);
    single_element[elem_idx] = 1; # Single Element Apodization
    # Pulse-Wave Frequency Response
    kx, multistatic_pwResp[:,:,elem_idx] = \
        pwResp(x, elemSpace, single_element, np.zeros(Nelem), P_f, f, c);
    # K-Space Response
    kz, multistatic_kspace[:,:,elem_idx] = \
        pwResp2kSpace(kx, f, multistatic_pwResp[:,:,elem_idx], z, c);
Kx, Kz = np.meshgrid(kx, kz); # K-Space Grid
K = np.sqrt(Kx**2 + Kz**2); # Radius in K-Space
## Transmit Pulse-Wave Frequency Response for Each Transmit Beam
tx_pwResp = np.zeros((x.size, f.size, tx_origin_x.size), dtype=complex);
tx_delays = np.zeros((tx_origin_x.size, Nelem), dtype=complex);
for tx_origin_x_idx in np.arange(tx_origin_x.size):
    # Calculating Transmit Delays for Each Transmit Beam
    if np.isinf(focDepth):
        tx_delays[tx_origin_x_idx, :] = np.zeros(xpos.shape);
    else:
        tx_delays[tx_origin_x_idx, :] = (np.sign(focDepth) * \
            np.sqrt((xpos-tx_origin_x[tx_origin_x_idx])**2+focDepth**2)-focDepth)/c;
    # Pulse-Wave Frequency Response for Each Transmit Beam
    kx, tx_pwResp[:,:,tx_origin_x_idx] = pwResp(x, elemSpace, \
        tx_apod[tx_origin_x_idx, :], tx_delays[tx_origin_x_idx, :], P_f, f, c);
# Calculate K-Space Response For Each Transmit Beam
tx_kspace = np.zeros((z.size, x.size, tx_origin_x.size), dtype=complex); # K-Space Response
for tx_origin_x_idx in np.arange(tx_origin_x.size): # K-Space Response
    _, tx_kspace[:,:,tx_origin_x_idx] = \
        pwResp2kSpace(kx, f, tx_pwResp[:,:,tx_origin_x_idx], z, c);
# Reconstruct Transmit Wavefield for Transmit Beam
tx_origin_x_idx = 74;
_, _, psf_t = kspace2wavefield(kx, kz, (Kz>0)*tx_kspace[:,:,tx_origin_x_idx], c, t);
# K-Space of a Single Transmit Beam
plt.figure(); imagesc(kx, kz, np.abs(tx_kspace[:,:,tx_origin_x_idx]), \
    (0, np.max(np.abs(tx_kspace[:,:,tx_origin_x_idx]))) );
plt.xlabel('lateral frequency [1/m]');
plt.ylabel('axial frequency [1/m]');
plt.title('K-Space of Selected Transmit Beam');
## Simulate Multistatic Synthetic Aperture Recovery Techniques
# Decode Multistatic data Using REFoCUS
if method == 'Adjoint':
    multistatic_recov_pwResp = \
        multistatic_recov(kx, f, tx_pwResp, tx_apod, tx_delays, Hinv_adjoint, lambda f: 1);
elif method == 'Tikhonov':
    multistatic_recov_pwResp = \
        multistatic_recov(kx, f, tx_pwResp, tx_apod, tx_delays, Hinv_tikhonov, 1e-3);
# Calculate K-Space Responses For Each Recovered Element
multistatic_recov_kspace = np.zeros((z.size, x.size, Nelem), dtype=complex); # K-Space Response
for elem_idx in np.arange(Nelem): # K-Space Response
    _, multistatic_recov_kspace[:,:,elem_idx] = \
        pwResp2kSpace(kx, f, multistatic_recov_pwResp[:,:,elem_idx], z, c);
## K-Space and Wavefield for Single Element Transmits
# K-Space of the true vs. recovered single-element responses, side by side
plt.figure(); plt.subplot(1,2,1);
imagesc(kx, kz, np.mean(np.abs(multistatic_kspace), axis=2), \
    (0,np.max(np.mean(np.abs(multistatic_kspace), axis=2))) );
plt.xlabel('lateral frequency [1/m]');
plt.ylabel('axial frequency [1/m]');
plt.title('K-Space of True Single Element Response');
plt.subplot(1,2,2);
imagesc(kx, kz, np.mean(np.abs(multistatic_recov_kspace), axis=2), \
    (0,np.max(np.mean(np.abs(multistatic_recov_kspace), axis=2))) );
plt.xlabel('lateral frequency [1/m]');
plt.ylabel('axial frequency [1/m]');
plt.title('K-Space of Recovered Single Element Response');
plt.show();
# Wavefield Due to Each Individual Transmit Element
elem_idx = 48;
_, _, psf_t_recon = kspace2wavefield(kx, kz, (Kz>0)*multistatic_recov_kspace[:,:,elem_idx], c, t);
_, _, psf_t_true = kspace2wavefield(kx, kz, (Kz>0)*multistatic_kspace[:,:,elem_idx], c, t);
## Plotting the Resulting Wavefield
# Normalization maxima, ignoring inf/NaN samples from the reconstruction
maxpsf_t_recon = np.max(np.abs(psf_t_recon[~np.isinf(psf_t_recon) & ~np.isnan(psf_t_recon)]));
maxpsf_t_true = np.max(np.abs(psf_t_true[~np.isinf(psf_t_true) & ~np.isnan(psf_t_true)]));
maxpsf_t = np.max(np.abs(psf_t[~np.isinf(psf_t) & ~np.isnan(psf_t)]));
# Endless animation loop cycling through the time samples
plt.figure(); tpause = 1e-9; kk = 1;
while True:
    plt.subplot(1,3,1);
    imagesc(x,z,np.real(psf_t_true[:,:,kk]),0.1*maxpsf_t_true*np.array([-1,1]));
    plt.ylabel('z Axial Distance (mm)');
    plt.xlabel('x Azimuthal Distance (mm)');
    plt.title('True Single Element Wavefield');
    plt.subplot(1,3,2);
    imagesc(x,z,np.real(psf_t_recon[:,:,kk]),0.1*maxpsf_t_recon*np.array([-1,1]));
    plt.ylabel('z Axial Distance (mm)');
    plt.xlabel('x Azimuthal Distance (mm)');
    plt.title('Recovered Single Element Wavefield');
    plt.subplot(1,3,3);
    imagesc(x,z,np.real(psf_t[:,:,kk]),0.1*maxpsf_t*np.array([-1,1]));
    plt.ylabel('z Axial Distance (mm)');
    plt.xlabel('x Azimuthal Distance (mm)');
    plt.title('Selected Transmit Beam');
    if kk == t.size-1:
        kk = 1;
    else:
        kk = kk + 1;
    plt.draw();
    plt.pause(tpause);
plt.clf(); | 0.447702 | 0.262599 |
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
import math
from scipy.stats import mode
from typing import List
from VASA.vasa import VASA
from VASA.BasePlot import BasePlot
class Scatter(BasePlot):
def __init__(self, v: VASA, desc=None, figsize=(0, 0), titles: str or List[str] = None):
"""
Create the scatter plot object.
Parameters
----------
v: VASA
VASA object where the lisa() method has been called.
desc: str
Plot description used when saving to a file
figsize: (float, float)
Matplotlib figsize specification. Leave as (0, 0) to default
to (n_rows * 4, n_cols * 4).
titles: str | List[str]
String (optional for a single plot) or list of strings to give as titles
to the scatter plots. Defaults as the column name
"""
if not v._ran_lisa:
raise Exception("VASA object has not ran the lisa method yet")
super().__init__("scatter")
self.v: VASA = v
self.plotted = False
self.fontsize = 14
self._desc = desc if desc else "-".join(v.cols)
cols = v.cols
if titles and len(titles) == len(cols):
if not isinstance(titles, list):
titles = [titles]
else:
titles = cols
self.titles = titles
n_cols = math.ceil(len(cols) / 2)
n_rows = min(len(cols), 2)
self.n_cols = n_cols
self.n_rows = n_rows
self.figsize = ((n_rows * 4, n_cols * 4)
if figsize[0] * figsize[1] <= 0 else figsize)
    def plot(self, highlight: str = "", show: bool = True, add_noise: bool = False, samples = 0, group=False):
        """
        Creates a scatter plot showing hot/cold LISA classifications over
        the time period.

        Parameters
        ----------
        highlight: str
            Geometry group to draw lines for. This value should match
            with a v.group_summary() result. Example: geometries are at
            the county level and the v.group_summary() function returns the
            state code. Then `highlight` should be a two digit number as a
            string specifying the state to highlight the counties of.
        show: bool = True
            Whether to show the plot or save the file.
        add_noise: bool = True
            Add noise to differentiate lines
        samples: int = 0
            If > 0, randomly subsample this many geometries (seeded with
            ``self.v.seed``) before plotting.
        group: bool = False
            If True, collapse lines to one per geometry group -- see
            __draw_lines.
        """
        fig, axes = plt.subplots(
            self.n_cols,
            self.n_rows,
            figsize=self.figsize,
            sharex=True,
            sharey=True
        )
        self.fig = fig
        # plt.subplots returns a bare Axes (not an array) for a single
        # subplot; normalize so the loop below always sees a flat sequence.
        self.axes = [axes] if len(self.v.cols) == 1 else axes.flatten()
        # Per-geometry reductions from the VASA object: classification counts
        # and the most recent week of classification.
        count = self.v.reduce("count")
        recent = self.v.reduce('recency')
        df = count.merge(
            recent,
            left_on="fips",
            right_on="fips",
            how="inner",
            suffixes=("_count", "_recency")
        ).reset_index(drop=True)
        # Nothing to draw.
        if df.shape[0] == 0:
            return
        if highlight != "":
            # Keep only geometries belonging to the highlighted group.
            df = df[[
                self.v.group_summary(c) == highlight for c in df.fips.values
            ]].reset_index(drop=True)
        if samples > 0:
            # Deterministic subsample of the rows.
            np.random.seed(self.v.seed)
            to_incl = np.random.choice(np.arange(0, df.shape[0]), size=samples, replace=False)
            df = df.iloc[to_incl, :].reset_index(drop=True)
        for i, ax in enumerate(self.axes):
            col: str = self.v.cols[i]
            # Fall back to the column name when no title was supplied for i.
            title = self.titles[i] if self.titles and len(
                self.titles) >= i + 1 else col
            points = df[[f"{col}_count", f"{col}_recency"]].copy()
            # Each *_count cell unpacks into two values (h, c below);
            # keep the larger of the two as the plotted count.
            points["count"] = [
                max(c)
                for c in points[f"{col}_count"]
            ]
            # 1 when h dominates, 0 when c dominates, NaN when both are 0
            # (never significant) -- NaN rows are dropped below.
            points["which"] = [
                (1 if h > c else (np.nan if h == 0 and c == 0 else 0))
                for h, c in points[f"{col}_count"]
            ]
            points = points.rename(
                {f"{col}_recency": "recent"},
                axis="columns"
            )
            # Merge coincident points, averaging "which" so the hue reflects
            # the hot/cold mixture at that coordinate.
            points = points[["recent", "count", "which"]].dropna().groupby(
                ["count", "recent"]).agg(np.mean).reset_index()
            if highlight != "" or group:
                self.__draw_lines(highlight, col, ax,
                                  df[[f"{col}_count", "fips"]], f"{col}_count", add_noise, group)
            # zorder=10 keeps the scatter above the per-geometry lines.
            self.__create_scatter(ax, points, zorder=10)
            self.__axis_format(ax)
            ax.set_title(title)
        self.plotted = True
        if not show:
            super().save_plot(self._desc, '')
            plt.close()
    def __draw_lines(self, highlight, col, ax, df, c, add_noise, group):
        """Draw one cumulative-significance line per geometry (or, when
        *group* is True, one modal line per geometry group) onto *ax*.
        """
        # df = df[[self.v.group_summary(f) == highlight for f in df.fips]].reset_index(drop=True)
        # Column mask: which entries of the canonical fips ordering are
        # present in the (possibly filtered/subsampled) df.
        to_select = [f in df.fips.values for f in self.v.fips_order]
        lines = np.array(self.v.df[col].tolist())[:, to_select]
        if group:
            # Collapse member geometries to the per-week modal classification
            # of their group.
            group_order = np.array([self.v.group_summary(f) for f in self.v.fips_order])[to_select]
            groups = np.unique(group_order)
            output = np.empty((lines.shape[0], len(groups)))
            for i, g in enumerate(groups):
                group_sel = np.where(group_order == g)[0]
                output[:, i] = mode(lines[:, group_sel], axis=1).mode[:, 0]
            lines = output
            # NOTE(review): after this collapse, `colors[i]` below still
            # indexes the per-geometry df rows while i ranges over group
            # columns -- confirm these line up when group=True.
        # Sort columns by the week of their last value == 1, so lines that
        # stay "hot" longest are drawn last (on top).
        lines_rev = lines[::-1, :]
        lines_order = np.argsort(
            lines.shape[0] - np.argmax(lines_rev == 1, axis=0) - 1)
        # 1 = first count dominates (red), 2 = otherwise (blue).
        colors = [(1 if a > b else 2) for a, b in df[c]]
        # Fade each line by the number of lines drawn.
        alpha = 1 / len(lines)
        for i in lines_order[::-1]:
            val = colors[i]
            # NOTE(review): colors only ever holds 1 or 2, so this skip
            # looks unreachable -- confirm intent before removing.
            if val == 0:
                continue
            color = "red" if val == 1 else "blue"
            self.__draw_line(ax, lines[:, i], val,
                             color, min(1, alpha), add_noise)
def __draw_line(self, ax, xs, val, color, alpha, add_noise):
sig_vals = (xs == val) + 0
sig_idcs = np.where(sig_vals == 1)[0]
if len(sig_idcs) == 0:
return
start = max(sig_idcs[0] - 1, 0) if len(sig_idcs) > 0 else 0
stop = sig_idcs[-1] + 1
# stop line at list sig value
xs = xs[start:stop]
ys = np.cumsum(xs == val)
if add_noise:
np.random.seed(self.v.seed)
ys = ys + np.random.normal(0, 0.125, len(ys))
ax.plot(
np.arange(start + 1, stop + 1),
# + np.random.normal(0, 1/16, size=len(xs)),
ys,
c=color,
alpha=alpha
)
def __create_scatter(self, ax, df: pd.DataFrame, **kwargs):
sns.scatterplot(
x="recent",
y="count",
data=df,
hue="which",
palette="bwr",
ax=ax,
s=30,
**kwargs
)
    def __axis_format(self, ax):
        """Apply shared axis limits, labels, and hot/cold legend patches."""
        _, max_x = ax.get_xlim()
        ax.set_xlim(0, max_x)
        # NOTE(review): the y-limit reuses max_x, presumably to keep the
        # axes square -- confirm this is intentional.
        ax.set_ylim(0, max_x)
        ax.grid(False)
        ax.set_ylabel("Count", fontsize=self.fontsize)
        ax.set_xlabel("Last Week Number", fontsize=self.fontsize)
        # Local import; matplotlib.patches is only needed here.
        import matplotlib.patches as mpatches
        # Build the legend by hand: the scatter's hue legend would show
        # numeric category values instead of these labels.
        hot_spot = mpatches.Patch(color="red", label="Hotspot")
        cold_spot = mpatches.Patch(color="blue", label="Coldspot")
ax.legend(handles=[hot_spot, cold_spot]) | VASA/scatter.py | import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
import math
from scipy.stats import mode
from typing import List
from VASA.vasa import VASA
from VASA.BasePlot import BasePlot
class Scatter(BasePlot):
    """Scatter plot of hot/cold LISA classifications over a time period.

    NOTE(review): this is a byte-identical duplicate of the Scatter class
    defined earlier in this file (code vs. parsed_code dataset columns).
    """

    def __init__(self, v: VASA, desc=None, figsize=(0, 0), titles: str or List[str] = None):
        """
        Create the scatter plot object.

        Parameters
        ----------
        v: VASA
            VASA object where the lisa() method has been called.
        desc: str
            Plot description used when saving to a file
        figsize: (float, float)
            Matplotlib figsize specification. Leave as (0, 0) to default
            to (n_rows * 4, n_cols * 4).
        titles: str | List[str]
            String (optional for a single plot) or list of strings to give as titles
            to the scatter plots. Defaults as the column name
        """
        if not v._ran_lisa:
            raise Exception("VASA object has not ran the lisa method yet")
        super().__init__("scatter")
        self.v: VASA = v
        self.plotted = False
        self.fontsize = 14
        # Default the save-file description to the joined column names.
        self._desc = desc if desc else "-".join(v.cols)
        cols = v.cols
        # NOTE(review): for a bare-string title this compares its *character*
        # count against len(cols), so most single-string titles fall through
        # to the else branch and are replaced by column names -- looks buggy.
        if titles and len(titles) == len(cols):
            if not isinstance(titles, list):
                titles = [titles]
        else:
            titles = cols
        self.titles = titles
        # Lay subplots out in at most two rows.
        n_cols = math.ceil(len(cols) / 2)
        n_rows = min(len(cols), 2)
        self.n_cols = n_cols
        self.n_rows = n_rows
        # A non-positive figsize area means "use the computed default".
        self.figsize = ((n_rows * 4, n_cols * 4)
                        if figsize[0] * figsize[1] <= 0 else figsize)

    def plot(self, highlight: str = "", show: bool = True, add_noise: bool = False, samples = 0, group=False):
        """
        Creates a scatter plot showing hot/cold LISA classifications over
        the time period.

        Parameters
        ----------
        highlight: str
            Geometry group to draw lines for. This value should match
            with a v.group_summary() result. Example: geometries are at
            the county level and the v.group_summary() function returns the
            state code. Then `highlight` should be a two digit number as a
            string specifying the state to highlight the counties of.
        show: bool = True
            Whether to show the plot or save the file.
        add_noise: bool = True
            Add noise to differentiate lines
        samples: int = 0
            If > 0, randomly subsample this many geometries (seeded).
        group: bool = False
            If True, collapse drawn lines to one per geometry group.
        """
        fig, axes = plt.subplots(
            self.n_cols,
            self.n_rows,
            figsize=self.figsize,
            sharex=True,
            sharey=True
        )
        self.fig = fig
        # A single subplot yields a bare Axes; normalize to a flat sequence.
        self.axes = [axes] if len(self.v.cols) == 1 else axes.flatten()
        # Per-geometry classification counts and most recent week.
        count = self.v.reduce("count")
        recent = self.v.reduce('recency')
        df = count.merge(
            recent,
            left_on="fips",
            right_on="fips",
            how="inner",
            suffixes=("_count", "_recency")
        ).reset_index(drop=True)
        if df.shape[0] == 0:
            return
        if highlight != "":
            # Keep only geometries in the highlighted group.
            df = df[[
                self.v.group_summary(c) == highlight for c in df.fips.values
            ]].reset_index(drop=True)
        if samples > 0:
            # Deterministic subsample.
            np.random.seed(self.v.seed)
            to_incl = np.random.choice(np.arange(0, df.shape[0]), size=samples, replace=False)
            df = df.iloc[to_incl, :].reset_index(drop=True)
        for i, ax in enumerate(self.axes):
            col: str = self.v.cols[i]
            # Fall back to the column name when no title covers index i.
            title = self.titles[i] if self.titles and len(
                self.titles) >= i + 1 else col
            points = df[[f"{col}_count", f"{col}_recency"]].copy()
            # Each *_count cell unpacks to two values (h, c); keep the larger.
            points["count"] = [
                max(c)
                for c in points[f"{col}_count"]
            ]
            # 1 when h dominates, 0 when c dominates, NaN when both zero.
            points["which"] = [
                (1 if h > c else (np.nan if h == 0 and c == 0 else 0))
                for h, c in points[f"{col}_count"]
            ]
            points = points.rename(
                {f"{col}_recency": "recent"},
                axis="columns"
            )
            # Merge coincident points, averaging the hot/cold mixture.
            points = points[["recent", "count", "which"]].dropna().groupby(
                ["count", "recent"]).agg(np.mean).reset_index()
            if highlight != "" or group:
                self.__draw_lines(highlight, col, ax,
                                  df[[f"{col}_count", "fips"]], f"{col}_count", add_noise, group)
            # zorder=10 keeps the scatter above the lines.
            self.__create_scatter(ax, points, zorder=10)
            self.__axis_format(ax)
            ax.set_title(title)
        self.plotted = True
        if not show:
            super().save_plot(self._desc, '')
            plt.close()

    def __draw_lines(self, highlight, col, ax, df, c, add_noise, group):
        """Draw one cumulative-significance line per geometry (or group)."""
        # df = df[[self.v.group_summary(f) == highlight for f in df.fips]].reset_index(drop=True)
        # Mask of the canonical fips ordering present in df.
        to_select = [f in df.fips.values for f in self.v.fips_order]
        lines = np.array(self.v.df[col].tolist())[:, to_select]
        if group:
            # Collapse member geometries to each group's per-week mode.
            group_order = np.array([self.v.group_summary(f) for f in self.v.fips_order])[to_select]
            groups = np.unique(group_order)
            output = np.empty((lines.shape[0], len(groups)))
            for i, g in enumerate(groups):
                group_sel = np.where(group_order == g)[0]
                output[:, i] = mode(lines[:, group_sel], axis=1).mode[:, 0]
            lines = output
        # Order columns by the week of the last value == 1, drawn back-to-front.
        lines_rev = lines[::-1, :]
        lines_order = np.argsort(
            lines.shape[0] - np.argmax(lines_rev == 1, axis=0) - 1)
        # 1 = first count dominates (red), 2 = otherwise (blue).
        colors = [(1 if a > b else 2) for a, b in df[c]]
        alpha = 1 / len(lines)
        for i in lines_order[::-1]:
            val = colors[i]
            # NOTE(review): colors only holds 1 or 2; branch looks dead.
            if val == 0:
                continue
            color = "red" if val == 1 else "blue"
            self.__draw_line(ax, lines[:, i], val,
                             color, min(1, alpha), add_noise)

    def __draw_line(self, ax, xs, val, color, alpha, add_noise):
        """Plot one unit's cumulative count of weeks classified as *val*."""
        sig_vals = (xs == val) + 0
        sig_idcs = np.where(sig_vals == 1)[0]
        if len(sig_idcs) == 0:
            return
        # Start one step before the first significant week; stop at the last.
        # (The trailing `else 0` is unreachable after the early return.)
        start = max(sig_idcs[0] - 1, 0) if len(sig_idcs) > 0 else 0
        stop = sig_idcs[-1] + 1
        # stop line at list sig value
        xs = xs[start:stop]
        ys = np.cumsum(xs == val)
        if add_noise:
            # Deterministic jitter so overlapping traces are distinguishable.
            np.random.seed(self.v.seed)
            ys = ys + np.random.normal(0, 0.125, len(ys))
        ax.plot(
            np.arange(start + 1, stop + 1),
            # + np.random.normal(0, 1/16, size=len(xs)),
            ys,
            c=color,
            alpha=alpha
        )

    def __create_scatter(self, ax, df: pd.DataFrame, **kwargs):
        """Render the recency-vs-count scatter hued by hot/cold mixture."""
        sns.scatterplot(
            x="recent",
            y="count",
            data=df,
            hue="which",
            palette="bwr",
            ax=ax,
            s=30,
            **kwargs
        )

    def __axis_format(self, ax):
        """Apply shared axis limits, labels, and hot/cold legend patches."""
        _, max_x = ax.get_xlim()
        ax.set_xlim(0, max_x)
        # NOTE(review): y-limit reuses max_x, presumably to square the axes.
        ax.set_ylim(0, max_x)
        ax.grid(False)
        ax.set_ylabel("Count", fontsize=self.fontsize)
        ax.set_xlabel("Last Week Number", fontsize=self.fontsize)
        # Local import; matplotlib.patches is only needed here.
        import matplotlib.patches as mpatches
        # Manual legend: the hue legend would show numeric category values.
        hot_spot = mpatches.Patch(color="red", label="Hotspot")
        cold_spot = mpatches.Patch(color="blue", label="Coldspot")
ax.legend(handles=[hot_spot, cold_spot]) | 0.878184 | 0.552902 |
#Copyright 2015 RAPP
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
# ROS package name under which rosunit registers this test suite.
PKG='ros_nodes'
import sys
import unittest
import rospy
import roslib
import rospkg
from rapp_platform_ros_communications.srv import (
FaceDetectionRosSrv,
FaceDetectionRosSrvRequest
)
class FaceDetFunc(unittest.TestCase):
"""Handles the face detection functional tests
"""
## Tests face detection with Lenna image. Should return 1 face
def test_faceExists(self):
rospack = rospkg.RosPack()
face_service = rospy.get_param("rapp_face_detection_detect_faces_topic")
rospy.wait_for_service(face_service)
fd_service = rospy.ServiceProxy(face_service, FaceDetectionRosSrv)
req = FaceDetectionRosSrvRequest()
req.imageFilename = rospack.get_path('rapp_testing_tools') + \
'/test_data/Lenna.png'
response = fd_service(req)
faces_num = len(response.faces_up_left)
self.assertEqual( faces_num, 1 )
## Tests face detection with realistic images. Should return 1 face
def test_faceExists_realistic(self):
rospack = rospkg.RosPack()
face_service = rospy.get_param("rapp_face_detection_detect_faces_topic")
rospy.wait_for_service(face_service)
fd_service = rospy.ServiceProxy(face_service, FaceDetectionRosSrv)
req = FaceDetectionRosSrvRequest()
req.imageFilename = rospack.get_path('rapp_testing_tools') + \
'/test_data/face_samples/klpanagi_close_straight.jpg'
response = fd_service(req)
faces_num = len(response.faces_up_left)
self.assertEqual( faces_num, 1 )
## Tests face detection with realistic images. Should return 1 face
def test_faceExists_realistic_fast(self):
rospack = rospkg.RosPack()
face_service = rospy.get_param("rapp_face_detection_detect_faces_topic")
rospy.wait_for_service(face_service)
fd_service = rospy.ServiceProxy(face_service, FaceDetectionRosSrv)
req = FaceDetectionRosSrvRequest()
req.imageFilename = rospack.get_path('rapp_testing_tools') + \
'/test_data/face_samples/klpanagi_close_straight.jpg'
req.fast = True
response = fd_service(req)
faces_num = len(response.faces_up_left)
self.assertEqual( faces_num, 1 )
## Tests face detection with a NAO captured image. Should return 1 face
def test_faceExists_realistic_2(self):
rospack = rospkg.RosPack()
face_service = rospy.get_param("rapp_face_detection_detect_faces_topic")
rospy.wait_for_service(face_service)
fd_service = rospy.ServiceProxy(face_service, FaceDetectionRosSrv)
req = FaceDetectionRosSrvRequest()
req.imageFilename = rospack.get_path('rapp_testing_tools') + \
'/test_data/face_samples/etsardou_medium.jpg'
response = fd_service(req)
faces_num = len(response.faces_up_left)
self.assertEqual( faces_num, 1 )
## Tests face detection with a NAO captured image from almost 2 meters. Should return 1 face
def test_faceExists_realistic_2(self):
rospack = rospkg.RosPack()
face_service = rospy.get_param("rapp_face_detection_detect_faces_topic")
rospy.wait_for_service(face_service)
fd_service = rospy.ServiceProxy(face_service, FaceDetectionRosSrv)
req = FaceDetectionRosSrvRequest()
req.imageFilename = rospack.get_path('rapp_testing_tools') + \
'/test_data/face_samples/klpanagi_medium_straight.jpg'
response = fd_service(req)
faces_num = len(response.faces_up_left)
self.assertEqual( faces_num, 1 )
## Stress test for face detection. 20 calls in a row
def test_faceExists_stress(self):
rospack = rospkg.RosPack()
face_service = rospy.get_param("rapp_face_detection_detect_faces_topic")
rospy.wait_for_service(face_service)
fd_service = rospy.ServiceProxy(face_service, FaceDetectionRosSrv)
req = FaceDetectionRosSrvRequest()
req.imageFilename = rospack.get_path('rapp_testing_tools') + \
'/test_data/Lenna.png'
for i in range(0, 20):
response = fd_service(req)
faces_num = len(response.faces_up_left)
self.assertEqual( faces_num, 1 )
## Tests face detection with an image that does not contain faces. Should return 0 faces
def test_faceDoesNotExist(self):
rospack = rospkg.RosPack()
face_service = rospy.get_param("rapp_face_detection_detect_faces_topic")
rospy.wait_for_service(face_service)
fd_service = rospy.ServiceProxy(face_service, FaceDetectionRosSrv)
req = FaceDetectionRosSrvRequest()
req.imageFilename = rospack.get_path('rapp_testing_tools') + \
'/test_data/qr_code_rapp.jpg'
response = fd_service(req)
faces_num = len(response.faces_up_left)
self.assertEqual( faces_num, 0 )
## Tests face detection with a non existent image. Should return 0 faces
def test_fileDoesNotExist(self):
rospack = rospkg.RosPack()
face_service = rospy.get_param("rapp_face_detection_detect_faces_topic")
rospy.wait_for_service(face_service)
fd_service = rospy.ServiceProxy(face_service, FaceDetectionRosSrv)
req = FaceDetectionRosSrvRequest()
req.imageFilename = rospack.get_path('rapp_testing_tools') + \
'/test_data/qr_code_rapp.png'
response = fd_service(req)
faces_num = len(response.faces_up_left)
self.assertEqual( faces_num, 0 )
## Tests face detection with an audio file. Should not crush an return 0 faces
def test_fileExistsButItAudio(self):
rospack = rospkg.RosPack()
face_service = rospy.get_param("rapp_face_detection_detect_faces_topic")
rospy.wait_for_service(face_service)
fd_service = rospy.ServiceProxy(face_service, FaceDetectionRosSrv)
req = FaceDetectionRosSrvRequest()
req.imageFilename = rospack.get_path('rapp_testing_tools') + \
'/test_data/silence_sample.wav'
response = fd_service(req)
faces_num = len(response.faces_up_left)
self.assertEqual( faces_num, 0 )
## The main function. Initializes the functional tests
# Entry point: run this file directly to execute the suite via rosunit.
if __name__ == '__main__':
    import rosunit
rosunit.unitrun(PKG, 'FaceDetFunc', FaceDetFunc) | rapp_face_detection/tests/face_detection/functional_tests.py |
#Copyright 2015 RAPP
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
# ROS package name under which rosunit registers this test suite.
PKG='ros_nodes'
import sys
import unittest
import rospy
import roslib
import rospkg
from rapp_platform_ros_communications.srv import (
FaceDetectionRosSrv,
FaceDetectionRosSrvRequest
)
class FaceDetFunc(unittest.TestCase):
"""Handles the face detection functional tests
"""
## Tests face detection with Lenna image. Should return 1 face
def test_faceExists(self):
rospack = rospkg.RosPack()
face_service = rospy.get_param("rapp_face_detection_detect_faces_topic")
rospy.wait_for_service(face_service)
fd_service = rospy.ServiceProxy(face_service, FaceDetectionRosSrv)
req = FaceDetectionRosSrvRequest()
req.imageFilename = rospack.get_path('rapp_testing_tools') + \
'/test_data/Lenna.png'
response = fd_service(req)
faces_num = len(response.faces_up_left)
self.assertEqual( faces_num, 1 )
## Tests face detection with realistic images. Should return 1 face
def test_faceExists_realistic(self):
rospack = rospkg.RosPack()
face_service = rospy.get_param("rapp_face_detection_detect_faces_topic")
rospy.wait_for_service(face_service)
fd_service = rospy.ServiceProxy(face_service, FaceDetectionRosSrv)
req = FaceDetectionRosSrvRequest()
req.imageFilename = rospack.get_path('rapp_testing_tools') + \
'/test_data/face_samples/klpanagi_close_straight.jpg'
response = fd_service(req)
faces_num = len(response.faces_up_left)
self.assertEqual( faces_num, 1 )
## Tests face detection with realistic images. Should return 1 face
def test_faceExists_realistic_fast(self):
rospack = rospkg.RosPack()
face_service = rospy.get_param("rapp_face_detection_detect_faces_topic")
rospy.wait_for_service(face_service)
fd_service = rospy.ServiceProxy(face_service, FaceDetectionRosSrv)
req = FaceDetectionRosSrvRequest()
req.imageFilename = rospack.get_path('rapp_testing_tools') + \
'/test_data/face_samples/klpanagi_close_straight.jpg'
req.fast = True
response = fd_service(req)
faces_num = len(response.faces_up_left)
self.assertEqual( faces_num, 1 )
## Tests face detection with a NAO captured image. Should return 1 face
def test_faceExists_realistic_2(self):
rospack = rospkg.RosPack()
face_service = rospy.get_param("rapp_face_detection_detect_faces_topic")
rospy.wait_for_service(face_service)
fd_service = rospy.ServiceProxy(face_service, FaceDetectionRosSrv)
req = FaceDetectionRosSrvRequest()
req.imageFilename = rospack.get_path('rapp_testing_tools') + \
'/test_data/face_samples/etsardou_medium.jpg'
response = fd_service(req)
faces_num = len(response.faces_up_left)
self.assertEqual( faces_num, 1 )
## Tests face detection with a NAO captured image from almost 2 meters. Should return 1 face
def test_faceExists_realistic_2(self):
rospack = rospkg.RosPack()
face_service = rospy.get_param("rapp_face_detection_detect_faces_topic")
rospy.wait_for_service(face_service)
fd_service = rospy.ServiceProxy(face_service, FaceDetectionRosSrv)
req = FaceDetectionRosSrvRequest()
req.imageFilename = rospack.get_path('rapp_testing_tools') + \
'/test_data/face_samples/klpanagi_medium_straight.jpg'
response = fd_service(req)
faces_num = len(response.faces_up_left)
self.assertEqual( faces_num, 1 )
## Stress test for face detection. 20 calls in a row
def test_faceExists_stress(self):
rospack = rospkg.RosPack()
face_service = rospy.get_param("rapp_face_detection_detect_faces_topic")
rospy.wait_for_service(face_service)
fd_service = rospy.ServiceProxy(face_service, FaceDetectionRosSrv)
req = FaceDetectionRosSrvRequest()
req.imageFilename = rospack.get_path('rapp_testing_tools') + \
'/test_data/Lenna.png'
for i in range(0, 20):
response = fd_service(req)
faces_num = len(response.faces_up_left)
self.assertEqual( faces_num, 1 )
## Tests face detection with an image that does not contain faces. Should return 0 faces
def test_faceDoesNotExist(self):
rospack = rospkg.RosPack()
face_service = rospy.get_param("rapp_face_detection_detect_faces_topic")
rospy.wait_for_service(face_service)
fd_service = rospy.ServiceProxy(face_service, FaceDetectionRosSrv)
req = FaceDetectionRosSrvRequest()
req.imageFilename = rospack.get_path('rapp_testing_tools') + \
'/test_data/qr_code_rapp.jpg'
response = fd_service(req)
faces_num = len(response.faces_up_left)
self.assertEqual( faces_num, 0 )
## Tests face detection with a non existent image. Should return 0 faces
def test_fileDoesNotExist(self):
rospack = rospkg.RosPack()
face_service = rospy.get_param("rapp_face_detection_detect_faces_topic")
rospy.wait_for_service(face_service)
fd_service = rospy.ServiceProxy(face_service, FaceDetectionRosSrv)
req = FaceDetectionRosSrvRequest()
req.imageFilename = rospack.get_path('rapp_testing_tools') + \
'/test_data/qr_code_rapp.png'
response = fd_service(req)
faces_num = len(response.faces_up_left)
self.assertEqual( faces_num, 0 )
## Tests face detection with an audio file. Should not crush an return 0 faces
def test_fileExistsButItAudio(self):
rospack = rospkg.RosPack()
face_service = rospy.get_param("rapp_face_detection_detect_faces_topic")
rospy.wait_for_service(face_service)
fd_service = rospy.ServiceProxy(face_service, FaceDetectionRosSrv)
req = FaceDetectionRosSrvRequest()
req.imageFilename = rospack.get_path('rapp_testing_tools') + \
'/test_data/silence_sample.wav'
response = fd_service(req)
faces_num = len(response.faces_up_left)
self.assertEqual( faces_num, 0 )
## The main function. Initializes the functional tests
# Entry point: run this file directly to execute the suite via rosunit.
if __name__ == '__main__':
    import rosunit
rosunit.unitrun(PKG, 'FaceDetFunc', FaceDetFunc) | 0.617167 | 0.305982 |
""" generated source for module ConfigurableConfigPanel """
# package: org.ggp.base.player.gamer.statemachine.configurable
import java.awt.GridBagConstraints
import java.awt.GridBagLayout
import java.awt.Insets
import java.awt.event.ActionEvent
import java.awt.event.ActionListener
import java.io.BufferedReader
import java.io.BufferedWriter
import java.io.File
import java.io.FileInputStream
import java.io.FileWriter
import java.io.IOException
import java.io.InputStreamReader
import java.nio.charset.Charset
import java.util.Random
import javax.swing.AbstractAction
import javax.swing.JButton
import javax.swing.JCheckBox
import javax.swing.JComboBox
import javax.swing.JFileChooser
import javax.swing.JLabel
import javax.swing.JPanel
import javax.swing.JSpinner
import javax.swing.JTextField
import javax.swing.SpinnerNumberModel
import javax.swing.border.TitledBorder
import javax.swing.event.ChangeEvent
import javax.swing.event.ChangeListener
import javax.swing.event.DocumentEvent
import javax.swing.event.DocumentListener
import javax.swing.filechooser.FileFilter
import org.ggp.base.apps.player.config.ConfigPanel
import external.JSON.JSONException
import external.JSON.JSONObject
class ConfigurableConfigPanel(ConfigPanel, ActionListener, DocumentListener, ChangeListener):
""" generated source for class ConfigurableConfigPanel """
serialVersionUID = 1L
associatedFile = File()
associatedFileField = JTextField()
params = JSONObject()
savedParams = str()
loadButton = JButton()
saveAsButton = JButton()
saveButton = JButton()
name = JTextField()
strategy = JComboBox()
metagameStrategy = JComboBox()
stateMachine = JComboBox()
cacheStateMachine = JCheckBox()
maxPlys = JSpinner()
heuristicFocus = JSpinner()
heuristicMobility = JSpinner()
heuristicOpponentFocus = JSpinner()
heuristicOpponentMobility = JSpinner()
mcDecayRate = JSpinner()
rightPanel = JPanel()
def __init__(self):
""" generated source for method __init__ """
super(ConfigurableConfigPanel, self).__init__(GridBagLayout())
leftPanel = JPanel(GridBagLayout())
leftPanel.setBorder(TitledBorder("Major Parameters"))
self.rightPanel = JPanel(GridBagLayout())
self.rightPanel.setBorder(TitledBorder("Minor Parameters"))
self.strategy = JComboBox([None]*)
self.metagameStrategy = JComboBox([None]*)
self.stateMachine = JComboBox([None]*)
self.cacheStateMachine = JCheckBox()
self.maxPlys = JSpinner(SpinnerNumberModel(1, 1, 100, 1))
self.heuristicFocus = JSpinner(SpinnerNumberModel(1, 0, 10, 1))
self.heuristicMobility = JSpinner(SpinnerNumberModel(1, 0, 10, 1))
self.heuristicOpponentFocus = JSpinner(SpinnerNumberModel(1, 0, 10, 1))
self.heuristicOpponentMobility = JSpinner(SpinnerNumberModel(1, 0, 10, 1))
self.mcDecayRate = JSpinner(SpinnerNumberModel(0, 0, 99, 1))
self.name = JTextField()
self.name.setColumns(20)
self.name.setText("Player #" + Random().nextInt(100000))
self.loadButton = JButton(loadButtonMethod())
self.saveButton = JButton(saveButtonMethod())
self.saveAsButton = JButton(saveAsButtonMethod())
self.associatedFileField = JTextField()
self.associatedFileField.setEnabled(False)
buttons = JPanel()
buttons.add(self.loadButton)
buttons.add(self.saveButton)
buttons.add(self.saveAsButton)
nRow = 0
leftPanel.add(JLabel("Name"), GridBagConstraints(0, nRow, 1, 1, 0.0, 0.0, GridBagConstraints.EAST, GridBagConstraints.NONE, Insets(5, 5, 5, 5), 5, 5))
__nRow_0 = nRow
nRow += 1
leftPanel.add(self.name, GridBagConstraints(1, __nRow_0, 1, 1, 0.0, 0.0, GridBagConstraints.WEST, GridBagConstraints.HORIZONTAL, Insets(5, 5, 5, 5), 5, 5))
leftPanel.add(JLabel("Gaming Strategy"), GridBagConstraints(0, nRow, 1, 1, 0.0, 0.0, GridBagConstraints.EAST, GridBagConstraints.NONE, Insets(5, 5, 5, 5), 5, 5))
__nRow_1 = nRow
nRow += 1
leftPanel.add(self.strategy, GridBagConstraints(1, __nRow_1, 1, 1, 0.0, 0.0, GridBagConstraints.WEST, GridBagConstraints.HORIZONTAL, Insets(5, 5, 5, 5), 5, 5))
leftPanel.add(JLabel("Metagame Strategy"), GridBagConstraints(0, nRow, 1, 1, 0.0, 0.0, GridBagConstraints.EAST, GridBagConstraints.NONE, Insets(5, 5, 5, 5), 5, 5))
__nRow_2 = nRow
nRow += 1
leftPanel.add(self.metagameStrategy, GridBagConstraints(1, __nRow_2, 1, 1, 0.0, 0.0, GridBagConstraints.WEST, GridBagConstraints.HORIZONTAL, Insets(5, 5, 5, 5), 5, 5))
leftPanel.add(JLabel("State Machine"), GridBagConstraints(0, nRow, 1, 1, 0.0, 0.0, GridBagConstraints.EAST, GridBagConstraints.NONE, Insets(5, 5, 5, 5), 5, 5))
__nRow_3 = nRow
nRow += 1
leftPanel.add(self.stateMachine, GridBagConstraints(1, __nRow_3, 1, 1, 0.0, 0.0, GridBagConstraints.WEST, GridBagConstraints.HORIZONTAL, Insets(5, 5, 5, 5), 5, 5))
__nRow_4 = nRow
nRow += 1
leftPanel.add(buttons, GridBagConstraints(1, __nRow_4, 2, 1, 1.0, 1.0, GridBagConstraints.SOUTHEAST, GridBagConstraints.NONE, Insets(5, 5, 0, 5), 0, 0))
leftPanel.add(self.associatedFileField, GridBagConstraints(0, nRow, 2, 1, 1.0, 0.0, GridBagConstraints.SOUTHEAST, GridBagConstraints.HORIZONTAL, Insets(0, 5, 5, 5), 0, 0))
layoutRightPanel()
add(leftPanel, GridBagConstraints(0, 0, 1, 1, 0.0, 1.0, GridBagConstraints.CENTER, GridBagConstraints.BOTH, Insets(5, 5, 5, 5), 5, 5))
add(self.rightPanel, GridBagConstraints(1, 0, 1, 1, 1.0, 1.0, GridBagConstraints.CENTER, GridBagConstraints.BOTH, Insets(5, 5, 5, 5), 5, 5))
self.params = JSONObject()
syncJSONtoUI()
self.strategy.addActionListener(self)
self.metagameStrategy.addActionListener(self)
self.stateMachine.addActionListener(self)
self.cacheStateMachine.addActionListener(self)
self.maxPlys.addChangeListener(self)
self.heuristicFocus.addChangeListener(self)
self.heuristicMobility.addChangeListener(self)
self.heuristicOpponentFocus.addChangeListener(self)
self.heuristicOpponentMobility.addChangeListener(self)
self.mcDecayRate.addChangeListener(self)
self.name.getDocument().addDocumentListener(self)
def layoutRightPanel(self):
""" generated source for method layoutRightPanel """
nRow = 0
self.rightPanel.removeAll()
self.rightPanel.add(JLabel("State machine cache?"), GridBagConstraints(0, nRow, 1, 1, 0.0, 0.0, GridBagConstraints.EAST, GridBagConstraints.NONE, Insets(5, 5, 5, 5), 5, 5))
__nRow_5 = nRow
nRow += 1
self.rightPanel.add(self.cacheStateMachine, GridBagConstraints(1, __nRow_5, 1, 1, 1.0, 0.0, GridBagConstraints.WEST, GridBagConstraints.NONE, Insets(5, 5, 5, 5), 5, 5))
if self.strategy.getSelectedItem().__str__() == "Heuristic":
__nRow_6 = nRow
nRow += 1
__nRow_7 = nRow
nRow += 1
__nRow_8 = nRow
nRow += 1
__nRow_9 = nRow
nRow += 1
__nRow_10 = nRow
nRow += 1
self.rightPanel.add(JLabel("Max plys?"), GridBagConstraints(0, nRow, 1, 1, 0.0, 0.0, GridBagConstraints.EAST, GridBagConstraints.NONE, Insets(5, 5, 5, 5), 5, 5))
self.rightPanel.add(self.maxPlys, GridBagConstraints(1, __nRow_6, 1, 1, 0.0, 0.0, GridBagConstraints.WEST, GridBagConstraints.NONE, Insets(5, 5, 5, 5), 5, 5))
self.rightPanel.add(JLabel("Focus Heuristic Weight"), GridBagConstraints(0, nRow, 1, 1, 0.0, 0.0, GridBagConstraints.EAST, GridBagConstraints.NONE, Insets(5, 5, 5, 5), 5, 5))
self.rightPanel.add(self.heuristicFocus, GridBagConstraints(1, __nRow_7, 1, 1, 0.0, 0.0, GridBagConstraints.WEST, GridBagConstraints.NONE, Insets(5, 5, 5, 5), 5, 5))
self.rightPanel.add(JLabel("Mobility Heuristic Weight"), GridBagConstraints(0, nRow, 1, 1, 0.0, 0.0, GridBagConstraints.EAST, GridBagConstraints.NONE, Insets(5, 5, 5, 5), 5, 5))
self.rightPanel.add(self.heuristicMobility, GridBagConstraints(1, __nRow_8, 1, 1, 0.0, 0.0, GridBagConstraints.WEST, GridBagConstraints.NONE, Insets(5, 5, 5, 5), 5, 5))
self.rightPanel.add(JLabel("Opponent Focus Heuristic Weight"), GridBagConstraints(0, nRow, 1, 1, 0.0, 0.0, GridBagConstraints.EAST, GridBagConstraints.NONE, Insets(5, 5, 5, 5), 5, 5))
self.rightPanel.add(self.heuristicOpponentFocus, GridBagConstraints(1, __nRow_9, 1, 1, 0.0, 0.0, GridBagConstraints.WEST, GridBagConstraints.NONE, Insets(5, 5, 5, 5), 5, 5))
self.rightPanel.add(JLabel("Opponent Mobility Heuristic Weight"), GridBagConstraints(0, nRow, 1, 1, 0.0, 0.0, GridBagConstraints.EAST, GridBagConstraints.NONE, Insets(5, 5, 5, 5), 5, 5))
self.rightPanel.add(self.heuristicOpponentMobility, GridBagConstraints(1, __nRow_10, 1, 1, 0.0, 0.0, GridBagConstraints.WEST, GridBagConstraints.NONE, Insets(5, 5, 5, 5), 5, 5))
if self.strategy.getSelectedItem().__str__() == "Monte Carlo":
__nRow_11 = nRow
nRow += 1
self.rightPanel.add(JLabel("Goal Decay Rate"), GridBagConstraints(0, nRow, 1, 1, 0.0, 0.0, GridBagConstraints.EAST, GridBagConstraints.NONE, Insets(5, 5, 5, 5), 5, 5))
self.rightPanel.add(self.mcDecayRate, GridBagConstraints(1, __nRow_11, 1, 1, 0.0, 0.0, GridBagConstraints.WEST, GridBagConstraints.NONE, Insets(5, 5, 5, 5), 5, 5))
__nRow_12 = nRow
nRow += 1
self.rightPanel.add(JLabel(), GridBagConstraints(2, __nRow_12, 1, 1, 1.0, 1.0, GridBagConstraints.SOUTHEAST, GridBagConstraints.NONE, Insets(5, 5, 5, 5), 5, 5))
self.rightPanel.repaint()
@SuppressWarnings("unchecked")
def getParameter(self, name, defaultValue):
""" generated source for method getParameter """
try:
if self.params.has(name):
return self.params.get(name)
else:
return defaultValue
except JSONException as je:
return defaultValue
def actionPerformed(self, arg0):
""" generated source for method actionPerformed """
if arg0.getSource() == self.strategy:
self.layoutRightPanel()
syncJSONtoUI()
def changedUpdate(self, e):
""" generated source for method changedUpdate """
syncJSONtoUI()
def insertUpdate(self, e):
""" generated source for method insertUpdate """
syncJSONtoUI()
def removeUpdate(self, e):
""" generated source for method removeUpdate """
syncJSONtoUI()
def stateChanged(self, arg0):
""" generated source for method stateChanged """
syncJSONtoUI()
def syncJSONtoUI(self):
""" generated source for method syncJSONtoUI """
if settingUI:
return
self.params = getJSONfromUI()
self.saveButton.setEnabled(self.savedParams == None or not self.params.__str__() == self.savedParams)
def getJSONfromUI(self):
    """Snapshot every configuration widget into a fresh JSONObject and return it."""
    snapshot = JSONObject()
    try:
        playerName = self.name.getText()
        if not playerName.isEmpty():
            snapshot.put("name", playerName)
        # Combo boxes: store the selected item's string form.
        for key, combo in (("strategy", self.strategy),
                           ("metagameStrategy", self.metagameStrategy),
                           ("stateMachine", self.stateMachine)):
            snapshot.put(key, combo.getSelectedItem().__str__())
        snapshot.put("cacheStateMachine", self.cacheStateMachine.isSelected())
        # Spinners: store the numeric model value.
        for key, spinner in (("maxPlys", self.maxPlys),
                             ("heuristicFocus", self.heuristicFocus),
                             ("heuristicMobility", self.heuristicMobility),
                             ("heuristicOpponentFocus", self.heuristicOpponentFocus),
                             ("heuristicOpponentMobility", self.heuristicOpponentMobility),
                             ("mcDecayRate", self.mcDecayRate)):
            snapshot.put(key, spinner.getModel().getValue())
    except JSONException as je:
        je.printStackTrace()
    return snapshot
# Re-entrancy guard: True while setUIfromJSON() is pushing values into the
# widgets, so the listener callbacks skip syncJSONtoUI().
settingUI = False
def setUIfromJSON(self):
    """Push every value present in self.params back into the matching widget.

    self.settingUI is held True for the duration so the change/document
    listeners do not echo these programmatic edits back into self.params.
    """
    self.settingUI = True
    try:
        if self.params.has("name"):
            self.name.setText(self.params.getString("name"))
        # Combo boxes take the stored string directly.
        for key, combo in (("strategy", self.strategy),
                           ("metagameStrategy", self.metagameStrategy),
                           ("stateMachine", self.stateMachine)):
            if self.params.has(key):
                combo.setSelectedItem(self.params.getString(key))
        if self.params.has("cacheStateMachine"):
            self.cacheStateMachine.setSelected(self.params.getBoolean("cacheStateMachine"))
        # Spinners take the stored int through their models.
        for key, spinner in (("maxPlys", self.maxPlys),
                             ("heuristicFocus", self.heuristicFocus),
                             ("heuristicMobility", self.heuristicMobility),
                             ("heuristicOpponentFocus", self.heuristicOpponentFocus),
                             ("heuristicOpponentMobility", self.heuristicOpponentMobility),
                             ("mcDecayRate", self.mcDecayRate)):
            if self.params.has(key):
                spinner.getModel().setValue(self.params.getInt(key))
    except JSONException as je:
        je.printStackTrace()
    finally:
        self.settingUI = False
def loadParamsJSON(self, fromFile):
    """Load player parameters from *fromFile* (JSON text), remember it as the
    associated file, and refresh the UI.

    Fixes vs. generated code: `while (line = br.readLine()) != None:` was a
    Python syntax error, and the translator dropped the local declarations of
    the reader (`br`) and the line accumulator (`pdata`).  Both are
    reconstructed from the java.io imports at the top of the module --
    best-effort reading of the original Java; confirm against upstream.
    """
    if not fromFile.exists():
        return
    self.associatedFile = fromFile
    self.associatedFileField.setText(self.associatedFile.getPath())
    self.params = JSONObject()
    try:
        pdata = []
        br = BufferedReader(InputStreamReader(FileInputStream(fromFile), Charset.forName("UTF-8")))
        try:
            line = br.readLine()
            while line is not None:
                pdata.append(line)
                line = br.readLine()
        finally:
            br.close()
        self.params = JSONObject("".join(pdata))
        self.savedParams = self.params.__str__()
        self.setUIfromJSON()
        self.syncJSONtoUI()
    except Exception as e:
        e.printStackTrace()
def saveParamsJSON(self, saveAs):
    """Write self.params to the associated .player file, prompting with a file
    chooser first when *saveAs* is true or no file is associated yet.

    Fixes vs. generated code: the translator dropped the locals for the file
    chooser (`fc`), its return value, the chosen file (`toFile`) and the
    writer (`bw`), and rendered Java's `getName()` as `__name__`.  All are
    reconstructed here as a best-effort reading of the original Java --
    confirm against the upstream source.
    """
    try:
        if saveAs or self.associatedFile is None:
            fc = JFileChooser()
            fc.setFileFilter(PlayerFilter())
            returnVal = fc.showSaveDialog(None)
            if returnVal == JFileChooser.APPROVE_OPTION and fc.getSelectedFile() is not None:
                toFile = fc.getSelectedFile()
                fileName = toFile.getName()
                if fileName.contains("."):
                    # Replace the chosen extension with ".player".
                    self.associatedFile = File(toFile.getParentFile(), fileName.substring(0, fileName.lastIndexOf(".")) + ".player")
                else:
                    self.associatedFile = File(toFile.getParentFile(), fileName + ".player")
                self.associatedFileField.setText(self.associatedFile.getPath())
            else:
                return
        bw = BufferedWriter(FileWriter(self.associatedFile))
        bw.write(self.params.__str__())
        bw.close()
        self.savedParams = self.params.__str__()
        self.syncJSONtoUI()
    except IOException as ie:
        ie.printStackTrace()
def saveButtonMethod(self):
    """ generated source for method saveButtonMethod """
    # NOTE(review): in the Java original this presumably returns an anonymous
    # AbstractAction subclass whose actionPerformed() triggers the save; the
    # code generator dropped that inner-class body, so the returned action
    # carries only the "Save" label -- confirm against the upstream source.
    return AbstractAction("Save")
def saveAsButtonMethod(self):
    """ generated source for method saveAsButtonMethod """
    # NOTE(review): the anonymous AbstractAction body (the actual save-as
    # behavior) was lost in code generation; only the label survives --
    # confirm against the upstream source.
    return AbstractAction("Save As")
def loadButtonMethod(self):
    """ generated source for method loadButtonMethod """
    # NOTE(review): the anonymous AbstractAction body (the actual load
    # behavior) was lost in code generation; only the label survives --
    # confirm against the upstream source.
    return AbstractAction("Load")
class PlayerFilter(FileFilter):
    """Swing file filter that accepts directories and *.player files."""
    def accept(self, f):
        """Return True for directories and for files named *.player."""
        if f.isDirectory():
            return True
        # Bug fix: the translator rendered Java's f.getName() as f.__name__,
        # which is not an attribute of a java.io.File object.
        return f.getName().endsWith(".player")
def getDescription(self):
""" generated source for method getDescription """
return "GGP Players (*.player)" | ggpy/cruft/autocode/ConfigurableConfigPanel.py | """ generated source for module ConfigurableConfigPanel """
# package: org.ggp.base.player.gamer.statemachine.configurable
import java.awt.GridBagConstraints
import java.awt.GridBagLayout
import java.awt.Insets
import java.awt.event.ActionEvent
import java.awt.event.ActionListener
import java.io.BufferedReader
import java.io.BufferedWriter
import java.io.File
import java.io.FileInputStream
import java.io.FileWriter
import java.io.IOException
import java.io.InputStreamReader
import java.nio.charset.Charset
import java.util.Random
import javax.swing.AbstractAction
import javax.swing.JButton
import javax.swing.JCheckBox
import javax.swing.JComboBox
import javax.swing.JFileChooser
import javax.swing.JLabel
import javax.swing.JPanel
import javax.swing.JSpinner
import javax.swing.JTextField
import javax.swing.SpinnerNumberModel
import javax.swing.border.TitledBorder
import javax.swing.event.ChangeEvent
import javax.swing.event.ChangeListener
import javax.swing.event.DocumentEvent
import javax.swing.event.DocumentListener
import javax.swing.filechooser.FileFilter
import org.ggp.base.apps.player.config.ConfigPanel
import external.JSON.JSONException
import external.JSON.JSONObject
class ConfigurableConfigPanel(ConfigPanel, ActionListener, DocumentListener, ChangeListener):
    """Config panel for the Configurable gamer: edits a JSON parameter set
    (strategy, state machine, heuristic weights, ...) and saves/loads it as a
    .player file."""
    # Bug fix: `1L` is a Python 3 syntax error (Java long literal) -> plain int.
    serialVersionUID = 1
    # Class-level widget/field place-holders; __init__ replaces them all with
    # real, configured instances.
    # NOTE(review): java.io.File has no no-arg constructor, so `File()` looks
    # like another translation artifact -- presumably harmless because the
    # attribute is reassigned before use; confirm.
    associatedFile = File()
    associatedFileField = JTextField()
    params = JSONObject()
    savedParams = str()
    loadButton = JButton()
    saveAsButton = JButton()
    saveButton = JButton()
    name = JTextField()
    strategy = JComboBox()
    metagameStrategy = JComboBox()
    stateMachine = JComboBox()
    cacheStateMachine = JCheckBox()
    maxPlys = JSpinner()
    heuristicFocus = JSpinner()
    heuristicMobility = JSpinner()
    heuristicOpponentFocus = JSpinner()
    heuristicOpponentMobility = JSpinner()
    mcDecayRate = JSpinner()
    rightPanel = JPanel()
def __init__(self):
    """Build the two parameter panels, create every widget, and register the
    listeners that keep self.params synchronized with the UI.

    Fixes vs. the generated code:
    * `JComboBox([None]*)` (x3) was a syntax error -- the Java->Python
      translator dropped the String[] item arrays.  Rebuilt with empty item
      lists; TODO: restore the real item sets from the upstream Java source.
    * instance methods were called as bare names (NameError); now
      `self.`-qualified.
    * `"Player #" + Random().nextInt(...)` concatenated str and int
      (TypeError); the number is now wrapped in str().
    """
    super(ConfigurableConfigPanel, self).__init__(GridBagLayout())
    leftPanel = JPanel(GridBagLayout())
    leftPanel.setBorder(TitledBorder("Major Parameters"))
    self.rightPanel = JPanel(GridBagLayout())
    self.rightPanel.setBorder(TitledBorder("Minor Parameters"))
    self.strategy = JComboBox([])          # TODO: restore dropped strategy items
    self.metagameStrategy = JComboBox([])  # TODO: restore dropped metagame items
    self.stateMachine = JComboBox([])      # TODO: restore dropped state-machine items
    self.cacheStateMachine = JCheckBox()
    self.maxPlys = JSpinner(SpinnerNumberModel(1, 1, 100, 1))
    self.heuristicFocus = JSpinner(SpinnerNumberModel(1, 0, 10, 1))
    self.heuristicMobility = JSpinner(SpinnerNumberModel(1, 0, 10, 1))
    self.heuristicOpponentFocus = JSpinner(SpinnerNumberModel(1, 0, 10, 1))
    self.heuristicOpponentMobility = JSpinner(SpinnerNumberModel(1, 0, 10, 1))
    self.mcDecayRate = JSpinner(SpinnerNumberModel(0, 0, 99, 1))
    self.name = JTextField()
    self.name.setColumns(20)
    self.name.setText("Player #" + str(Random().nextInt(100000)))
    self.loadButton = JButton(self.loadButtonMethod())
    self.saveButton = JButton(self.saveButtonMethod())
    self.saveAsButton = JButton(self.saveAsButtonMethod())
    self.associatedFileField = JTextField()
    self.associatedFileField.setEnabled(False)
    buttons = JPanel()
    buttons.add(self.loadButton)
    buttons.add(self.saveButton)
    buttons.add(self.saveAsButton)
    # Left panel: one row per major parameter -- label in grid column 0,
    # editor widget in column 1, then advance the row counter.
    nRow = 0
    leftPanel.add(JLabel("Name"), GridBagConstraints(0, nRow, 1, 1, 0.0, 0.0, GridBagConstraints.EAST, GridBagConstraints.NONE, Insets(5, 5, 5, 5), 5, 5))
    leftPanel.add(self.name, GridBagConstraints(1, nRow, 1, 1, 0.0, 0.0, GridBagConstraints.WEST, GridBagConstraints.HORIZONTAL, Insets(5, 5, 5, 5), 5, 5))
    nRow += 1
    leftPanel.add(JLabel("Gaming Strategy"), GridBagConstraints(0, nRow, 1, 1, 0.0, 0.0, GridBagConstraints.EAST, GridBagConstraints.NONE, Insets(5, 5, 5, 5), 5, 5))
    leftPanel.add(self.strategy, GridBagConstraints(1, nRow, 1, 1, 0.0, 0.0, GridBagConstraints.WEST, GridBagConstraints.HORIZONTAL, Insets(5, 5, 5, 5), 5, 5))
    nRow += 1
    leftPanel.add(JLabel("Metagame Strategy"), GridBagConstraints(0, nRow, 1, 1, 0.0, 0.0, GridBagConstraints.EAST, GridBagConstraints.NONE, Insets(5, 5, 5, 5), 5, 5))
    leftPanel.add(self.metagameStrategy, GridBagConstraints(1, nRow, 1, 1, 0.0, 0.0, GridBagConstraints.WEST, GridBagConstraints.HORIZONTAL, Insets(5, 5, 5, 5), 5, 5))
    nRow += 1
    leftPanel.add(JLabel("State Machine"), GridBagConstraints(0, nRow, 1, 1, 0.0, 0.0, GridBagConstraints.EAST, GridBagConstraints.NONE, Insets(5, 5, 5, 5), 5, 5))
    leftPanel.add(self.stateMachine, GridBagConstraints(1, nRow, 1, 1, 0.0, 0.0, GridBagConstraints.WEST, GridBagConstraints.HORIZONTAL, Insets(5, 5, 5, 5), 5, 5))
    nRow += 1
    leftPanel.add(buttons, GridBagConstraints(1, nRow, 2, 1, 1.0, 1.0, GridBagConstraints.SOUTHEAST, GridBagConstraints.NONE, Insets(5, 5, 0, 5), 0, 0))
    nRow += 1
    leftPanel.add(self.associatedFileField, GridBagConstraints(0, nRow, 2, 1, 1.0, 0.0, GridBagConstraints.SOUTHEAST, GridBagConstraints.HORIZONTAL, Insets(0, 5, 5, 5), 0, 0))
    self.layoutRightPanel()
    self.add(leftPanel, GridBagConstraints(0, 0, 1, 1, 0.0, 1.0, GridBagConstraints.CENTER, GridBagConstraints.BOTH, Insets(5, 5, 5, 5), 5, 5))
    self.add(self.rightPanel, GridBagConstraints(1, 0, 1, 1, 1.0, 1.0, GridBagConstraints.CENTER, GridBagConstraints.BOTH, Insets(5, 5, 5, 5), 5, 5))
    self.params = JSONObject()
    self.syncJSONtoUI()
    # Wire every widget so edits flow back into self.params.
    self.strategy.addActionListener(self)
    self.metagameStrategy.addActionListener(self)
    self.stateMachine.addActionListener(self)
    self.cacheStateMachine.addActionListener(self)
    self.maxPlys.addChangeListener(self)
    self.heuristicFocus.addChangeListener(self)
    self.heuristicMobility.addChangeListener(self)
    self.heuristicOpponentFocus.addChangeListener(self)
    self.heuristicOpponentMobility.addChangeListener(self)
    self.mcDecayRate.addChangeListener(self)
    self.name.getDocument().addDocumentListener(self)
def layoutRightPanel(self):
    """ generated source for method layoutRightPanel """
    # Rebuilds the "Minor Parameters" panel from scratch; which rows appear
    # depends on the currently selected strategy.
    nRow = 0
    self.rightPanel.removeAll()
    # Row 0: state-machine cache checkbox (always shown).
    self.rightPanel.add(JLabel("State machine cache?"), GridBagConstraints(0, nRow, 1, 1, 0.0, 0.0, GridBagConstraints.EAST, GridBagConstraints.NONE, Insets(5, 5, 5, 5), 5, 5))
    __nRow_5 = nRow
    nRow += 1
    self.rightPanel.add(self.cacheStateMachine, GridBagConstraints(1, __nRow_5, 1, 1, 1.0, 0.0, GridBagConstraints.WEST, GridBagConstraints.NONE, Insets(5, 5, 5, 5), 5, 5))
    # Heuristic-only rows: max plys plus the four heuristic weights.
    # NOTE(review): the generator captures all five row indices up front, then
    # adds every label at the already-incremented nRow -- so labels and their
    # widgets land on different rows.  Presumably a translation artifact of
    # Java's inline `nRow++`; confirm against the upstream source.
    if self.strategy.getSelectedItem().__str__() == "Heuristic":
        __nRow_6 = nRow
        nRow += 1
        __nRow_7 = nRow
        nRow += 1
        __nRow_8 = nRow
        nRow += 1
        __nRow_9 = nRow
        nRow += 1
        __nRow_10 = nRow
        nRow += 1
        self.rightPanel.add(JLabel("Max plys?"), GridBagConstraints(0, nRow, 1, 1, 0.0, 0.0, GridBagConstraints.EAST, GridBagConstraints.NONE, Insets(5, 5, 5, 5), 5, 5))
        self.rightPanel.add(self.maxPlys, GridBagConstraints(1, __nRow_6, 1, 1, 0.0, 0.0, GridBagConstraints.WEST, GridBagConstraints.NONE, Insets(5, 5, 5, 5), 5, 5))
        self.rightPanel.add(JLabel("Focus Heuristic Weight"), GridBagConstraints(0, nRow, 1, 1, 0.0, 0.0, GridBagConstraints.EAST, GridBagConstraints.NONE, Insets(5, 5, 5, 5), 5, 5))
        self.rightPanel.add(self.heuristicFocus, GridBagConstraints(1, __nRow_7, 1, 1, 0.0, 0.0, GridBagConstraints.WEST, GridBagConstraints.NONE, Insets(5, 5, 5, 5), 5, 5))
        self.rightPanel.add(JLabel("Mobility Heuristic Weight"), GridBagConstraints(0, nRow, 1, 1, 0.0, 0.0, GridBagConstraints.EAST, GridBagConstraints.NONE, Insets(5, 5, 5, 5), 5, 5))
        self.rightPanel.add(self.heuristicMobility, GridBagConstraints(1, __nRow_8, 1, 1, 0.0, 0.0, GridBagConstraints.WEST, GridBagConstraints.NONE, Insets(5, 5, 5, 5), 5, 5))
        self.rightPanel.add(JLabel("Opponent Focus Heuristic Weight"), GridBagConstraints(0, nRow, 1, 1, 0.0, 0.0, GridBagConstraints.EAST, GridBagConstraints.NONE, Insets(5, 5, 5, 5), 5, 5))
        self.rightPanel.add(self.heuristicOpponentFocus, GridBagConstraints(1, __nRow_9, 1, 1, 0.0, 0.0, GridBagConstraints.WEST, GridBagConstraints.NONE, Insets(5, 5, 5, 5), 5, 5))
        self.rightPanel.add(JLabel("Opponent Mobility Heuristic Weight"), GridBagConstraints(0, nRow, 1, 1, 0.0, 0.0, GridBagConstraints.EAST, GridBagConstraints.NONE, Insets(5, 5, 5, 5), 5, 5))
        self.rightPanel.add(self.heuristicOpponentMobility, GridBagConstraints(1, __nRow_10, 1, 1, 0.0, 0.0, GridBagConstraints.WEST, GridBagConstraints.NONE, Insets(5, 5, 5, 5), 5, 5))
    # Monte-Carlo-only row: goal decay rate spinner.
    if self.strategy.getSelectedItem().__str__() == "Monte Carlo":
        __nRow_11 = nRow
        nRow += 1
        self.rightPanel.add(JLabel("Goal Decay Rate"), GridBagConstraints(0, nRow, 1, 1, 0.0, 0.0, GridBagConstraints.EAST, GridBagConstraints.NONE, Insets(5, 5, 5, 5), 5, 5))
        self.rightPanel.add(self.mcDecayRate, GridBagConstraints(1, __nRow_11, 1, 1, 0.0, 0.0, GridBagConstraints.WEST, GridBagConstraints.NONE, Insets(5, 5, 5, 5), 5, 5))
    # Trailing glue label soaks up remaining space, then repaint.
    __nRow_12 = nRow
    nRow += 1
    self.rightPanel.add(JLabel(), GridBagConstraints(2, __nRow_12, 1, 1, 1.0, 1.0, GridBagConstraints.SOUTHEAST, GridBagConstraints.NONE, Insets(5, 5, 5, 5), 5, 5))
    self.rightPanel.repaint()
# NOTE(review): the generated `@SuppressWarnings("unchecked")` Java annotation was
# removed -- it is not defined in Python and would raise NameError at class
# creation time.
def getParameter(self, name, defaultValue):
    """Return the value stored under *name* in self.params, or *defaultValue*
    when the key is absent or the lookup raises JSONException."""
    try:
        if self.params.has(name):
            return self.params.get(name)
        return defaultValue
    except JSONException:
        return defaultValue
def actionPerformed(self, arg0):
    """ActionListener callback: re-layout the minor-parameters panel when the
    strategy combo changes, then mirror the UI state into self.params."""
    if arg0.getSource() == self.strategy:
        self.layoutRightPanel()
    # Bug fix: generated code called the bare name `syncJSONtoUI()` (NameError);
    # the method lives on the instance.
    self.syncJSONtoUI()
def changedUpdate(self, e):
    """DocumentListener callback: mirror a text edit into self.params.
    Bug fix: was calling the bare name `syncJSONtoUI()` (NameError)."""
    self.syncJSONtoUI()
def insertUpdate(self, e):
    """DocumentListener callback: mirror a text insertion into self.params.
    Bug fix: was calling the bare name `syncJSONtoUI()` (NameError)."""
    self.syncJSONtoUI()
def removeUpdate(self, e):
    """DocumentListener callback: mirror a text deletion into self.params.
    Bug fix: was calling the bare name `syncJSONtoUI()` (NameError)."""
    self.syncJSONtoUI()
def stateChanged(self, arg0):
    """ChangeListener callback (spinners): mirror the change into self.params.
    Bug fix: was calling the bare name `syncJSONtoUI()` (NameError)."""
    self.syncJSONtoUI()
def syncJSONtoUI(self):
    """Rebuild self.params from the widgets and enable the Save button when the
    current parameters differ from the last saved snapshot."""
    # Bug fixes vs. generated code: `settingUI` and `getJSONfromUI()` were bare
    # names (NameError); `== None` replaced with the idiomatic `is None`.
    if self.settingUI:
        # setUIfromJSON() is writing widgets right now; skip the echo.
        return
    self.params = self.getJSONfromUI()
    self.saveButton.setEnabled(self.savedParams is None or not self.params.__str__() == self.savedParams)
def getJSONfromUI(self):
    """Snapshot every configuration widget into a fresh JSONObject and return it."""
    snapshot = JSONObject()
    try:
        playerName = self.name.getText()
        if not playerName.isEmpty():
            snapshot.put("name", playerName)
        # Combo boxes: store the selected item's string form.
        for key, combo in (("strategy", self.strategy),
                           ("metagameStrategy", self.metagameStrategy),
                           ("stateMachine", self.stateMachine)):
            snapshot.put(key, combo.getSelectedItem().__str__())
        snapshot.put("cacheStateMachine", self.cacheStateMachine.isSelected())
        # Spinners: store the numeric model value.
        for key, spinner in (("maxPlys", self.maxPlys),
                             ("heuristicFocus", self.heuristicFocus),
                             ("heuristicMobility", self.heuristicMobility),
                             ("heuristicOpponentFocus", self.heuristicOpponentFocus),
                             ("heuristicOpponentMobility", self.heuristicOpponentMobility),
                             ("mcDecayRate", self.mcDecayRate)):
            snapshot.put(key, spinner.getModel().getValue())
    except JSONException as je:
        je.printStackTrace()
    return snapshot
# Re-entrancy guard: True while setUIfromJSON() is pushing values into the
# widgets, so the listener callbacks skip syncJSONtoUI().
settingUI = False
def setUIfromJSON(self):
    """Push every value present in self.params back into the matching widget.

    self.settingUI is held True for the duration so the change/document
    listeners do not echo these programmatic edits back into self.params.
    """
    self.settingUI = True
    try:
        if self.params.has("name"):
            self.name.setText(self.params.getString("name"))
        # Combo boxes take the stored string directly.
        for key, combo in (("strategy", self.strategy),
                           ("metagameStrategy", self.metagameStrategy),
                           ("stateMachine", self.stateMachine)):
            if self.params.has(key):
                combo.setSelectedItem(self.params.getString(key))
        if self.params.has("cacheStateMachine"):
            self.cacheStateMachine.setSelected(self.params.getBoolean("cacheStateMachine"))
        # Spinners take the stored int through their models.
        for key, spinner in (("maxPlys", self.maxPlys),
                             ("heuristicFocus", self.heuristicFocus),
                             ("heuristicMobility", self.heuristicMobility),
                             ("heuristicOpponentFocus", self.heuristicOpponentFocus),
                             ("heuristicOpponentMobility", self.heuristicOpponentMobility),
                             ("mcDecayRate", self.mcDecayRate)):
            if self.params.has(key):
                spinner.getModel().setValue(self.params.getInt(key))
    except JSONException as je:
        je.printStackTrace()
    finally:
        self.settingUI = False
def loadParamsJSON(self, fromFile):
    """Load player parameters from *fromFile* (JSON text), remember it as the
    associated file, and refresh the UI.

    Fixes vs. generated code: `while (line = br.readLine()) != None:` was a
    Python syntax error, and the translator dropped the local declarations of
    the reader (`br`) and the line accumulator (`pdata`).  Both are
    reconstructed from the java.io imports at the top of the module --
    best-effort reading of the original Java; confirm against upstream.
    """
    if not fromFile.exists():
        return
    self.associatedFile = fromFile
    self.associatedFileField.setText(self.associatedFile.getPath())
    self.params = JSONObject()
    try:
        pdata = []
        br = BufferedReader(InputStreamReader(FileInputStream(fromFile), Charset.forName("UTF-8")))
        try:
            line = br.readLine()
            while line is not None:
                pdata.append(line)
                line = br.readLine()
        finally:
            br.close()
        self.params = JSONObject("".join(pdata))
        self.savedParams = self.params.__str__()
        self.setUIfromJSON()
        self.syncJSONtoUI()
    except Exception as e:
        e.printStackTrace()
def saveParamsJSON(self, saveAs):
    """Write self.params to the associated .player file, prompting with a file
    chooser first when *saveAs* is true or no file is associated yet.

    Fixes vs. generated code: the translator dropped the locals for the file
    chooser (`fc`), its return value, the chosen file (`toFile`) and the
    writer (`bw`), and rendered Java's `getName()` as `__name__`.  All are
    reconstructed here as a best-effort reading of the original Java --
    confirm against the upstream source.
    """
    try:
        if saveAs or self.associatedFile is None:
            fc = JFileChooser()
            fc.setFileFilter(PlayerFilter())
            returnVal = fc.showSaveDialog(None)
            if returnVal == JFileChooser.APPROVE_OPTION and fc.getSelectedFile() is not None:
                toFile = fc.getSelectedFile()
                fileName = toFile.getName()
                if fileName.contains("."):
                    # Replace the chosen extension with ".player".
                    self.associatedFile = File(toFile.getParentFile(), fileName.substring(0, fileName.lastIndexOf(".")) + ".player")
                else:
                    self.associatedFile = File(toFile.getParentFile(), fileName + ".player")
                self.associatedFileField.setText(self.associatedFile.getPath())
            else:
                return
        bw = BufferedWriter(FileWriter(self.associatedFile))
        bw.write(self.params.__str__())
        bw.close()
        self.savedParams = self.params.__str__()
        self.syncJSONtoUI()
    except IOException as ie:
        ie.printStackTrace()
def saveButtonMethod(self):
    """ generated source for method saveButtonMethod """
    # NOTE(review): in the Java original this presumably returns an anonymous
    # AbstractAction subclass whose actionPerformed() triggers the save; the
    # code generator dropped that inner-class body, so the returned action
    # carries only the "Save" label -- confirm against the upstream source.
    return AbstractAction("Save")
def saveAsButtonMethod(self):
    """ generated source for method saveAsButtonMethod """
    # NOTE(review): the anonymous AbstractAction body (the actual save-as
    # behavior) was lost in code generation; only the label survives --
    # confirm against the upstream source.
    return AbstractAction("Save As")
def loadButtonMethod(self):
    """ generated source for method loadButtonMethod """
    # NOTE(review): the anonymous AbstractAction body (the actual load
    # behavior) was lost in code generation; only the label survives --
    # confirm against the upstream source.
    return AbstractAction("Load")
class PlayerFilter(FileFilter):
    """Swing file filter that accepts directories and *.player files."""
    def accept(self, f):
        """Return True for directories and for files named *.player."""
        if f.isDirectory():
            return True
        # Bug fix: the translator rendered Java's f.getName() as f.__name__,
        # which is not an attribute of a java.io.File object.
        return f.getName().endsWith(".player")
def getDescription(self):
""" generated source for method getDescription """
return "GGP Players (*.player)" | 0.543348 | 0.074299 |