CombinedText stringlengths 4 3.42M |
|---|
import argparse
import boto
from boto.utils import get_instance_metadata
from boto.exception import AWSConnectionError
import hipchat
import os
import subprocess
import traceback
import socket
# Services that should be checked for migrations.
# Maps a service name (as it appears in the instance's "services" EC2 tag)
# to a dry-run migrate command template; {python} and {code_dir} are filled
# in from the command-line arguments before the command is executed.
MIGRATION_COMMANDS = {
    'lms': "{python} {code_dir}/manage.py lms migrate --noinput --settings=aws --db-dry-run --merge",
    'cms': "{python} {code_dir}/manage.py cms migrate --noinput --settings=aws --db-dry-run --merge",
    'xqueue': "{python} {code_dir}/manage.py xqueue migrate --noinput --settings=aws --db-dry-run --merge",
}
# Sender name used for hipchat notifications.
HIPCHAT_USER = "PreSupervisor"
def services_for_instance(instance_id):
    """
    Yield every service named in the 'services' tag of the given
    EC2 instance.

    Raises Exception if the matching instance has no 'services' tag.
    """
    ec2 = boto.connect_ec2()
    for reservation in ec2.get_all_instances(instance_ids=[instance_id]):
        for instance in reservation.instances:
            if instance.id != instance_id:
                continue
            try:
                tag_value = instance.tags['services']
            except KeyError:
                raise Exception(
                    "Tag named 'services' not found on this instance({})".format(instance_id))
            for service in tag_value.split(','):
                yield service
def edp_for_instance(instance_id):
    """
    Return the (environment, deployment, play) tag values for the given
    EC2 instance.

    Raises Exception if the instance is found but one of the three tags
    is missing.  Returns None if no instance with this id is found.
    """
    ec2 = boto.connect_ec2()
    reservations = ec2.get_all_instances(instance_ids=[instance_id])
    for reservation in reservations:
        for instance in reservation.instances:
            if instance.id == instance_id:
                try:
                    environment = instance.tags['environment']
                    deployment = instance.tags['deployment']
                    play = instance.tags['play']
                except KeyError as ke:
                    # Bug fix: KeyError.message does not exist on Python 3;
                    # args[0] holds the missing key on both Python 2 and 3.
                    msg = "{} tag not found on this instance({})".format(ke.args[0], instance_id)
                    raise Exception(msg)
                return (environment, deployment, play)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description="Enable all services that are in the services tag of this ec2 instance.")
    parser.add_argument("-a", "--available",
                        help="The location of the available services.")
    parser.add_argument("-e", "--enabled",
                        help="The location of the enabled services.")

    migration_args = parser.add_argument_group("edxapp_migrations",
                                               "Args for running edxapp migration checks.")
    migration_args.add_argument("--edxapp-code-dir",
                                help="Location of the edx-platform code.")
    migration_args.add_argument("--edxapp-python",
                                help="Path to python to use for executing migration check.")

    xq_migration_args = parser.add_argument_group("xqueue_migrations",
                                                  "Args for running xqueue migration checks.")
    # Bug fix: help text was copy-pasted from the edxapp group.
    xq_migration_args.add_argument("--xqueue-code-dir",
                                   help="Location of the xqueue code.")
    xq_migration_args.add_argument("--xqueue-python",
                                   help="Path to python to use for executing migration check.")

    hipchat_args = parser.add_argument_group("hipchat",
                                             "Args for hipchat notification.")
    hipchat_args.add_argument("-c", "--hipchat-api-key",
                              help="Hipchat token if you want to receive notifications via hipchat.")
    hipchat_args.add_argument("-r", "--hipchat-room",
                              help="Room to send messages to.")

    args = parser.parse_args()

    report = []
    prefix = None
    notify = None

    # Hipchat setup is best effort: a notification failure must never
    # prevent services from being enabled.
    try:
        if args.hipchat_api_key:
            hc = hipchat.HipChat(token=args.hipchat_api_key)
            notify = lambda message: hc.message_room(room_id=args.hipchat_room,
                                                     message_from=HIPCHAT_USER, message=message)
    except Exception as e:
        print("Failed to initialize hipchat, {}".format(e))
        traceback.print_exc()

    instance_id = get_instance_metadata()['instance-id']
    prefix = instance_id

    ec2 = boto.connect_ec2()
    reservations = ec2.get_all_instances(instance_ids=[instance_id])
    instance = reservations[0].instances[0]
    if instance.instance_profile['arn'].endswith('/abbey'):
        print("Running an abbey build. Not starting any services.")
        # Needs to exit with 1 instead of 0 to prevent
        # services from starting.
        exit(1)

    # Bug fix: default the EDP values so the volume tagging below cannot
    # hit a NameError when edp_for_instance() raises.
    environment = deployment = play = None
    try:
        environment, deployment, play = edp_for_instance(instance_id)
        prefix = "{environment}-{deployment}-{play}-{instance_id}".format(
            environment=environment,
            deployment=deployment,
            play=play,
            instance_id=instance_id)
    except Exception:
        # Narrowed from a bare except so SystemExit/KeyboardInterrupt
        # are not swallowed.
        print("Failed to get EDP for {}".format(instance_id))

    # get the hostname of the sandbox
    hostname = socket.gethostname()

    # Tag every volume attached to this instance with identifying metadata.
    volumes = ec2.get_all_volumes(filters={'attachment.instance-id': instance_id})
    for volume in volumes:
        volume.add_tags({"hostname": hostname,
                         "environment": environment,
                         "deployment": deployment,
                         "cluster": play,
                         "instance-id": instance_id,
                         "created": volume.create_time})

    try:
        for service in services_for_instance(instance_id):
            if service in MIGRATION_COMMANDS:
                # Do extra migration related stuff.
                if (service == 'lms' or service == 'cms') and args.edxapp_code_dir:
                    cmd = MIGRATION_COMMANDS[service].format(python=args.edxapp_python,
                                                             code_dir=args.edxapp_code_dir)
                    if os.path.exists(args.edxapp_code_dir):
                        os.chdir(args.edxapp_code_dir)
                        # Run the migration check; decode so the substring
                        # test also works on Python 3, where check_output
                        # returns bytes.
                        output = subprocess.check_output(cmd, shell=True).decode('utf-8')
                        if 'Migrating' in output:
                            raise Exception("Migrations have not been run for {}".format(service))
                elif service == 'xqueue' and args.xqueue_code_dir:
                    # Bug fix: was `code_dir=xqueue_code_dir`, an undefined
                    # name that crashed the xqueue branch with a NameError.
                    cmd = MIGRATION_COMMANDS[service].format(python=args.xqueue_python,
                                                             code_dir=args.xqueue_code_dir)
                    if os.path.exists(args.xqueue_code_dir):
                        os.chdir(args.xqueue_code_dir)
                        # Run migration check command.
                        output = subprocess.check_output(cmd, shell=True).decode('utf-8')
                        if 'Migrating' in output:
                            raise Exception("Migrations have not been run for {}".format(service))

            # Link to available service.
            available_file = os.path.join(args.available, "{}.conf".format(service))
            link_location = os.path.join(args.enabled, "{}.conf".format(service))
            if os.path.exists(available_file):
                subprocess.call("ln -sf {} {}".format(available_file, link_location), shell=True)
                report.append("Linking service: {}".format(service))
            else:
                raise Exception("No conf available for service: {}".format(link_location))
    except AWSConnectionError as ae:
        msg = "{}: ERROR : {}".format(prefix, ae)
        if notify:
            notify(msg)
            notify(traceback.format_exc())
        raise ae
    except Exception as e:
        # NOTE(review): this branch swallows the error (exit status 0), so
        # supervisor will still start the services — confirm this is the
        # intended best-effort behavior.
        msg = "{}: ERROR : {}".format(prefix, e)
        print(msg)
        if notify:
            notify(msg)
        traceback.print_exc()
    else:
        msg = "{}: {}".format(prefix, " | ".join(report))
        print(msg)
        if notify:
            notify(msg)
Change note: wait up to 2 minutes for instance tags to be applied before giving up.
import argparse
import boto
from boto.utils import get_instance_metadata
from boto.exception import AWSConnectionError
import hipchat
import os
import subprocess
import traceback
import socket
import time
# Services that should be checked for migrations.
# Maps a service name (from the instance's "services" EC2 tag) to a dry-run
# migrate command template; {python} and {code_dir} are filled in from the
# command-line arguments before execution.
MIGRATION_COMMANDS = {
    'lms': "{python} {code_dir}/manage.py lms migrate --noinput --settings=aws --db-dry-run --merge",
    'cms': "{python} {code_dir}/manage.py cms migrate --noinput --settings=aws --db-dry-run --merge",
    'xqueue': "{python} {code_dir}/manage.py xqueue migrate --noinput --settings=aws --db-dry-run --merge",
}
# Sender name used for hipchat notifications.
HIPCHAT_USER = "PreSupervisor"

# Max amount of time to wait for tags to be applied.
MAX_BACKOFF = 120
# Bug fix: the retry loop reads INITIAL_BACKOFF, but it was never defined,
# causing a NameError before the first retry.  Start with a 1 second wait
# and double it on every attempt.
INITIAL_BACKOFF = 1
def services_for_instance(instance_id):
    """
    Yield every service named in the 'services' tag of the given
    EC2 instance.

    Raises Exception if the matching instance has no 'services' tag.
    """
    ec2 = boto.connect_ec2()
    for reservation in ec2.get_all_instances(instance_ids=[instance_id]):
        for instance in reservation.instances:
            if instance.id != instance_id:
                continue
            try:
                tag_value = instance.tags['services']
            except KeyError:
                raise Exception(
                    "Tag named 'services' not found on this instance({})".format(instance_id))
            for service in tag_value.split(','):
                yield service
def edp_for_instance(instance_id):
    """
    Return the (environment, deployment, play) tag values for the given
    EC2 instance.

    Raises Exception if the instance is found but one of the three tags
    is missing.  Returns None if no instance with this id is found.
    """
    ec2 = boto.connect_ec2()
    reservations = ec2.get_all_instances(instance_ids=[instance_id])
    for reservation in reservations:
        for instance in reservation.instances:
            if instance.id == instance_id:
                try:
                    environment = instance.tags['environment']
                    deployment = instance.tags['deployment']
                    play = instance.tags['play']
                except KeyError as ke:
                    # Bug fix: KeyError.message does not exist on Python 3;
                    # args[0] holds the missing key on both Python 2 and 3.
                    msg = "{} tag not found on this instance({})".format(ke.args[0], instance_id)
                    raise Exception(msg)
                return (environment, deployment, play)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description="Enable all services that are in the services tag of this ec2 instance.")
    parser.add_argument("-a", "--available",
                        help="The location of the available services.")
    parser.add_argument("-e", "--enabled",
                        help="The location of the enabled services.")

    migration_args = parser.add_argument_group("edxapp_migrations",
                                               "Args for running edxapp migration checks.")
    migration_args.add_argument("--edxapp-code-dir",
                                help="Location of the edx-platform code.")
    migration_args.add_argument("--edxapp-python",
                                help="Path to python to use for executing migration check.")

    xq_migration_args = parser.add_argument_group("xqueue_migrations",
                                                  "Args for running xqueue migration checks.")
    # Bug fix: help text was copy-pasted from the edxapp group.
    xq_migration_args.add_argument("--xqueue-code-dir",
                                   help="Location of the xqueue code.")
    xq_migration_args.add_argument("--xqueue-python",
                                   help="Path to python to use for executing migration check.")

    hipchat_args = parser.add_argument_group("hipchat",
                                             "Args for hipchat notification.")
    hipchat_args.add_argument("-c", "--hipchat-api-key",
                              help="Hipchat token if you want to receive notifications via hipchat.")
    hipchat_args.add_argument("-r", "--hipchat-room",
                              help="Room to send messages to.")

    args = parser.parse_args()

    report = []
    prefix = None
    notify = None

    # Hipchat setup is best effort: a notification failure must never
    # prevent services from being enabled.
    try:
        if args.hipchat_api_key:
            hc = hipchat.HipChat(token=args.hipchat_api_key)
            notify = lambda message: hc.message_room(room_id=args.hipchat_room,
                                                     message_from=HIPCHAT_USER, message=message)
    except Exception as e:
        print("Failed to initialize hipchat, {}".format(e))
        traceback.print_exc()

    instance_id = get_instance_metadata()['instance-id']
    prefix = instance_id

    ec2 = boto.connect_ec2()
    reservations = ec2.get_all_instances(instance_ids=[instance_id])
    instance = reservations[0].instances[0]
    if instance.instance_profile['arn'].endswith('/abbey'):
        print("Running an abbey build. Not starting any services.")
        # Needs to exit with 1 instead of 0 to prevent
        # services from starting.
        exit(1)

    # Tags are applied asynchronously after boot, so retry with an
    # exponential backoff for up to MAX_BACKOFF seconds.
    time_left = MAX_BACKOFF
    backoff = 1  # initial backoff in seconds, doubled on every retry
    environment = None
    deployment = None
    play = None
    while time_left > 0:
        try:
            environment, deployment, play = edp_for_instance(instance_id)
            prefix = "{environment}-{deployment}-{play}-{instance_id}".format(
                environment=environment,
                deployment=deployment,
                play=play,
                instance_id=instance_id)
            break
        except Exception:
            # Narrowed from a bare except so SystemExit/KeyboardInterrupt
            # are not swallowed.
            print("Failed to get EDP for {}".format(instance_id))
            time.sleep(backoff)
            # Bug fix: time_left was never decremented, so a persistent
            # failure made this loop spin forever.
            time_left -= backoff
            backoff = backoff * 2

    if environment is None or deployment is None or play is None:
        msg = "Unable to retrieve environment, deployment, or play tag."
        print(msg)
        # Bug fix: notify may be None when no hipchat key was given;
        # calling it unconditionally crashed with a TypeError.
        if notify:
            notify("{} : {}".format(prefix, msg))
        # NOTE(review): exiting 0 here skips enabling any services but
        # reports success to supervisor — confirm this is intended.
        exit(0)

    # get the hostname of the sandbox
    hostname = socket.gethostname()

    # Tag every volume attached to this instance with identifying metadata.
    volumes = ec2.get_all_volumes(filters={'attachment.instance-id': instance_id})
    for volume in volumes:
        volume.add_tags({"hostname": hostname,
                         "environment": environment,
                         "deployment": deployment,
                         "cluster": play,
                         "instance-id": instance_id,
                         "created": volume.create_time})

    try:
        for service in services_for_instance(instance_id):
            if service in MIGRATION_COMMANDS:
                # Do extra migration related stuff.
                if (service == 'lms' or service == 'cms') and args.edxapp_code_dir:
                    cmd = MIGRATION_COMMANDS[service].format(python=args.edxapp_python,
                                                             code_dir=args.edxapp_code_dir)
                    if os.path.exists(args.edxapp_code_dir):
                        os.chdir(args.edxapp_code_dir)
                        # Run the migration check; decode so the substring
                        # test also works on Python 3, where check_output
                        # returns bytes.
                        output = subprocess.check_output(cmd, shell=True).decode('utf-8')
                        if 'Migrating' in output:
                            raise Exception("Migrations have not been run for {}".format(service))
                elif service == 'xqueue' and args.xqueue_code_dir:
                    # Bug fix: was `code_dir=xqueue_code_dir`, an undefined
                    # name that crashed the xqueue branch with a NameError.
                    cmd = MIGRATION_COMMANDS[service].format(python=args.xqueue_python,
                                                             code_dir=args.xqueue_code_dir)
                    if os.path.exists(args.xqueue_code_dir):
                        os.chdir(args.xqueue_code_dir)
                        # Run migration check command.
                        output = subprocess.check_output(cmd, shell=True).decode('utf-8')
                        if 'Migrating' in output:
                            raise Exception("Migrations have not been run for {}".format(service))

            # Link to available service.
            available_file = os.path.join(args.available, "{}.conf".format(service))
            link_location = os.path.join(args.enabled, "{}.conf".format(service))
            if os.path.exists(available_file):
                subprocess.call("ln -sf {} {}".format(available_file, link_location), shell=True)
                report.append("Linking service: {}".format(service))
            else:
                raise Exception("No conf available for service: {}".format(link_location))
    except AWSConnectionError as ae:
        msg = "{}: ERROR : {}".format(prefix, ae)
        if notify:
            notify(msg)
            notify(traceback.format_exc())
        raise ae
    except Exception as e:
        # NOTE(review): this branch swallows the error (exit status 0), so
        # supervisor will still start the services — confirm this is the
        # intended best-effort behavior.
        msg = "{}: ERROR : {}".format(prefix, e)
        print(msg)
        if notify:
            notify(msg)
        traceback.print_exc()
    else:
        msg = "{}: {}".format(prefix, " | ".join(report))
        print(msg)
        if notify:
            notify(msg)
|
"""
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Class for handling arithmetic of polynomials in Z[x]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from itertools import zip_longest as lzip
from copy import copy
from .integer import decompose
class IntPolynomial(object):
    """An `IntPolynomial` is a polynomial with integer coefficients. It's
    represented by a tuple of integers and can be initialized either by
    an integer or by an iterable that can be converted to a tuple of
    integers. Note trailing zeros are discarded at the initializing stage.

    Coefficients are stored lowest degree first: (c0, c1, ..., cn)
    represents c0 + c1*x + ... + cn*x**n.

    NOTE(review): `__eq__` is defined without `__hash__`, so instances are
    unhashable — confirm they are never used as dict keys or set members.
    """

    def __init__(self, coef=0):
        # A bare int is treated as a constant polynomial.
        if isinstance(coef, int):
            self.coef = (coef,)
        else:
            self.coef = self.discard_trailing_zeros(tuple(coef))
        # degree of this polynomial
        self.D = len(self.coef) - 1

    @staticmethod
    def discard_trailing_zeros(a):
        """Discard trailing zeros in an array `a`.

        A lone zero is kept, so the zero polynomial is represented as (0,).
        """
        i = len(a) - 1
        while (i > 0 and a[i] == 0):
            i -= 1
        return a[:i+1]

    def __str__(self):
        return "IntPolynomial" + str(self.coef)

    def __getitem__(self, items):
        # Index/slice straight into the coefficient tuple; also makes the
        # instance iterable (lowest degree first).
        return self.coef[items]

    def __bool__(self):
        """Check whether this is a zero polynomial. Note a non-zero constant
        is not a zero polynomial.
        """
        return self.D > 0 or self[0] != 0

    def __neg__(self):
        return IntPolynomial(-x for x in self)

    @staticmethod
    def valid(g):
        """Check input for polynomial operations.

        Coerces an int operand to a constant IntPolynomial; rejects
        anything else.
        """
        if not isinstance(g, (int, IntPolynomial)):
            raise ValueError("Only integers and IntPolynomials are allowed for polynomial operations.")
        if isinstance(g, int):
            g = IntPolynomial(g)
        return g

    def __add__(self, g):
        """Add a polynomial or an integer.
        """
        g = self.valid(g)
        # zip_longest pads the shorter coefficient tuple with zeros.
        return IntPolynomial(x + y for x, y in lzip(self, g, fillvalue=0))

    __iadd__ = __radd__ = __add__

    def __sub__(self, g):
        g = self.valid(g)
        return IntPolynomial(x - y for x, y in lzip(self, g, fillvalue=0))

    __isub__ = __sub__

    def __rsub__(self, g):
        return -self + g

    def __eq__(self, g):
        if not isinstance(g, (int, IntPolynomial)):
            return False
        # Equal iff the difference is the zero polynomial.
        return not bool(self - g)

    def __mul__(self, g):
        # Schoolbook O(d1*d2) convolution of the coefficient sequences.
        g = self.valid(g)
        d1, d2 = self.D, g.D
        h = [0] * (d1 + d2 + 1)
        for i in range(d1 + 1):
            for j in range(d2 + 1):
                h[i + j] += self[i] * g[j]
        return IntPolynomial(h)

    __imul__ = __rmul__ = __mul__

    @classmethod
    def monomial(cls, n, a):
        """Return the monomial a*x**n.
        """
        coef = [0] * (n + 1)
        coef[n] = a
        return cls(coef)

    def __divmod__(self, g):
        """Return (quotient, remainder) of division by a monic polynomial."""
        g = self.valid(g)
        d1 = self.D
        d2 = g.D
        # Only monic divisors keep the result inside Z[x].
        if g[d2] != 1:
            raise ValueError("The divisor must be a monic polynomial")
        if d1 < d2:
            return IntPolynomial(0), self
        # if the divisor is a constant 1
        if d2 == 0:
            return self, IntPolynomial(0)
        f = copy(self)
        q = 0
        # Long division: cancel the leading term of f each iteration.
        while f.D >= d2:
            m = self.monomial(f.D - d2, f[f.D])
            q += m
            f -= m * g
        return q, f

    def __mod__(self, g):
        return divmod(self, g)[1]

    def __floordiv__(self, g):
        return divmod(self, g)[0]

    @classmethod
    def cyclotomic(cls, n):
        r"""
        Return the cyclotomic polynomial \Phi_n(x) for the n-th primitive root of unity:
            \Phi_n(x) = \prod (x^{n/d}-1)^{\mu(d)},
        where d runs over all divisors of n and \mu(d) is the Mobius function:
            \mu(d) = 0 iff d contains a square factor.
            \mu(d) = 1 iff d is a product of even number of different primes.
            \mu(d) = -1 iff d is a product of odd number of different primes.

        Examples (coefficients lowest degree first):
            cyclotomic(8)  is x^4 + 1,       coefficients (1, 0, 0, 0, 1)
            cyclotomic(12) is x^4 - x^2 + 1, coefficients (1, 0, -1, 0, 1)
        """
        if n == 1:
            return cls((-1, 1))
        f = 1
        g = 1
        # Enumerate square-free divisors d of n as subsets of n's prime
        # factors; bit i of k selects primes[i].
        primes = list(decompose(n).keys())
        num_square_free_factors = 1 << len(primes)
        for k in range(num_square_free_factors):
            d = 1
            for i, e in enumerate(primes):
                if (k & (1 << i)) != 0:
                    d *= e
            m = cls.monomial(n // d, 1) - 1
            # Popcount parity of k is the Mobius sign of d.
            b = bin(k).count("1")
            if b % 2 == 0:
                f *= m
            else:
                g *= m
        return f // g
Change note: reindent polynomial.py and clean up docstrings and error messages.
"""
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Class for handling arithmetic of polynomials in Z[x]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from itertools import zip_longest as lzip
from copy import copy
from .integer import decompose
class IntPolynomial(object):
    """
    A class for handling arithmetic of polynomials with integer coefficients.
    A polynomial is represented by a tuple of integers and can be initialized
    either by an integer or by an iterable that yields a tuple of integers.
    Note trailing zeros are discarded at the initializing stage.

    Coefficients are stored lowest degree first: (c0, c1, ..., cn)
    represents c0 + c1*x + ... + cn*x**n.

    NOTE(review): `__eq__` is defined without `__hash__`, so instances are
    unhashable — confirm they are never used as dict keys or set members.
    """

    def __init__(self, coef=0):
        # A bare int is treated as a constant polynomial.
        if isinstance(coef, int):
            self.coef = (coef,)
        else:
            self.coef = self.discard_trailing_zeros(tuple(coef))
        # degree of this polynomial
        self.D = len(self.coef) - 1

    @staticmethod
    def discard_trailing_zeros(arr):
        """
        Discard trailing zeros in an array.

        A lone zero is kept, so the zero polynomial is represented as (0,).
        """
        i = len(arr) - 1
        while (i > 0 and arr[i] == 0):
            i -= 1
        return arr[:i+1]

    def __str__(self):
        return "IntPolynomial" + str(self.coef)

    def __getitem__(self, items):
        # Index/slice straight into the coefficient tuple; also makes the
        # instance iterable (lowest degree first).
        return self.coef[items]

    def __bool__(self):
        """
        Check whether this polynomial is zero.

        A non-zero constant is not the zero polynomial.
        """
        return self.D > 0 or self[0] != 0

    def __neg__(self):
        return IntPolynomial(-x for x in self)

    @staticmethod
    def valid(g):
        """
        Check input for polynomial operations.

        Coerces an int operand to a constant IntPolynomial; rejects
        anything else.
        """
        if not isinstance(g, (int, IntPolynomial)):
            raise ValueError("type {} not supported for polynomial operations".format(type(g)))
        if isinstance(g, int):
            g = IntPolynomial(g)
        return g

    def __add__(self, g):  # f + g
        """
        Addition with another polynomial or an integer.
        """
        g = self.valid(g)
        # zip_longest pads the shorter coefficient tuple with zeros.
        return IntPolynomial(x + y for x, y in lzip(self, g, fillvalue=0))

    __iadd__ = __radd__ = __add__

    def __sub__(self, g):  # f - g
        g = self.valid(g)
        return IntPolynomial(x - y for x, y in lzip(self, g, fillvalue=0))

    __isub__ = __sub__

    def __rsub__(self, g):
        return -self + g

    def __eq__(self, g):  # f == g
        if not isinstance(g, (int, IntPolynomial)):
            return False
        # Equal iff the difference is the zero polynomial.
        return not bool(self - g)

    def __mul__(self, g):  # f * g
        # Schoolbook O(d1*d2) convolution of the coefficient sequences.
        g = self.valid(g)
        d1, d2 = self.D, g.D
        h = [0] * (d1 + d2 + 1)
        for i in range(d1 + 1):
            for j in range(d2 + 1):
                h[i + j] += self[i] * g[j]
        return IntPolynomial(h)

    __imul__ = __rmul__ = __mul__

    @classmethod
    def monomial(cls, n, a):
        """
        Return the monomial a*x^n.
        """
        coef = [0] * (n + 1)
        coef[n] = a
        return cls(coef)

    def __divmod__(self, g):
        """Return (quotient, remainder) of division by a monic polynomial."""
        g = self.valid(g)
        d1 = self.D
        d2 = g.D
        # Only monic divisors keep the result inside Z[x].
        if g[d2] != 1:
            raise ValueError("The divisor must be a monic polynomial")
        if d1 < d2:
            return IntPolynomial(0), self
        # if the divisor is a constant 1
        if d2 == 0:
            return self, IntPolynomial(0)
        f = copy(self)
        q = 0
        # Long division: cancel the leading term of f each iteration.
        while f.D >= d2:
            m = self.monomial(f.D - d2, f[f.D])
            q += m
            f -= m * g
        return q, f

    def __mod__(self, g):
        return divmod(self, g)[1]

    def __floordiv__(self, g):
        return divmod(self, g)[0]

    @classmethod
    def cyclotomic(cls, n):
        r"""
        Return the cyclotomic polynomial \Phi_n(x) for the n-th primitive root of unity:
            \Phi_n(x) = \prod (x^{n/d}-1)^{\mu(d)},
        where d runs over all divisors of n and \mu(d) is the Mobius function:
            \mu(d) = 0 iff d contains a square factor.
            \mu(d) = 1 iff d is a product of even number of different primes.
            \mu(d) = -1 iff d is a product of odd number of different primes.

        Examples (coefficients lowest degree first):
            cyclotomic(8)  is x^4 + 1,       coefficients (1, 0, 0, 0, 1)
            cyclotomic(12) is x^4 - x^2 + 1, coefficients (1, 0, -1, 0, 1)
        """
        if n == 1:
            return cls((-1, 1))
        f = 1
        g = 1
        # Enumerate square-free divisors d of n as subsets of n's prime
        # factors; bit i of k selects primes[i].
        primes = list(decompose(n).keys())
        num_square_free_factors = 1 << len(primes)
        for k in range(num_square_free_factors):
            d = 1
            for i, e in enumerate(primes):
                if (k & (1 << i)) != 0:
                    d *= e
            m = cls.monomial(n // d, 1) - 1
            # Popcount parity of k is the Mobius sign of d.
            b = bin(k).count("1")
            if b % 2 == 0:
                f *= m
            else:
                g *= m
        return f // g
|
import boto3
import argparse
import time
# Usage - python invalidate.py --url 'www.example.com'
# Creates a CloudFront invalidation for '/*' on the distribution whose
# aliases include the given URL, then polls until it completes.

# REQUIRED - AWS Profile/Key
PROFILE_VAR = 'cf-invalidation'

parser = argparse.ArgumentParser()
parser.add_argument('--url', required=True, type=str)
args = parser.parse_args()
url = args.url

session = boto3.Session(profile_name=PROFILE_VAR)
client = session.client('cloudfront')

# Find the distribution whose aliases contain the requested URL.
response = client.list_distributions()
distributions = response['DistributionList']['Items']
dist_id = None
for distribution in distributions:
    # Bug fix: int() previously wrapped the whole comparison,
    # int(quantity > 0), instead of the quantity itself.
    if distribution['Aliases']['Quantity'] > 0 and url in distribution['Aliases']['Items']:
        dist_id = distribution['Id']
        break

# Bug fix: dist_id was unbound (NameError) when no distribution matched.
if dist_id is None:
    raise SystemExit("No CloudFront distribution found with alias {}".format(url))

timestart = time.time()
invalidation = client.create_invalidation(
    DistributionId=dist_id,
    InvalidationBatch={'Paths': {'Quantity': 1, 'Items': ['/*']},
                       # CallerReference must be unique per invalidation;
                       # the start timestamp serves that purpose.
                       'CallerReference': str(timestart)})
invalidation_id = invalidation['Invalidation']['Id']
print('Creating invalidation for %s (id: %s)...' % (url, invalidation_id))

# Poll every 10 seconds until the invalidation completes.
status = client.get_invalidation(DistributionId=dist_id, Id=invalidation_id)['Invalidation']['Status']
while (status != 'Completed'):
    print('Invalidating...%d sec elapsed' % (time.time() - timestart))
    time.sleep(10)
    status = client.get_invalidation(DistributionId=dist_id, Id=invalidation_id)['Invalidation']['Status']
print('Completed.')
Change note: update invalidate-url.py to read AWS credentials from environment variables, fixing the permission issue.
import boto3
import argparse
import time
import os
# Usage - python invalidate.py --url 'www.example.com'
# Creates a CloudFront invalidation for '/*' on the distribution whose
# aliases include the given URL, then polls until it completes.

parser = argparse.ArgumentParser()
parser.add_argument('--url', required=True, type=str)
args = parser.parse_args()
url = args.url

session = boto3.Session(
    aws_access_key_id=os.environ['AWS_ACCESS_KEY_ID'],
    aws_secret_access_key=os.environ['AWS_SECRET_ACCESS_KEY'],
    region_name='us-east-1',
    # OPTIONAL - AWS profile.  Bug fix: os.environ['AWS_PROFILE'] raised
    # KeyError whenever the variable was unset; explicit keys above are
    # the primary credentials, so the profile is best-effort.
    profile_name=os.environ.get('AWS_PROFILE'))
client = session.client('cloudfront')

# Find the distribution whose aliases contain the requested URL.
response = client.list_distributions()
distributions = response['DistributionList']['Items']
dist_id = None
for distribution in distributions:
    # Bug fix: int() previously wrapped the whole comparison,
    # int(quantity > 0), instead of the quantity itself.
    if distribution['Aliases']['Quantity'] > 0 and url in distribution['Aliases']['Items']:
        dist_id = distribution['Id']
        break

# Bug fix: dist_id was unbound (NameError) when no distribution matched.
if dist_id is None:
    raise SystemExit("No CloudFront distribution found with alias {}".format(url))

timestart = time.time()
invalidation = client.create_invalidation(
    DistributionId=dist_id,
    InvalidationBatch={'Paths': {'Quantity': 1, 'Items': ['/*']},
                       # CallerReference must be unique per invalidation;
                       # the start timestamp serves that purpose.
                       'CallerReference': str(timestart)})
invalidation_id = invalidation['Invalidation']['Id']
print('Creating invalidation for %s (id: %s)...' % (url, invalidation_id))

# Poll every 10 seconds until the invalidation completes.
status = client.get_invalidation(DistributionId=dist_id, Id=invalidation_id)['Invalidation']['Status']
while (status != 'Completed'):
    print('Invalidating...%d sec elapsed' % (time.time() - timestart))
    time.sleep(10)
    status = client.get_invalidation(DistributionId=dist_id, Id=invalidation_id)['Invalidation']['Status']
print('Completed.')
|
from HARK.ConsumptionSaving.ConsIndShockModel import \
IndShockConsumerType, ConsIndShockSolverBasic
import HARK.ConsumptionSaving.ConsumerParameters as Params
import numpy as np
import unittest
class testIndShockConsumerType(unittest.TestCase):
    """Regression tests for IndShockConsumerType.

    The hard-coded numeric expectations pin previously observed solver and
    simulation output; they are regression anchors, not derived values.
    """

    def setUp(self):
        # Small solved agent shared by the simulation tests.
        self.agent = IndShockConsumerType(
            AgentCount = 2,
            T_sim = 10
        )
        self.agent.solve()

    def test_getShocks(self):
        # Stagger the two agents' birth periods, then draw shocks.
        self.agent.initializeSim()
        self.agent.simBirth(np.array([True,False]))
        self.agent.simOnePeriod()
        self.agent.simBirth(np.array([False,True]))

        self.agent.getShocks()

        self.assertEqual(self.agent.PermShkNow[0],
                         1.0050166461586711)
        self.assertEqual(self.agent.PermShkNow[1],
                         1.0050166461586711)
        self.assertEqual(self.agent.TranShkNow[0],
                         1.1176912196531754)

    def test_ConsIndShockSolverBasic(self):
        LifecycleExample = IndShockConsumerType(
            **Params.init_lifecycle)
        LifecycleExample.cycles = 1
        LifecycleExample.solve()

        # test the solution_terminal
        self.assertAlmostEqual(
            LifecycleExample.solution[10].cFunc(2).tolist(),
            2)

        # Re-run one period of the solver by hand and check each
        # intermediate quantity it produces.
        solver = ConsIndShockSolverBasic(
            LifecycleExample.solution[-2],
            LifecycleExample.IncomeDstn[0],
            LifecycleExample.LivPrb[0],
            LifecycleExample.DiscFac,
            LifecycleExample.CRRA,
            LifecycleExample.Rfree,
            LifecycleExample.PermGroFac[0],
            LifecycleExample.BoroCnstArt,
            LifecycleExample.aXtraGrid,
            LifecycleExample.vFuncBool,
            LifecycleExample.CubicBool)

        solver.prepareToSolve()

        self.assertAlmostEqual(solver.DiscFacEff,
                               0.9503999999999999)
        self.assertAlmostEqual(solver.PermShkMinNext,
                               0.850430160026919)
        self.assertAlmostEqual(solver.cFuncNowCnst(4).tolist(),
                               4.0)
        self.assertAlmostEqual(solver.prepareToCalcEndOfPrdvP()[0],
                               -0.2491750859108316)
        self.assertAlmostEqual(solver.prepareToCalcEndOfPrdvP()[-1],
                               19.74982491408914)

        EndOfPrdvP = solver.calcEndOfPrdvP()

        self.assertAlmostEqual(EndOfPrdvP[0],
                               6622.251864311334)
        self.assertAlmostEqual(EndOfPrdvP[-1],
                               0.026301061207747087)

        solution = solver.makeBasicSolution(EndOfPrdvP,
                                            solver.aNrmNow,
                                            solver.makeLinearcFunc)
        solver.addMPCandHumanWealth(solution)

        self.assertAlmostEqual(solution.cFunc(4).tolist(),
                               1.7391265696400773)

    def test_simulated_values(self):
        self.agent.initializeSim()
        self.agent.simulate()

        # NOTE(review): stray debugging print left in the test.
        print(self.agent.aLvlNow)

        self.assertAlmostEqual(self.agent.MPCnow[1],
                               0.5535801655448935)
        self.assertAlmostEqual(self.agent.aLvlNow[1],
                               0.18832361)
class testBufferStock(unittest.TestCase):
    """ Tests of the results of the BufferStock REMARK.

    The numeric expectations pin previously observed solver output for the
    buffer-stock parameterization; they are regression anchors.
    """

    def setUp(self):
        # Make a dictionary containing all parameters needed to solve the model
        self.base_params = Params.init_idiosyncratic_shocks

        # Set the parameters for the baseline results in the paper
        # using the variable values defined in the cell above
        self.base_params['PermGroFac'] = [1.03]
        self.base_params['Rfree'] = 1.04
        self.base_params['DiscFac'] = 0.96
        self.base_params['CRRA'] = 2.00
        self.base_params['UnempPrb'] = 0.005
        self.base_params['IncUnemp'] = 0.0
        self.base_params['PermShkStd'] = [0.1]
        self.base_params['TranShkStd'] = [0.1]
        self.base_params['LivPrb'] = [1.0]
        self.base_params['CubicBool'] = True
        self.base_params['T_cycle'] = 1
        self.base_params['BoroCnstArt'] = None

    def test_baseEx(self):
        baseEx = IndShockConsumerType(**self.base_params)
        baseEx.cycles = 100   # Make this type have a finite horizon (Set T = 100)

        baseEx.solve()
        baseEx.unpackcFunc()

        # Spot-check consumption at several periods before the horizon.
        m = np.linspace(0,9.5,1000)

        c_m = baseEx.cFunc[0](m)
        c_t1 = baseEx.cFunc[-2](m)
        c_t5 = baseEx.cFunc[-6](m)
        c_t10 = baseEx.cFunc[-11](m)

        self.assertAlmostEqual(c_m[500], 1.4008090582203356)
        self.assertAlmostEqual(c_t1[500], 2.9227437159255216)
        self.assertAlmostEqual(c_t5[500], 1.7350607327187664)
        self.assertAlmostEqual(c_t10[500], 1.4991390649979213)
        self.assertAlmostEqual(c_t10[600], 1.6101476268581576)
        self.assertAlmostEqual(c_t10[700], 1.7196531041366991)

    def test_GICFails(self):
        # Parameterization chosen so the growth impatience condition fails.
        GIC_fail_dictionary = dict(self.base_params)
        GIC_fail_dictionary['Rfree'] = 1.08
        GIC_fail_dictionary['PermGroFac'] = [1.00]

        GICFailExample = IndShockConsumerType(
            cycles=0, # cycles=0 makes this an infinite horizon consumer
            **GIC_fail_dictionary)

        GICFailExample.solve()
        GICFailExample.unpackcFunc()
        m = np.linspace(0,5,1000)
        c_m = GICFailExample.cFunc[0](m)

        self.assertAlmostEqual(c_m[500], 0.7772637042393458)
        self.assertAlmostEqual(c_m[700], 0.8392649061916746)

        self.assertFalse(GICFailExample.GICPF)

    def test_infinite_horizon(self):
        baseEx_inf = IndShockConsumerType(cycles=0,
                                          **self.base_params)

        baseEx_inf.solve()
        baseEx_inf.unpackcFunc()

        m1 = np.linspace(1,baseEx_inf.solution[0].mNrmSS,50) # m1 defines the plot range on the left of target m value (e.g. m <= target m)
        c_m1 = baseEx_inf.cFunc[0](m1)

        self.assertAlmostEqual(c_m1[0], 0.8527887545025995)
        self.assertAlmostEqual(c_m1[-1], 1.0036279936408656)

        x1 = np.linspace(0,25,1000)
        cfunc_m = baseEx_inf.cFunc[0](x1)

        self.assertAlmostEqual(cfunc_m[500], 1.8902146173138235)
        self.assertAlmostEqual(cfunc_m[700], 2.1591451850267176)

        m = np.linspace(0.001,8,1000)

        # Use the HARK method derivative to get the derivative of cFunc, and the values are just the MPC
        MPC = baseEx_inf.cFunc[0].derivative(m)

        self.assertAlmostEqual(MPC[500], 0.08415000641504392)
        self.assertAlmostEqual(MPC[700], 0.07173144137912524)
# Infinite-horizon idiosyncratic-shocks parameterization used by
# testIndShockConsumerTypeExample below.
IdiosyncDict={
    # Parameters shared with the perfect foresight model
    "CRRA": 2.0,                           # Coefficient of relative risk aversion
    "Rfree": 1.03,                         # Interest factor on assets
    "DiscFac": 0.96,                       # Intertemporal discount factor
    "LivPrb" : [0.98],                     # Survival probability
    "PermGroFac" :[1.01],                  # Permanent income growth factor

    # Parameters that specify the income distribution over the lifecycle
    "PermShkStd" : [0.1],                  # Standard deviation of log permanent shocks to income
    "PermShkCount" : 7,                    # Number of points in discrete approximation to permanent income shocks
    "TranShkStd" : [0.2],                  # Standard deviation of log transitory shocks to income
    "TranShkCount" : 7,                    # Number of points in discrete approximation to transitory income shocks
    "UnempPrb" : 0.05,                     # Probability of unemployment while working
    "IncUnemp" : 0.3,                      # Unemployment benefits replacement rate
    "UnempPrbRet" : 0.0005,                # Probability of "unemployment" while retired
    "IncUnempRet" : 0.0,                   # "Unemployment" benefits when retired
    "T_retire" : 0,                        # Period of retirement (0 --> no retirement)
    "tax_rate" : 0.0,                      # Flat income tax rate (legacy parameter, will be removed in future)

    # Parameters for constructing the "assets above minimum" grid
    "aXtraMin" : 0.001,                    # Minimum end-of-period "assets above minimum" value
    "aXtraMax" : 20,                       # Maximum end-of-period "assets above minimum" value
    "aXtraCount" : 48,                     # Number of points in the base grid of "assets above minimum"
    "aXtraNestFac" : 3,                    # Exponential nesting factor when constructing "assets above minimum" grid
    "aXtraExtra" : [None],                 # Additional values to add to aXtraGrid

    # A few other paramaters
    "BoroCnstArt" : 0.0,                   # Artificial borrowing constraint; imposed minimum level of end-of period assets
    "vFuncBool" : True,                    # Whether to calculate the value function during solution
    "CubicBool" : False,                   # Preference shocks currently only compatible with linear cFunc
    "T_cycle" : 1,                         # Number of periods in the cycle for this agent type

    # Parameters only used in simulation
    "AgentCount" : 10000,                  # Number of agents of this type
    "T_sim" : 120,                         # Number of periods to simulate
    "aNrmInitMean" : -6.0,                 # Mean of log initial assets
    "aNrmInitStd"  : 1.0,                  # Standard deviation of log initial assets
    "pLvlInitMean" : 0.0,                  # Mean of log initial permanent income
    "pLvlInitStd"  : 0.0,                  # Standard deviation of log initial permanent income
    "PermGroFacAgg" : 1.0,                 # Aggregate permanent income growth factor
    "T_age" : None,                        # Age after which simulated agents are automatically killed
}
class testIndShockConsumerTypeExample(unittest.TestCase):
    """Regression test for the infinite-horizon IdiosyncDict example.

    Numeric expectations pin previously observed solver/simulation output.
    """

    def test_infinite_horizon(self):
        IndShockExample = IndShockConsumerType(**IdiosyncDict)
        IndShockExample.cycles = 0 # Make this type have an infinite horizon
        IndShockExample.solve()

        # Steady-state target market resources and the lower bound of the
        # consumption function's grid.
        self.assertAlmostEqual(IndShockExample.solution[0].mNrmSS,
                               1.5488165705077026)
        self.assertAlmostEqual(IndShockExample.solution[0].cFunc.functions[0].x_list[0],
                               -0.25017509)

        IndShockExample.track_vars = ['aNrmNow','mNrmNow','cNrmNow','pLvlNow']
        IndShockExample.initializeSim()
        IndShockExample.simulate()

        self.assertAlmostEqual(IndShockExample.mNrmNow_hist[0][0],
                               1.0170176090252379)
# Ten-period lifecycle parameterization (retirement at period 7) used by
# testIndShockConsumerTypeLifecycle below.
LifecycleDict={ # Click arrow to expand this fairly large parameter dictionary
    # Parameters shared with the perfect foresight model
    "CRRA": 2.0,                           # Coefficient of relative risk aversion
    "Rfree": 1.03,                         # Interest factor on assets
    "DiscFac": 0.96,                       # Intertemporal discount factor
    "LivPrb" : [0.99,0.9,0.8,0.7,0.6,0.5,0.4,0.3,0.2,0.1],
    "PermGroFac" : [1.01,1.01,1.01,1.02,1.02,1.02,0.7,1.0,1.0,1.0],

    # Parameters that specify the income distribution over the lifecycle
    "PermShkStd" : [0.1,0.2,0.1,0.2,0.1,0.2,0.1,0,0,0],
    "PermShkCount" : 7,                    # Number of points in discrete approximation to permanent income shocks
    "TranShkStd" : [0.3,0.2,0.1,0.3,0.2,0.1,0.3,0,0,0],
    "TranShkCount" : 7,                    # Number of points in discrete approximation to transitory income shocks
    "UnempPrb" : 0.05,                     # Probability of unemployment while working
    "IncUnemp" : 0.3,                      # Unemployment benefits replacement rate
    "UnempPrbRet" : 0.0005,                # Probability of "unemployment" while retired
    "IncUnempRet" : 0.0,                   # "Unemployment" benefits when retired
    "T_retire" : 7,                        # Period of retirement (0 --> no retirement)
    "tax_rate" : 0.0,                      # Flat income tax rate (legacy parameter, will be removed in future)

    # Parameters for constructing the "assets above minimum" grid
    "aXtraMin" : 0.001,                    # Minimum end-of-period "assets above minimum" value
    "aXtraMax" : 20,                       # Maximum end-of-period "assets above minimum" value
    "aXtraCount" : 48,                     # Number of points in the base grid of "assets above minimum"
    "aXtraNestFac" : 3,                    # Exponential nesting factor when constructing "assets above minimum" grid
    "aXtraExtra" : [None],                 # Additional values to add to aXtraGrid

    # A few other paramaters
    "BoroCnstArt" : 0.0,                   # Artificial borrowing constraint; imposed minimum level of end-of period assets
    "vFuncBool" : True,                    # Whether to calculate the value function during solution
    "CubicBool" : False,                   # Preference shocks currently only compatible with linear cFunc
    "T_cycle" : 10,                        # Number of periods in the cycle for this agent type

    # Parameters only used in simulation
    "AgentCount" : 10000,                  # Number of agents of this type
    "T_sim" : 120,                         # Number of periods to simulate
    "aNrmInitMean" : -6.0,                 # Mean of log initial assets
    "aNrmInitStd"  : 1.0,                  # Standard deviation of log initial assets
    "pLvlInitMean" : 0.0,                  # Mean of log initial permanent income
    "pLvlInitStd"  : 0.0,                  # Standard deviation of log initial permanent income
    "PermGroFacAgg" : 1.0,                 # Aggregate permanent income growth factor
    "T_age" : 11,                          # Age after which simulated agents are automatically killed
}
class testIndShockConsumerTypeLifecycle(unittest.TestCase):
    """Tests for a 10-period lifecycle IndShockConsumerType built from LifecycleDict."""

    def test_lifecyle(self):
        LifecycleExample = IndShockConsumerType(**LifecycleDict)
        LifecycleExample.cycles = 1  # One pass through the lifecycle, then death
        LifecycleExample.solve()

        # 10 lifecycle periods plus the terminal period -> 11 solutions.
        # (assertEquals is a deprecated unittest alias; use assertEqual.)
        self.assertEqual(len(LifecycleExample.solution), 11)

        # Smoke check that every period's solution exposes mNrmMin; the
        # minimum itself is calibration-dependent, so it is not pinned.
        np.min([LifecycleExample.solution[t].mNrmMin for t in
                range(LifecycleExample.T_cycle)])

        # Regression value for mid-life consumption at m = 3.
        self.assertAlmostEqual(LifecycleExample.solution[5].cFunc(3).tolist(),
                               2.129983771775666)
# Parameter dictionary for a cyclical consumer: a four-period pattern of
# income growth that repeats forever (solved as an infinite-horizon model).
CyclicalDict = {
    # Parameters shared with the perfect foresight model
    "CRRA": 2.0, # Coefficient of relative risk aversion
    "Rfree": 1.03, # Interest factor on assets
    "DiscFac": 0.96, # Intertemporal discount factor
    "LivPrb" : 4*[0.98], # Survival probability
    "PermGroFac" : [1.082251, 2.8, 0.3, 1.1], # Permanent income growth factor in each period of the cycle
    # Parameters that specify the income distribution over the lifecycle
    "PermShkStd" : [0.1,0.1,0.1,0.1], # Standard deviation of log permanent income shocks, by period
    "PermShkCount" : 7, # Number of points in discrete approximation to permanent income shocks
    "TranShkStd" : [0.2,0.2,0.2,0.2], # Standard deviation of log transitory income shocks, by period
    "TranShkCount" : 7, # Number of points in discrete approximation to transitory income shocks
    "UnempPrb" : 0.05, # Probability of unemployment while working
    "IncUnemp" : 0.3, # Unemployment benefits replacement rate
    "UnempPrbRet" : 0.0005, # Probability of "unemployment" while retired
    "IncUnempRet" : 0.0, # "Unemployment" benefits when retired
    "T_retire" : 0, # Period of retirement (0 --> no retirement)
    "tax_rate" : 0.0, # Flat income tax rate (legacy parameter, will be removed in future)
    # Parameters for constructing the "assets above minimum" grid
    "aXtraMin" : 0.001, # Minimum end-of-period "assets above minimum" value
    "aXtraMax" : 20, # Maximum end-of-period "assets above minimum" value
    "aXtraCount" : 48, # Number of points in the base grid of "assets above minimum"
    "aXtraNestFac" : 3, # Exponential nesting factor when constructing "assets above minimum" grid
    "aXtraExtra" : [None], # Additional values to add to aXtraGrid
    # A few other parameters
    "BoroCnstArt" : 0.0, # Artificial borrowing constraint; imposed minimum level of end-of period assets
    "vFuncBool" : True, # Whether to calculate the value function during solution
    "CubicBool" : False, # Preference shocks currently only compatible with linear cFunc
    "T_cycle" : 4, # Number of periods in the cycle for this agent type
    # Parameters only used in simulation
    "AgentCount" : 10000, # Number of agents of this type
    "T_sim" : 120, # Number of periods to simulate
    "aNrmInitMean" : -6.0, # Mean of log initial assets
    "aNrmInitStd" : 1.0, # Standard deviation of log initial assets
    "pLvlInitMean" : 0.0, # Mean of log initial permanent income
    "pLvlInitStd" : 0.0, # Standard deviation of log initial permanent income
    "PermGroFacAgg" : 1.0, # Aggregate permanent income growth factor
    "T_age" : None, # Age after which simulated agents are automatically killed
}
class testIndShockConsumerTypeCyclical(unittest.TestCase):
    """Tests for the infinite-horizon cyclical consumer built from CyclicalDict.

    Renamed: this class was previously also called
    testIndShockConsumerTypeLifecycle, which collided with the lifecycle test
    class of the same name so unittest discovery silently ran only one of them.
    """

    def test_cycle(self):
        CyclicalExample = IndShockConsumerType(**CyclicalDict)
        CyclicalExample.cycles = 0  # Make this consumer type have an infinite horizon
        CyclicalExample.solve()
        # Regression value for consumption at m = 3 in the last cycle period.
        self.assertAlmostEqual(CyclicalExample.solution[3].cFunc(3).tolist(),
                               1.5958390056965004)
adding tests for LifecycleExample solution consumption function
from HARK.ConsumptionSaving.ConsIndShockModel import \
IndShockConsumerType, ConsIndShockSolverBasic
import HARK.ConsumptionSaving.ConsumerParameters as Params
import numpy as np
import unittest
class testIndShockConsumerType(unittest.TestCase):
    """Unit tests for IndShockConsumerType's solution and simulation machinery.

    The asserted numbers are regression values pinned from a known-good run,
    not independently derived answers.
    """
    def setUp(self):
        # Two agents and a short horizon keep the simulation tests fast.
        self.agent = IndShockConsumerType(
            AgentCount = 2,
            T_sim = 10
        )
        self.agent.solve()
    def test_getShocks(self):
        # Birth agent 0, advance one period, then birth agent 1, so the two
        # agents are at different ages when shocks are drawn.  The shock
        # values below come from the seeded RNG and must not change.
        self.agent.initializeSim()
        self.agent.simBirth(np.array([True,False]))
        self.agent.simOnePeriod()
        self.agent.simBirth(np.array([False,True]))
        self.agent.getShocks()
        self.assertEqual(self.agent.PermShkNow[0],
                         1.0050166461586711)
        self.assertEqual(self.agent.PermShkNow[1],
                         1.0050166461586711)
        self.assertEqual(self.agent.TranShkNow[0],
                         1.1176912196531754)
    def test_ConsIndShockSolverBasic(self):
        # Solve the full lifecycle model, then rebuild one period's solver by
        # hand and check its intermediate quantities step by step.
        LifecycleExample = IndShockConsumerType(
            **Params.init_lifecycle)
        LifecycleExample.cycles = 1
        LifecycleExample.solve()
        # Consumption at m = 1 in the first three periods of life.
        self.assertAlmostEqual(
            LifecycleExample.solution[0].cFunc(1).tolist(),
            0.87362789)
        self.assertAlmostEqual(
            LifecycleExample.solution[1].cFunc(1).tolist(),
            0.9081621)
        self.assertAlmostEqual(
            LifecycleExample.solution[2].cFunc(1).tolist(),
            0.9563899)
        # test the solution_terminal: everything is consumed in the last period
        self.assertAlmostEqual(
            LifecycleExample.solution[10].cFunc(2).tolist(),
            2)
        # Hand-built solver for the next-to-last period.
        solver = ConsIndShockSolverBasic(
            LifecycleExample.solution[-2],
            LifecycleExample.IncomeDstn[0],
            LifecycleExample.LivPrb[0],
            LifecycleExample.DiscFac,
            LifecycleExample.CRRA,
            LifecycleExample.Rfree,
            LifecycleExample.PermGroFac[0],
            LifecycleExample.BoroCnstArt,
            LifecycleExample.aXtraGrid,
            LifecycleExample.vFuncBool,
            LifecycleExample.CubicBool)
        solver.prepareToSolve()
        # Effective discount factor = DiscFac * LivPrb.
        self.assertAlmostEqual(solver.DiscFacEff,
                               0.9503999999999999)
        self.assertAlmostEqual(solver.PermShkMinNext,
                               0.850430160026919)
        self.assertAlmostEqual(solver.cFuncNowCnst(4).tolist(),
                               4.0)
        self.assertAlmostEqual(solver.prepareToCalcEndOfPrdvP()[0],
                               -0.2491750859108316)
        self.assertAlmostEqual(solver.prepareToCalcEndOfPrdvP()[-1],
                               19.74982491408914)
        # End-of-period marginal value at the grid's two ends.
        EndOfPrdvP = solver.calcEndOfPrdvP()
        self.assertAlmostEqual(EndOfPrdvP[0],
                               6622.251864311334)
        self.assertAlmostEqual(EndOfPrdvP[-1],
                               0.026301061207747087)
        solution = solver.makeBasicSolution(EndOfPrdvP,
                                            solver.aNrmNow,
                                            solver.makeLinearcFunc)
        solver.addMPCandHumanWealth(solution)
        self.assertAlmostEqual(solution.cFunc(4).tolist(),
                               1.7391265696400773)
    def test_simulated_values(self):
        # Simulate the solved agent and spot-check MPC and asset level.
        self.agent.initializeSim()
        self.agent.simulate()
        print(self.agent.aLvlNow)  # NOTE(review): debug output; consider removing
        self.assertAlmostEqual(self.agent.MPCnow[1],
                               0.5535801655448935)
        self.assertAlmostEqual(self.agent.aLvlNow[1],
                               0.18832361)
class testBufferStock(unittest.TestCase):
    """ Tests of the results of the BufferStock REMARK.

    setUp installs the paper's baseline calibration; the asserted numbers are
    regression values from a known-good solution of that calibration.
    """
    def setUp(self):
        # Make a dictionary containing all parameters needed to solve the model
        self.base_params = Params.init_idiosyncratic_shocks
        # Set the parameters for the baseline results in the paper
        # using the variable values defined in the cell above
        self.base_params['PermGroFac'] = [1.03]
        self.base_params['Rfree'] = 1.04
        self.base_params['DiscFac'] = 0.96
        self.base_params['CRRA'] = 2.00
        self.base_params['UnempPrb'] = 0.005
        self.base_params['IncUnemp'] = 0.0
        self.base_params['PermShkStd'] = [0.1]
        self.base_params['TranShkStd'] = [0.1]
        self.base_params['LivPrb'] = [1.0]
        self.base_params['CubicBool'] = True
        self.base_params['T_cycle'] = 1
        self.base_params['BoroCnstArt'] = None
    def test_baseEx(self):
        # Finite-horizon version: check consumption at several ages.
        baseEx = IndShockConsumerType(**self.base_params)
        baseEx.cycles = 100 # Make this type have a finite horizon (Set T = 100)
        baseEx.solve()
        baseEx.unpackcFunc()
        m = np.linspace(0,9.5,1000)
        # Consumption functions in the first period and 1/5/10 periods
        # before the end of life.
        c_m = baseEx.cFunc[0](m)
        c_t1 = baseEx.cFunc[-2](m)
        c_t5 = baseEx.cFunc[-6](m)
        c_t10 = baseEx.cFunc[-11](m)
        self.assertAlmostEqual(c_m[500], 1.4008090582203356)
        self.assertAlmostEqual(c_t1[500], 2.9227437159255216)
        self.assertAlmostEqual(c_t5[500], 1.7350607327187664)
        self.assertAlmostEqual(c_t10[500], 1.4991390649979213)
        self.assertAlmostEqual(c_t10[600], 1.6101476268581576)
        self.assertAlmostEqual(c_t10[700], 1.7196531041366991)
    def test_GICFails(self):
        # Raise Rfree / flatten growth so the growth impatience condition
        # fails; the solution should flag GICPF as False.
        GIC_fail_dictionary = dict(self.base_params)
        GIC_fail_dictionary['Rfree']      = 1.08
        GIC_fail_dictionary['PermGroFac'] = [1.00]
        GICFailExample = IndShockConsumerType(
            cycles=0, # cycles=0 makes this an infinite horizon consumer
            **GIC_fail_dictionary)
        GICFailExample.solve()
        GICFailExample.unpackcFunc()
        m = np.linspace(0,5,1000)
        c_m = GICFailExample.cFunc[0](m)
        self.assertAlmostEqual(c_m[500], 0.7772637042393458)
        self.assertAlmostEqual(c_m[700], 0.8392649061916746)
        self.assertFalse(GICFailExample.GICPF)
    def test_infinite_horizon(self):
        # Infinite-horizon version: check the consumption function around the
        # steady-state target and the implied MPC at two points.
        baseEx_inf = IndShockConsumerType(cycles=0,
                                          **self.base_params)
        baseEx_inf.solve()
        baseEx_inf.unpackcFunc()
        m1 = np.linspace(1,baseEx_inf.solution[0].mNrmSS,50) # m1 defines the plot range on the left of target m value (e.g. m <= target m)
        c_m1 = baseEx_inf.cFunc[0](m1)
        self.assertAlmostEqual(c_m1[0], 0.8527887545025995)
        self.assertAlmostEqual(c_m1[-1], 1.0036279936408656)
        x1 = np.linspace(0,25,1000)
        cfunc_m = baseEx_inf.cFunc[0](x1)
        self.assertAlmostEqual(cfunc_m[500], 1.8902146173138235)
        self.assertAlmostEqual(cfunc_m[700], 2.1591451850267176)
        m = np.linspace(0.001,8,1000)
        # Use the HARK method derivative to get the derivative of cFunc, and the values are just the MPC
        MPC = baseEx_inf.cFunc[0].derivative(m)
        self.assertAlmostEqual(MPC[500], 0.08415000641504392)
        self.assertAlmostEqual(MPC[700], 0.07173144137912524)
# Parameter dictionary for the baseline one-period, infinite-horizon consumer
# with idiosyncratic permanent and transitory income shocks.
IdiosyncDict={
    # Parameters shared with the perfect foresight model
    "CRRA": 2.0, # Coefficient of relative risk aversion
    "Rfree": 1.03, # Interest factor on assets
    "DiscFac": 0.96, # Intertemporal discount factor
    "LivPrb" : [0.98], # Survival probability
    "PermGroFac" :[1.01], # Permanent income growth factor
    # Parameters that specify the income distribution over the lifecycle
    "PermShkStd" : [0.1], # Standard deviation of log permanent shocks to income
    "PermShkCount" : 7, # Number of points in discrete approximation to permanent income shocks
    "TranShkStd" : [0.2], # Standard deviation of log transitory shocks to income
    "TranShkCount" : 7, # Number of points in discrete approximation to transitory income shocks
    "UnempPrb" : 0.05, # Probability of unemployment while working
    "IncUnemp" : 0.3, # Unemployment benefits replacement rate
    "UnempPrbRet" : 0.0005, # Probability of "unemployment" while retired
    "IncUnempRet" : 0.0, # "Unemployment" benefits when retired
    "T_retire" : 0, # Period of retirement (0 --> no retirement)
    "tax_rate" : 0.0, # Flat income tax rate (legacy parameter, will be removed in future)
    # Parameters for constructing the "assets above minimum" grid
    "aXtraMin" : 0.001, # Minimum end-of-period "assets above minimum" value
    "aXtraMax" : 20, # Maximum end-of-period "assets above minimum" value
    "aXtraCount" : 48, # Number of points in the base grid of "assets above minimum"
    "aXtraNestFac" : 3, # Exponential nesting factor when constructing "assets above minimum" grid
    "aXtraExtra" : [None], # Additional values to add to aXtraGrid
    # A few other parameters
    "BoroCnstArt" : 0.0, # Artificial borrowing constraint; imposed minimum level of end-of period assets
    "vFuncBool" : True, # Whether to calculate the value function during solution
    "CubicBool" : False, # Preference shocks currently only compatible with linear cFunc
    "T_cycle" : 1, # Number of periods in the cycle for this agent type
    # Parameters only used in simulation
    "AgentCount" : 10000, # Number of agents of this type
    "T_sim" : 120, # Number of periods to simulate
    "aNrmInitMean" : -6.0, # Mean of log initial assets
    "aNrmInitStd" : 1.0, # Standard deviation of log initial assets
    "pLvlInitMean" : 0.0, # Mean of log initial permanent income
    "pLvlInitStd" : 0.0, # Standard deviation of log initial permanent income
    "PermGroFacAgg" : 1.0, # Aggregate permanent income growth factor
    "T_age" : None, # Age after which simulated agents are automatically killed
}
class testIndShockConsumerTypeExample(unittest.TestCase):
    """Regression tests for the infinite-horizon idiosyncratic-shocks example."""

    def test_infinite_horizon(self):
        # Build the example agent and give it an infinite horizon.
        example = IndShockConsumerType(**IdiosyncDict)
        example.cycles = 0
        example.solve()

        # Steady-state target of normalized market resources.
        self.assertAlmostEqual(example.solution[0].mNrmSS, 1.5488165705077026)
        # Leftmost gridpoint of the unconstrained consumption function.
        self.assertAlmostEqual(
            example.solution[0].cFunc.functions[0].x_list[0], -0.25017509)

        # Simulate and spot-check the tracked history of market resources.
        example.track_vars = ['aNrmNow','mNrmNow','cNrmNow','pLvlNow']
        example.initializeSim()
        example.simulate()
        self.assertAlmostEqual(example.mNrmNow_hist[0][0], 1.0170176090252379)
# Parameter dictionary for a ten-period lifecycle consumer with mortality,
# age-varying income growth and risk, and retirement in period 7.
LifecycleDict={ # Click arrow to expand this fairly large parameter dictionary
    # Parameters shared with the perfect foresight model
    "CRRA": 2.0, # Coefficient of relative risk aversion
    "Rfree": 1.03, # Interest factor on assets
    "DiscFac": 0.96, # Intertemporal discount factor
    "LivPrb" : [0.99,0.9,0.8,0.7,0.6,0.5,0.4,0.3,0.2,0.1], # Survival probability by age
    "PermGroFac" : [1.01,1.01,1.01,1.02,1.02,1.02,0.7,1.0,1.0,1.0], # Permanent income growth factor by age
    # Parameters that specify the income distribution over the lifecycle
    "PermShkStd" : [0.1,0.2,0.1,0.2,0.1,0.2,0.1,0,0,0], # Std dev of log permanent income shocks by age (zero in retirement)
    "PermShkCount" : 7, # Number of points in discrete approximation to permanent income shocks
    "TranShkStd" : [0.3,0.2,0.1,0.3,0.2,0.1,0.3,0,0,0], # Std dev of log transitory income shocks by age (zero in retirement)
    "TranShkCount" : 7, # Number of points in discrete approximation to transitory income shocks
    "UnempPrb" : 0.05, # Probability of unemployment while working
    "IncUnemp" : 0.3, # Unemployment benefits replacement rate
    "UnempPrbRet" : 0.0005, # Probability of "unemployment" while retired
    "IncUnempRet" : 0.0, # "Unemployment" benefits when retired
    "T_retire" : 7, # Period of retirement (0 --> no retirement)
    "tax_rate" : 0.0, # Flat income tax rate (legacy parameter, will be removed in future)
    # Parameters for constructing the "assets above minimum" grid
    "aXtraMin" : 0.001, # Minimum end-of-period "assets above minimum" value
    "aXtraMax" : 20, # Maximum end-of-period "assets above minimum" value
    "aXtraCount" : 48, # Number of points in the base grid of "assets above minimum"
    "aXtraNestFac" : 3, # Exponential nesting factor when constructing "assets above minimum" grid
    "aXtraExtra" : [None], # Additional values to add to aXtraGrid
    # A few other parameters
    "BoroCnstArt" : 0.0, # Artificial borrowing constraint; imposed minimum level of end-of period assets
    "vFuncBool" : True, # Whether to calculate the value function during solution
    "CubicBool" : False, # Preference shocks currently only compatible with linear cFunc
    "T_cycle" : 10, # Number of periods in the cycle for this agent type
    # Parameters only used in simulation
    "AgentCount" : 10000, # Number of agents of this type
    "T_sim" : 120, # Number of periods to simulate
    "aNrmInitMean" : -6.0, # Mean of log initial assets
    "aNrmInitStd" : 1.0, # Standard deviation of log initial assets
    "pLvlInitMean" : 0.0, # Mean of log initial permanent income
    "pLvlInitStd" : 0.0, # Standard deviation of log initial permanent income
    "PermGroFacAgg" : 1.0, # Aggregate permanent income growth factor
    "T_age" : 11, # Age after which simulated agents are automatically killed
}
class testIndShockConsumerTypeLifecycle(unittest.TestCase):
    """Tests for a 10-period lifecycle IndShockConsumerType built from LifecycleDict."""

    def test_lifecyle(self):
        LifecycleExample = IndShockConsumerType(**LifecycleDict)
        LifecycleExample.cycles = 1  # One pass through the lifecycle, then death
        LifecycleExample.solve()

        # 10 lifecycle periods plus the terminal period -> 11 solutions.
        # (assertEquals is a deprecated unittest alias; use assertEqual.)
        self.assertEqual(len(LifecycleExample.solution), 11)

        # Smoke check that every period's solution exposes mNrmMin; the
        # minimum itself is calibration-dependent, so it is not pinned.
        np.min([LifecycleExample.solution[t].mNrmMin for t in
                range(LifecycleExample.T_cycle)])

        # Regression value for mid-life consumption at m = 3.
        self.assertAlmostEqual(LifecycleExample.solution[5].cFunc(3).tolist(),
                               2.129983771775666)
# Parameter dictionary for a cyclical consumer: a four-period pattern of
# income growth that repeats forever (solved as an infinite-horizon model).
CyclicalDict = {
    # Parameters shared with the perfect foresight model
    "CRRA": 2.0, # Coefficient of relative risk aversion
    "Rfree": 1.03, # Interest factor on assets
    "DiscFac": 0.96, # Intertemporal discount factor
    "LivPrb" : 4*[0.98], # Survival probability
    "PermGroFac" : [1.082251, 2.8, 0.3, 1.1], # Permanent income growth factor in each period of the cycle
    # Parameters that specify the income distribution over the lifecycle
    "PermShkStd" : [0.1,0.1,0.1,0.1], # Standard deviation of log permanent income shocks, by period
    "PermShkCount" : 7, # Number of points in discrete approximation to permanent income shocks
    "TranShkStd" : [0.2,0.2,0.2,0.2], # Standard deviation of log transitory income shocks, by period
    "TranShkCount" : 7, # Number of points in discrete approximation to transitory income shocks
    "UnempPrb" : 0.05, # Probability of unemployment while working
    "IncUnemp" : 0.3, # Unemployment benefits replacement rate
    "UnempPrbRet" : 0.0005, # Probability of "unemployment" while retired
    "IncUnempRet" : 0.0, # "Unemployment" benefits when retired
    "T_retire" : 0, # Period of retirement (0 --> no retirement)
    "tax_rate" : 0.0, # Flat income tax rate (legacy parameter, will be removed in future)
    # Parameters for constructing the "assets above minimum" grid
    "aXtraMin" : 0.001, # Minimum end-of-period "assets above minimum" value
    "aXtraMax" : 20, # Maximum end-of-period "assets above minimum" value
    "aXtraCount" : 48, # Number of points in the base grid of "assets above minimum"
    "aXtraNestFac" : 3, # Exponential nesting factor when constructing "assets above minimum" grid
    "aXtraExtra" : [None], # Additional values to add to aXtraGrid
    # A few other parameters
    "BoroCnstArt" : 0.0, # Artificial borrowing constraint; imposed minimum level of end-of period assets
    "vFuncBool" : True, # Whether to calculate the value function during solution
    "CubicBool" : False, # Preference shocks currently only compatible with linear cFunc
    "T_cycle" : 4, # Number of periods in the cycle for this agent type
    # Parameters only used in simulation
    "AgentCount" : 10000, # Number of agents of this type
    "T_sim" : 120, # Number of periods to simulate
    "aNrmInitMean" : -6.0, # Mean of log initial assets
    "aNrmInitStd" : 1.0, # Standard deviation of log initial assets
    "pLvlInitMean" : 0.0, # Mean of log initial permanent income
    "pLvlInitStd" : 0.0, # Standard deviation of log initial permanent income
    "PermGroFacAgg" : 1.0, # Aggregate permanent income growth factor
    "T_age" : None, # Age after which simulated agents are automatically killed
}
class testIndShockConsumerTypeCyclical(unittest.TestCase):
    """Tests for the infinite-horizon cyclical consumer built from CyclicalDict.

    Renamed: this class was previously also called
    testIndShockConsumerTypeLifecycle, which collided with the lifecycle test
    class of the same name so unittest discovery silently ran only one of them.
    """

    def test_cycle(self):
        CyclicalExample = IndShockConsumerType(**CyclicalDict)
        CyclicalExample.cycles = 0  # Make this consumer type have an infinite horizon
        CyclicalExample.solve()
        # Regression value for consumption at m = 3 in the last cycle period.
        self.assertAlmostEqual(CyclicalExample.solution[3].cFunc(3).tolist(),
                               1.5958390056965004)
|
import base64
import urllib2
import json
import os
from celery import task
from django.conf import settings
from django.utils.timezone import now
from github.GithubObject import NotSet
from github import Github, GithubException, InputGitTreeElement
from ide.git import git_auth_check, get_github
from ide.models.build import BuildResult
from ide.models.project import Project
from ide.tasks import do_import_archive, run_compile
from ide.utils.git import git_sha, git_blob
from ide.utils.project import find_project_root
from ide.utils.sdk import generate_manifest_dict, generate_manifest, generate_wscript_file
from utils.keen_helper import send_keen_event
__author__ = 'katharine'
@task(acks_late=True)
def do_import_github(project_id, github_user, github_project, github_branch, delete_project=False):
    """Celery task: import a GitHub branch archive into a CloudPebble project.

    Downloads https://github.com/<user>/<project>/archive/<branch>.zip and
    hands the bytes to do_import_archive.  On failure, a keen analytics event
    is recorded, the half-created project is deleted when delete_project is
    set, and the original exception is re-raised.
    """
    try:
        url = "https://github.com/%s/%s/archive/%s.zip" % (github_user, github_project, github_branch)
        if file_exists(url):
            u = urllib2.urlopen(url)
            return do_import_archive(project_id, u.read())
        else:
            raise Exception("The branch '%s' does not exist." % github_branch)
    except Exception as e:
        # Best-effort lookup so the failure event can be attributed to a user.
        # Narrowed from a bare 'except:', which also swallowed
        # KeyboardInterrupt/SystemExit.
        try:
            project = Project.objects.get(pk=project_id)
            user = project.owner
        except Exception:
            project = None
            user = None
        if delete_project and project is not None:
            try:
                project.delete()
            except Exception:
                # Deliberate best-effort cleanup; the original import error
                # below is the one worth surfacing.
                pass
        send_keen_event('cloudpebble', 'cloudpebble_github_import_failed', user=user, data={
            'data': {
                # str(e) instead of the deprecated BaseException.message attribute.
                'reason': str(e),
                'github_user': github_user,
                'github_project': github_project,
                'github_branch': github_branch
            }
        })
        raise
def file_exists(url):
    """Return True if an HTTP HEAD request against *url* succeeds."""
    request = urllib2.Request(url)
    # urllib2 issues GET by default; override to HEAD so the body is not downloaded.
    request.get_method = lambda: 'HEAD'
    try:
        urllib2.urlopen(request)
    except urllib2.URLError:
        # Narrowed from a bare 'except:': only network/HTTP failures
        # (HTTPError subclasses URLError) mean "does not exist"; programming
        # errors now propagate instead of being swallowed.
        return False
    else:
        return True
@git_auth_check
def github_push(user, commit_message, repo_name, project):
    """Push the project's sources, resources and manifest to its GitHub repo.

    Starting from the remote branch's current git tree, overlays the
    project's source files, resource variants and generated appinfo.json /
    wscript, prunes files the project no longer has, and creates a commit on
    the project's branch when anything differs.  Returns True if a commit was
    created, False otherwise.
    """
    g = Github(user.github.token, client_id=settings.GITHUB_CLIENT_ID, client_secret=settings.GITHUB_CLIENT_SECRET)
    repo = g.get_repo(repo_name)
    try:
        branch = repo.get_branch(project.github_branch or repo.master_branch)
    except GithubException:
        raise Exception("Unable to get branch.")
    commit = repo.get_git_commit(branch.commit.sha)
    tree = repo.get_git_tree(commit.tree.sha, recursive=True)
    paths = [x.path for x in tree.tree]
    # next_tree mirrors the remote tree; entries are edited in place via
    # PyGithub's name-mangled private attributes (_InputGitTreeElement__sha etc.)
    # because InputGitTreeElement exposes no public mutators.
    next_tree = {x.path: InputGitTreeElement(path=x.path, mode=x.mode, type=x.type, sha=x.sha) for x in tree.tree}
    try:
        root = find_project_root(paths)
    except:
        # No recognizable project root in the repo; treat the repo root as it.
        root = ''
    expected_paths = set()
    def update_expected_paths(new_path):
        # This adds the path *and* all parent directories to the list of expected paths.
        split_path = new_path.split('/')
        expected_paths.update('/'.join(split_path[:p]) for p in range(2, len(split_path) + 1))
    src_root = root + 'src/'
    worker_src_root = root + 'worker_src/'
    project_sources = project.source_files.all()
    has_changed = False
    # Overlay every project source file onto the remote tree, comparing git
    # blob SHAs to detect changes.
    for source in project_sources:
        if source.target == 'worker':
            repo_path = worker_src_root + source.file_name
        else:
            repo_path = src_root + source.file_name
        update_expected_paths(repo_path)
        if repo_path not in next_tree:
            has_changed = True
            next_tree[repo_path] = InputGitTreeElement(path=repo_path, mode='100644', type='blob',
                                                       content=source.get_contents())
            print "New file: %s" % repo_path
        else:
            sha = next_tree[repo_path]._InputGitTreeElement__sha
            our_content = source.get_contents()
            expected_sha = git_sha(our_content)
            if expected_sha != sha:
                print "Updated file: %s" % repo_path
                # Clearing the sha and setting content makes GitHub store the
                # new blob when the tree is created.
                next_tree[repo_path]._InputGitTreeElement__sha = NotSet
                next_tree[repo_path]._InputGitTreeElement__content = our_content
                has_changed = True
    # Now try handling resource files.
    resources = project.resources.all()
    resource_root = root + 'resources/'
    for res in resources:
        for variant in res.variants.all():
            repo_path = resource_root + variant.path
            update_expected_paths(repo_path)
            if repo_path in next_tree:
                content = variant.get_contents()
                if git_sha(content) != next_tree[repo_path]._InputGitTreeElement__sha:
                    print "Changed resource: %s" % repo_path
                    has_changed = True
                    # Resources may be binary, so upload them as base64 blobs
                    # rather than inline tree content.
                    blob = repo.create_git_blob(base64.b64encode(content), 'base64')
                    print "Created blob %s" % blob.sha
                    next_tree[repo_path]._InputGitTreeElement__sha = blob.sha
            else:
                print "New resource: %s" % repo_path
                has_changed = True
                blob = repo.create_git_blob(base64.b64encode(variant.get_contents()), 'base64')
                print "Created blob %s" % blob.sha
                next_tree[repo_path] = InputGitTreeElement(path=repo_path, mode='100644', type='blob', sha=blob.sha)
    # Manage deleted files
    # (Python 2 keys() returns a list, so deleting during this loop is safe.)
    for path in next_tree.keys():
        if not (any(path.startswith(root) for root in (src_root, resource_root, worker_src_root))):
            continue
        if path not in expected_paths:
            del next_tree[path]
            print "Deleted file: %s" % path
            has_changed = True
    # Compare the resource dicts
    remote_manifest_path = root + 'appinfo.json'
    remote_wscript_path = root + 'wscript'
    remote_manifest_sha = next_tree[remote_manifest_path]._InputGitTreeElement__sha if remote_manifest_path in next_tree else None
    if remote_manifest_sha is not None:
        their_manifest_dict = json.loads(git_blob(repo, remote_manifest_sha))
        their_res_dict = their_manifest_dict['resources']
    else:
        their_manifest_dict = {}
        their_res_dict = {'media': []}
    our_manifest_dict = generate_manifest_dict(project, resources)
    our_res_dict = our_manifest_dict['resources']
    if our_res_dict != their_res_dict:
        print "Resources mismatch."
        has_changed = True
        # Try removing things that we've deleted, if any
        to_remove = set(x['file'] for x in their_res_dict['media']) - set(x['file'] for x in our_res_dict['media'])
        for path in to_remove:
            repo_path = resource_root + path
            if repo_path in next_tree:
                print "Deleted resource: %s" % repo_path
                del next_tree[repo_path]
    # This one is separate because there's more than just the resource map changing.
    if their_manifest_dict != our_manifest_dict:
        has_changed = True
        if remote_manifest_path in next_tree:
            next_tree[remote_manifest_path]._InputGitTreeElement__sha = NotSet
            next_tree[remote_manifest_path]._InputGitTreeElement__content = generate_manifest(project, resources)
        else:
            next_tree[remote_manifest_path] = InputGitTreeElement(path=remote_manifest_path, mode='100644', type='blob',
                                                                  content=generate_manifest(project, resources))
    # Native projects must have a wscript; generate one if the repo lacks it.
    if project.project_type == 'native' and remote_wscript_path not in next_tree:
        next_tree[remote_wscript_path] = InputGitTreeElement(path=remote_wscript_path, mode='100644', type='blob',
                                                             content=generate_wscript_file(project, True))
        has_changed = True
    # Commit the new tree.
    if has_changed:
        print "Has changed; committing"
        # GitHub seems to choke if we pass the raw directory nodes off to it,
        # so we delete those.
        for x in next_tree.keys():
            if next_tree[x]._InputGitTreeElement__mode == '040000':
                del next_tree[x]
                print "removing subtree node %s" % x
        print [x._InputGitTreeElement__mode for x in next_tree.values()]
        git_tree = repo.create_git_tree(next_tree.values())
        print "Created tree %s" % git_tree.sha
        git_commit = repo.create_git_commit(commit_message, git_tree, [commit])
        print "Created commit %s" % git_commit.sha
        git_ref = repo.get_git_ref('heads/%s' % (project.github_branch or repo.master_branch))
        git_ref.edit(git_commit.sha)
        print "Updated ref %s" % git_ref.ref
        project.github_last_commit = git_commit.sha
        project.github_last_sync = now()
        project.save()
        return True
    # NOTE(review): this analytics event is only reached when nothing changed
    # (the has_changed branch returns above) — confirm that is intended.
    send_keen_event('cloudpebble', 'cloudpebble_github_push', user=user, data={
        'data': {
            'repo': project.github_repo
        }
    })
    return False
def get_root_path(path):
    """Strip any '~tag' suffix from the stem of *path*, keeping its extension.

    e.g. 'resources/image~color.png' -> 'resources/image.png'.
    """
    stem, ext = os.path.splitext(path)
    root = stem.partition('~')[0]
    return root + ext
@git_auth_check
def github_pull(user, project):
    """Replace the project's contents with the head of its GitHub branch.

    Validates that the remote has an appinfo.json and all declared media
    resources before destructively wiping the project's source files and
    resources, then re-imports the branch's zipball via do_import_archive.
    Returns False when the project is already at the branch head, otherwise
    the result of do_import_archive.
    """
    g = get_github(user)
    repo_name = project.github_repo
    if repo_name is None:
        raise Exception("No GitHub repo defined.")
    repo = g.get_repo(repo_name)
    # If somehow we don't have a branch set, this will use the "master_branch"
    branch_name = project.github_branch or repo.master_branch
    try:
        branch = repo.get_branch(branch_name)
    except GithubException:
        raise Exception("Unable to get the branch.")
    if project.github_last_commit == branch.commit.sha:
        # Nothing to do.
        return False
    commit = repo.get_git_commit(branch.commit.sha)
    tree = repo.get_git_tree(commit.tree.sha, recursive=True)
    paths = {x.path: x for x in tree.tree}
    # Repo paths with any '~tag' variant suffix stripped (see get_root_path),
    # so manifest entries can be matched against tagged resource files.
    paths_notags = {get_root_path(x) for x in paths}
    root = find_project_root(paths)
    # First try finding the resource map so we don't fail out part-done later.
    # TODO: transaction support for file contents would be nice...
    resource_root = root + 'resources/'
    manifest_path = root + 'appinfo.json'
    if manifest_path in paths:
        manifest_sha = paths[manifest_path].sha
        manifest = json.loads(git_blob(repo, manifest_sha))
        media = manifest.get('resources', {}).get('media', [])
    else:
        raise Exception("appinfo.json not found")
    project_type = manifest.get('projectType', 'native')
    # Verify every declared resource exists in the repo before wiping anything.
    for resource in media:
        path = resource_root + resource['file']
        # pebblejs projects ship these resources themselves; skip the check.
        if project_type == 'pebblejs' and resource['name'] in {
            'MONO_FONT_14', 'IMAGE_MENU_ICON', 'IMAGE_LOGO_SPLASH', 'IMAGE_TILE_SPLASH'}:
            continue
        if path not in paths_notags:
            raise Exception("Resource %s not found in repo." % path)
    # Now we grab the zip.
    zip_url = repo.get_archive_link('zipball', branch_name)
    u = urllib2.urlopen(zip_url)
    # And wipe the project!
    project.source_files.all().delete()
    project.resources.all().delete()
    # This must happen before do_import_archive or we'll stamp on its results.
    project.github_last_commit = branch.commit.sha
    project.github_last_sync = now()
    project.save()
    import_result = do_import_archive(project.id, u.read())
    send_keen_event('cloudpebble', 'cloudpebble_github_pull', user=user, data={
        'data': {
            'repo': project.github_repo
        }
    })
    return import_result
@task
def do_github_push(project_id, commit_message):
    """Celery task wrapper: push a project's files to its GitHub repository."""
    proj = Project.objects.select_related('owner__github').get(pk=project_id)
    return github_push(proj.owner, commit_message, proj.github_repo, proj)
@task
def do_github_pull(project_id):
    """Celery task wrapper: pull the latest GitHub commit into a project."""
    proj = Project.objects.select_related('owner__github').get(pk=project_id)
    return github_pull(proj.owner, proj)
@task
def hooked_commit(project_id, target_commit):
project = Project.objects.select_related('owner__github').get(pk=project_id)
did_something = False
print "Comparing %s versus %s" % (project.github_last_commit, target_commit)
if project.github_last_commit != target_commit:
github_pull(project.owner, project)
did_something = True
if project.github_hook_build:
build = BuildResult.objects.create(project=project)
run_compile(build.id)
did_something = True
return did_something
PEP-8 fixes
import base64
import urllib2
import json
import os
from celery import task
from django.conf import settings
from django.utils.timezone import now
from github.GithubObject import NotSet
from github import Github, GithubException, InputGitTreeElement
from ide.git import git_auth_check, get_github
from ide.models.build import BuildResult
from ide.models.project import Project
from ide.tasks import do_import_archive, run_compile
from ide.utils.git import git_sha, git_blob
from ide.utils.project import find_project_root
from ide.utils.sdk import generate_manifest_dict, generate_manifest, generate_wscript_file
from utils.keen_helper import send_keen_event
__author__ = 'katharine'
@task(acks_late=True)
def do_import_github(project_id, github_user, github_project, github_branch, delete_project=False):
    """Celery task: import a GitHub branch archive into a CloudPebble project.

    Downloads https://github.com/<user>/<project>/archive/<branch>.zip and
    hands the bytes to do_import_archive.  On failure, a keen analytics event
    is recorded, the half-created project is deleted when delete_project is
    set, and the original exception is re-raised.
    """
    try:
        url = "https://github.com/%s/%s/archive/%s.zip" % (github_user, github_project, github_branch)
        if file_exists(url):
            u = urllib2.urlopen(url)
            return do_import_archive(project_id, u.read())
        else:
            raise Exception("The branch '%s' does not exist." % github_branch)
    except Exception as e:
        # Best-effort lookup so the failure event can be attributed to a user.
        # Narrowed from a bare 'except:', which also swallowed
        # KeyboardInterrupt/SystemExit.
        try:
            project = Project.objects.get(pk=project_id)
            user = project.owner
        except Exception:
            project = None
            user = None
        if delete_project and project is not None:
            try:
                project.delete()
            except Exception:
                # Deliberate best-effort cleanup; the original import error
                # below is the one worth surfacing.
                pass
        send_keen_event('cloudpebble', 'cloudpebble_github_import_failed', user=user, data={
            'data': {
                # str(e) instead of the deprecated BaseException.message attribute.
                'reason': str(e),
                'github_user': github_user,
                'github_project': github_project,
                'github_branch': github_branch
            }
        })
        raise
def file_exists(url):
    """Return True if an HTTP HEAD request against *url* succeeds."""
    request = urllib2.Request(url)
    # urllib2 issues GET by default; override to HEAD so the body is not downloaded.
    request.get_method = lambda: 'HEAD'
    try:
        urllib2.urlopen(request)
    except urllib2.URLError:
        # Narrowed from a bare 'except:': only network/HTTP failures
        # (HTTPError subclasses URLError) mean "does not exist"; programming
        # errors now propagate instead of being swallowed.
        return False
    else:
        return True
@git_auth_check
def github_push(user, commit_message, repo_name, project):
    """Push the project's current sources/resources to its GitHub repo.

    Builds a new git tree from the repo's current branch tip, overlays the
    project's source files, resources, manifest and (for native projects)
    wscript, and commits the result if anything differs.

    :param user: owner whose GitHub token is used
    :param commit_message: message for the created commit
    :param repo_name: "owner/repo" name of the target repository
    :param project: the Project being pushed
    :return: True when a commit was created, False when nothing changed.
    """
    g = Github(user.github.token, client_id=settings.GITHUB_CLIENT_ID, client_secret=settings.GITHUB_CLIENT_SECRET)
    repo = g.get_repo(repo_name)
    try:
        branch = repo.get_branch(project.github_branch or repo.master_branch)
    except GithubException:
        raise Exception("Unable to get branch.")
    commit = repo.get_git_commit(branch.commit.sha)
    tree = repo.get_git_tree(commit.tree.sha, recursive=True)
    paths = [x.path for x in tree.tree]
    # Start the new tree as a copy of the remote one; entries are mutated
    # below via InputGitTreeElement's name-mangled private attributes.
    next_tree = {x.path: InputGitTreeElement(path=x.path, mode=x.mode, type=x.type, sha=x.sha) for x in tree.tree}
    try:
        root = find_project_root(paths)
    except:
        root = ''
    expected_paths = set()
    def update_expected_paths(new_path):
        # This adds the path *and* all parent directories to the list of expected paths.
        split_path = new_path.split('/')
        expected_paths.update('/'.join(split_path[:p]) for p in range(2, len(split_path) + 1))
    src_root = root + 'src/'
    worker_src_root = root + 'worker_src/'
    project_sources = project.source_files.all()
    has_changed = False
    # Overlay project source files, tracking whether anything differs from
    # the remote tree (compared by git blob sha).
    for source in project_sources:
        if source.target == 'worker':
            repo_path = worker_src_root + source.file_name
        else:
            repo_path = src_root + source.file_name
        update_expected_paths(repo_path)
        if repo_path not in next_tree:
            has_changed = True
            next_tree[repo_path] = InputGitTreeElement(path=repo_path, mode='100644', type='blob',
                                                       content=source.get_contents())
            print "New file: %s" % repo_path
        else:
            sha = next_tree[repo_path]._InputGitTreeElement__sha
            our_content = source.get_contents()
            expected_sha = git_sha(our_content)
            if expected_sha != sha:
                print "Updated file: %s" % repo_path
                # Clearing the sha and setting content makes GitHub create a
                # fresh blob for this entry.
                next_tree[repo_path]._InputGitTreeElement__sha = NotSet
                next_tree[repo_path]._InputGitTreeElement__content = our_content
                has_changed = True
    # Now try handling resource files.
    resources = project.resources.all()
    resource_root = root + 'resources/'
    for res in resources:
        for variant in res.variants.all():
            repo_path = resource_root + variant.path
            update_expected_paths(repo_path)
            if repo_path in next_tree:
                content = variant.get_contents()
                if git_sha(content) != next_tree[repo_path]._InputGitTreeElement__sha:
                    print "Changed resource: %s" % repo_path
                    has_changed = True
                    # Resources may be binary, so blobs are uploaded base64-encoded.
                    blob = repo.create_git_blob(base64.b64encode(content), 'base64')
                    print "Created blob %s" % blob.sha
                    next_tree[repo_path]._InputGitTreeElement__sha = blob.sha
            else:
                print "New resource: %s" % repo_path
                has_changed = True
                blob = repo.create_git_blob(base64.b64encode(variant.get_contents()), 'base64')
                print "Created blob %s" % blob.sha
                next_tree[repo_path] = InputGitTreeElement(path=repo_path, mode='100644', type='blob', sha=blob.sha)
    # Manage deleted files
    # NOTE: deleting from next_tree while iterating .keys() is safe on
    # Python 2, where .keys() returns a list snapshot.
    for path in next_tree.keys():
        if not (any(path.startswith(root) for root in (src_root, resource_root, worker_src_root))):
            continue
        if path not in expected_paths:
            del next_tree[path]
            print "Deleted file: %s" % path
            has_changed = True
    # Compare the resource dicts
    remote_manifest_path = root + 'appinfo.json'
    remote_wscript_path = root + 'wscript'
    remote_manifest_sha = next_tree[remote_manifest_path]._InputGitTreeElement__sha if remote_manifest_path in next_tree else None
    if remote_manifest_sha is not None:
        their_manifest_dict = json.loads(git_blob(repo, remote_manifest_sha))
        their_res_dict = their_manifest_dict['resources']
    else:
        their_manifest_dict = {}
        their_res_dict = {'media': []}
    our_manifest_dict = generate_manifest_dict(project, resources)
    our_res_dict = our_manifest_dict['resources']
    if our_res_dict != their_res_dict:
        print "Resources mismatch."
        has_changed = True
        # Try removing things that we've deleted, if any
        to_remove = set(x['file'] for x in their_res_dict['media']) - set(x['file'] for x in our_res_dict['media'])
        for path in to_remove:
            repo_path = resource_root + path
            if repo_path in next_tree:
                print "Deleted resource: %s" % repo_path
                del next_tree[repo_path]
    # This one is separate because there's more than just the resource map changing.
    if their_manifest_dict != our_manifest_dict:
        has_changed = True
        if remote_manifest_path in next_tree:
            next_tree[remote_manifest_path]._InputGitTreeElement__sha = NotSet
            next_tree[remote_manifest_path]._InputGitTreeElement__content = generate_manifest(project, resources)
        else:
            next_tree[remote_manifest_path] = InputGitTreeElement(path=remote_manifest_path, mode='100644', type='blob',
                                                                  content=generate_manifest(project, resources))
    # Native projects must always carry a wscript; add a default one if missing.
    if project.project_type == 'native' and remote_wscript_path not in next_tree:
        next_tree[remote_wscript_path] = InputGitTreeElement(path=remote_wscript_path, mode='100644', type='blob',
                                                             content=generate_wscript_file(project, True))
        has_changed = True
    # Commit the new tree.
    if has_changed:
        print "Has changed; committing"
        # GitHub seems to choke if we pass the raw directory nodes off to it,
        # so we delete those.
        for x in next_tree.keys():
            if next_tree[x]._InputGitTreeElement__mode == '040000':
                del next_tree[x]
                print "removing subtree node %s" % x
        print [x._InputGitTreeElement__mode for x in next_tree.values()]
        git_tree = repo.create_git_tree(next_tree.values())
        print "Created tree %s" % git_tree.sha
        git_commit = repo.create_git_commit(commit_message, git_tree, [commit])
        print "Created commit %s" % git_commit.sha
        git_ref = repo.get_git_ref('heads/%s' % (project.github_branch or repo.master_branch))
        git_ref.edit(git_commit.sha)
        print "Updated ref %s" % git_ref.ref
        project.github_last_commit = git_commit.sha
        project.github_last_sync = now()
        project.save()
        return True
    # NOTE(review): because of the `return True` above, this keen event is
    # only sent when *nothing* was pushed — confirm whether that is intended.
    send_keen_event('cloudpebble', 'cloudpebble_github_push', user=user, data={
        'data': {
            'repo': project.github_repo
        }
    })
    return False
def get_root_path(path):
    """Strip a '~variant' suffix from a filename, keeping its extension.

    e.g. 'resources/image~colour.png' -> 'resources/image.png'
    """
    stem, extension = os.path.splitext(path)
    base, _, _ = stem.partition('~')
    return base + extension
@git_auth_check
def github_pull(user, project):
    """Replace the project's contents with the tip of its GitHub branch.

    Validates that the repo contains appinfo.json and every resource it
    declares, then wipes the project's files and re-imports the branch
    zipball via do_import_archive.

    :param user: owner whose GitHub credentials are used
    :param project: the Project to overwrite
    :return: False when already up to date, otherwise the result of
        do_import_archive.
    """
    g = get_github(user)
    repo_name = project.github_repo
    if repo_name is None:
        raise Exception("No GitHub repo defined.")
    repo = g.get_repo(repo_name)
    # If somehow we don't have a branch set, this will use the "master_branch"
    branch_name = project.github_branch or repo.master_branch
    try:
        branch = repo.get_branch(branch_name)
    except GithubException:
        raise Exception("Unable to get the branch.")
    if project.github_last_commit == branch.commit.sha:
        # Nothing to do.
        return False
    commit = repo.get_git_commit(branch.commit.sha)
    tree = repo.get_git_tree(commit.tree.sha, recursive=True)
    paths = {x.path: x for x in tree.tree}
    # Paths with any '~variant' tag stripped, for resource existence checks.
    paths_notags = {get_root_path(x) for x in paths}
    root = find_project_root(paths)
    # First try finding the resource map so we don't fail out part-done later.
    # TODO: transaction support for file contents would be nice...
    resource_root = root + 'resources/'
    manifest_path = root + 'appinfo.json'
    if manifest_path in paths:
        manifest_sha = paths[manifest_path].sha
        manifest = json.loads(git_blob(repo, manifest_sha))
        media = manifest.get('resources', {}).get('media', [])
    else:
        raise Exception("appinfo.json not found")
    project_type = manifest.get('projectType', 'native')
    # Verify every declared resource exists before destroying anything.
    for resource in media:
        path = resource_root + resource['file']
        # pebblejs projects ship these four resources themselves, so they
        # are allowed to be absent from the repo.
        if project_type == 'pebblejs' and resource['name'] in {
            'MONO_FONT_14', 'IMAGE_MENU_ICON', 'IMAGE_LOGO_SPLASH', 'IMAGE_TILE_SPLASH'}:
            continue
        if path not in paths_notags:
            raise Exception("Resource %s not found in repo." % path)
    # Now we grab the zip.
    zip_url = repo.get_archive_link('zipball', branch_name)
    u = urllib2.urlopen(zip_url)
    # And wipe the project!
    project.source_files.all().delete()
    project.resources.all().delete()
    # This must happen before do_import_archive or we'll stamp on its results.
    project.github_last_commit = branch.commit.sha
    project.github_last_sync = now()
    project.save()
    import_result = do_import_archive(project.id, u.read())
    send_keen_event('cloudpebble', 'cloudpebble_github_pull', user=user, data={
        'data': {
            'repo': project.github_repo
        }
    })
    return import_result
@task
def do_github_push(project_id, commit_message):
    """Celery entry point: push the given project to its GitHub repository."""
    proj = Project.objects.select_related('owner__github').get(pk=project_id)
    return github_push(proj.owner, commit_message, proj.github_repo, proj)
@task
def do_github_pull(project_id):
    """Celery entry point: pull the given project from its GitHub repository."""
    proj = Project.objects.select_related('owner__github').get(pk=project_id)
    return github_pull(proj.owner, proj)
@task
def hooked_commit(project_id, target_commit):
project = Project.objects.select_related('owner__github').get(pk=project_id)
did_something = False
print "Comparing %s versus %s" % (project.github_last_commit, target_commit)
if project.github_last_commit != target_commit:
github_pull(project.owner, project)
did_something = True
if project.github_hook_build:
build = BuildResult.objects.create(project=project)
run_compile(build.id)
did_something = True
return did_something
|
# The MIT License (MIT)
# Copyright (c) 2016, 2017 by the ESA CCI Toolbox development team and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Description
===========
Index calculation operations
Functions
=========
"""
import xarray as xr
import pandas as pd
from cate.core.op import op, op_input
from cate.ops.select import select_var
from cate.ops.subset import subset_spatial
from cate.ops.anomaly import anomaly_external
from cate.core.types import PolygonLike, VarName
# Catch-all entry appended to file-open dialog filters alongside the NetCDF filter.
_ALL_FILE_FILTER = dict(name='All Files', extensions=['*'])
@op(tags=['index', 'nino34'])
# file_open_mode 'r': the reference file is read by anomaly_external, not
# written, so the UI must show an open-file (not save-file) dialog.
@op_input('file', file_open_mode='r', file_filters=[dict(name='NetCDF', extensions=['nc']), _ALL_FILE_FILTER])
@op_input('var', value_set_source='ds', data_type=VarName)
def enso_nino34(ds: xr.Dataset,
                var: VarName.TYPE,
                file: str,
                threshold: float=None) -> pd.DataFrame:
    """
    Calculate nino34 index, which is defined as a five month running mean of
    anomalies of monthly means of SST data in Nino3.4 region:: lon_min=-170
    lat_min=-5 lon_max=-120 lat_max=5.

    :param ds: A monthly SST dataset
    :param file: Path to the reference data file
    :param var: Dataset variable (geophysical quantity) to use for index
    calculation.
    :param threshold: If given, boolean El Nino/La Nina timeseries will be
    calculated and added to the output dataset according to the given
    threshold. Where anomaly larger than the positive value of the threshold
    indicates El Nino and anomaly smaller than the negative of the given
    threshold indicates La Nina.
    :return: A dataset that contains the index timeseries.
    """
    n34 = '-170, -5, -120, 5'
    name = 'ENSO N3.4 Index'
    return _generic_index_calculation(ds, var, n34, 5, file, name, threshold)
@op(tags=['index'])
@op_input('var', value_set_source='ds', data_type=VarName)
# file_open_mode 'r': the reference file is read by anomaly_external.
@op_input('file', file_open_mode='r', file_filters=[dict(name='NetCDF', extensions=['nc']), _ALL_FILE_FILTER])
@op_input('region', value_set=['N1+2', 'N3', 'N34', 'N4', 'custom'])
@op_input('custom_region', data_type=PolygonLike)
def enso(ds: xr.Dataset,
         var: VarName.TYPE,
         file: str,
         region: str='n34',
         custom_region: PolygonLike.TYPE=None,
         threshold: float=None) -> pd.DataFrame:
    """
    Calculate ENSO index, which is defined as a five month running mean of
    anomalies of monthly means of SST data in the given region.

    :param ds: A monthly SST dataset
    :param file: Path to the reference data file
    :param var: Dataset variable to use for index calculation
    :param region: Region for index calculation, the default is Nino3.4
    :param custom_region: If 'custom' is chosen as the 'region', this parameter
    has to be provided to set the desired region.
    :param threshold: If given, boolean El Nino/La Nina timeseries will be
    calculated and added to the output dataset, according to the given
    threshold. Where anomaly larger than the positive value of the threshold
    indicates El Nino and anomaly smaller than the negative of the given
    threshold indicates La Nina.
    :return: A dataset that contains the index timeseries.
    :raises ValueError: If an unknown region name is given, or no custom
    region is provided when 'custom' is selected.
    """
    # Region bounds as 'lon_min, lat_min, lon_max, lat_max'. 'N34' and 'N3.4'
    # are synonyms; both are present so the advertised value_set ('N34') and
    # the documented default ('n34') resolve — previously neither was a key
    # and the lookup raised a bare KeyError.
    regions = {'N1+2': '-90, -10, -80, 0',
               'N3': '-150, -5, -90, 5',
               'N34': '-170, -5, -120, 5',
               'N3.4': '-170, -5, -120, 5',
               'N4': '160, -5, -150, 5',
               'custom': custom_region}
    # Accept any casing ('n34' -> 'N34').
    region_key = region if region in regions else region.upper()
    if region_key not in regions:
        raise ValueError('Unknown region "%s" given to ENSO index calculation' % region)
    converted_region = PolygonLike.convert(regions[region_key])
    if not converted_region:
        raise ValueError('No region has been provided to ENSO index calculation')
    name = 'ENSO ' + region + ' Index'
    if 'custom' == region:
        name = 'ENSO Index over ' + PolygonLike.format(converted_region)
    return _generic_index_calculation(ds, var, converted_region, 5, file, name, threshold)
@op(tags=['index'])
@op_input('var', value_set_source='ds', data_type=VarName)
# file_open_mode 'r': the reference file is read by anomaly_external.
@op_input('file', file_open_mode='r', file_filters=[dict(name='NetCDF', extensions=['nc']), _ALL_FILE_FILTER])
def oni(ds: xr.Dataset, var: VarName.TYPE, file: str, threshold: float=None) -> pd.DataFrame:
    """
    Calculate ONI index, which is defined as a three month running mean of
    anomalies of monthly means of SST data in the Nino3.4 region.

    :param ds: A monthly SST dataset
    :param file: Path to the reference data file
    :param var: Dataset variable to use for index calculation
    :param threshold: If given, boolean El Nino/La Nina timeseries will be
    calculated and added to the output dataset, according to the given
    threshold. Where anomaly larger than the positive value of the threshold
    indicates El Nino and anomaly smaller than the negative of the given
    threshold indicates La Nina.
    :return: A dataset that contains the index timeseries.
    """
    n34 = '-170, -5, -120, 5'
    name = 'ONI Index'
    return _generic_index_calculation(ds, var, n34, 3, file, name, threshold)
def _generic_index_calculation(ds: xr.Dataset,
                               var: VarName.TYPE,
                               region: PolygonLike.TYPE,
                               window: int,
                               file: str,
                               name: str,
                               threshold: float = None) -> pd.DataFrame:
    """
    Generic index calculation: the anomaly of the given variable against the
    given reference file, spatially averaged over the given region and
    smoothed with a centered moving average of the given window size.

    :param ds: Dataset from which to calculate the index
    :param var: Variable from which to calculate index
    :param region: Spatial subset from which to calculate the index
    :param window: Window size for the moving average
    :param file: Path to the reference file
    :param threshold: Absolute threshold that indicates an ENSO event
    :param name: Name of the index
    """
    var_name = VarName.convert(var)
    poly = PolygonLike.convert(region)
    subset = subset_spatial(select_var(ds, var_name), poly)
    anomalies = anomaly_external(subset, file)
    # Collapse the region to a single timeseries by spatial averaging.
    series = anomalies.mean(dim=['lat', 'lon'])
    frame = pd.DataFrame(data=series[var_name].values, columns=[name], index=series.time)
    # Centered running mean; dropna() trims the undefined window edges.
    index_df = frame.rolling(window=window, center=True).mean().dropna()
    if threshold is None:
        return index_df
    index_df['El Nino'] = pd.Series(index_df[name] > threshold,
                                    index=index_df.index)
    index_df['La Nina'] = pd.Series(index_df[name] < -threshold,
                                    index=index_df.index)
    return index_df
Fix #320
Ensure the correct file dialog is invoked for the reference data file in
enso_nino34: the file is read, so it must use open mode 'r' rather than 'w'.
# The MIT License (MIT)
# Copyright (c) 2016, 2017 by the ESA CCI Toolbox development team and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Description
===========
Index calculation operations
Functions
=========
"""
import xarray as xr
import pandas as pd
from cate.core.op import op, op_input
from cate.ops.select import select_var
from cate.ops.subset import subset_spatial
from cate.ops.anomaly import anomaly_external
from cate.core.types import PolygonLike, VarName
# Catch-all entry appended to file-open dialog filters alongside the NetCDF filter.
_ALL_FILE_FILTER = dict(name='All Files', extensions=['*'])
@op(tags=['index', 'nino34'])
@op_input('file', file_open_mode='r', file_filters=[dict(name='NetCDF', extensions=['nc']), _ALL_FILE_FILTER])
@op_input('var', value_set_source='ds', data_type=VarName)
def enso_nino34(ds: xr.Dataset,
                var: VarName.TYPE,
                file: str,
                threshold: float=None) -> pd.DataFrame:
    """
    Calculate the Nino3.4 index: a five month running mean of anomalies of
    monthly mean SST over the Nino3.4 region (lon_min=-170, lat_min=-5,
    lon_max=-120, lat_max=5).

    :param ds: A monthly SST dataset
    :param file: Path to the reference data file
    :param var: Dataset variable (geophysical quantity) to use for index
    calculation.
    :param threshold: If given, boolean El Nino/La Nina timeseries will be
    calculated and added to the output dataset according to the given
    threshold. Where anomaly larger than the positive value of the threshold
    indicates El Nino and anomaly smaller than the negative of the given
    threshold indicates La Nina.
    :return: A dataset that contains the index timeseries.
    """
    nino34_region = '-170, -5, -120, 5'
    return _generic_index_calculation(ds, var, nino34_region, 5, file,
                                      'ENSO N3.4 Index', threshold)
@op(tags=['index'])
@op_input('var', value_set_source='ds', data_type=VarName)
# file_open_mode 'r': the reference file is read by anomaly_external
# (mirrors the fix already applied to enso_nino34).
@op_input('file', file_open_mode='r', file_filters=[dict(name='NetCDF', extensions=['nc']), _ALL_FILE_FILTER])
@op_input('region', value_set=['N1+2', 'N3', 'N34', 'N4', 'custom'])
@op_input('custom_region', data_type=PolygonLike)
def enso(ds: xr.Dataset,
         var: VarName.TYPE,
         file: str,
         region: str='n34',
         custom_region: PolygonLike.TYPE=None,
         threshold: float=None) -> pd.DataFrame:
    """
    Calculate ENSO index, which is defined as a five month running mean of
    anomalies of monthly means of SST data in the given region.

    :param ds: A monthly SST dataset
    :param file: Path to the reference data file
    :param var: Dataset variable to use for index calculation
    :param region: Region for index calculation, the default is Nino3.4
    :param custom_region: If 'custom' is chosen as the 'region', this parameter
    has to be provided to set the desired region.
    :param threshold: If given, boolean El Nino/La Nina timeseries will be
    calculated and added to the output dataset, according to the given
    threshold. Where anomaly larger than the positive value of the threshold
    indicates El Nino and anomaly smaller than the negative of the given
    threshold indicates La Nina.
    :return: A dataset that contains the index timeseries.
    :raises ValueError: If an unknown region name is given, or no custom
    region is provided when 'custom' is selected.
    """
    # Region bounds as 'lon_min, lat_min, lon_max, lat_max'. 'N34' and 'N3.4'
    # are synonyms; both are present so the advertised value_set ('N34') and
    # the documented default ('n34') resolve — previously neither was a key
    # and the lookup raised a bare KeyError.
    regions = {'N1+2': '-90, -10, -80, 0',
               'N3': '-150, -5, -90, 5',
               'N34': '-170, -5, -120, 5',
               'N3.4': '-170, -5, -120, 5',
               'N4': '160, -5, -150, 5',
               'custom': custom_region}
    # Accept any casing ('n34' -> 'N34').
    region_key = region if region in regions else region.upper()
    if region_key not in regions:
        raise ValueError('Unknown region "%s" given to ENSO index calculation' % region)
    converted_region = PolygonLike.convert(regions[region_key])
    if not converted_region:
        raise ValueError('No region has been provided to ENSO index calculation')
    name = 'ENSO ' + region + ' Index'
    if 'custom' == region:
        name = 'ENSO Index over ' + PolygonLike.format(converted_region)
    return _generic_index_calculation(ds, var, converted_region, 5, file, name, threshold)
@op(tags=['index'])
@op_input('var', value_set_source='ds', data_type=VarName)
# file_open_mode 'r': the reference file is read by anomaly_external
# (mirrors the fix already applied to enso_nino34).
@op_input('file', file_open_mode='r', file_filters=[dict(name='NetCDF', extensions=['nc']), _ALL_FILE_FILTER])
def oni(ds: xr.Dataset, var: VarName.TYPE, file: str, threshold: float=None) -> pd.DataFrame:
    """
    Calculate ONI index, which is defined as a three month running mean of
    anomalies of monthly means of SST data in the Nino3.4 region.

    :param ds: A monthly SST dataset
    :param file: Path to the reference data file
    :param var: Dataset variable to use for index calculation
    :param threshold: If given, boolean El Nino/La Nina timeseries will be
    calculated and added to the output dataset, according to the given
    threshold. Where anomaly larger than the positive value of the threshold
    indicates El Nino and anomaly smaller than the negative of the given
    threshold indicates La Nina.
    :return: A dataset that contains the index timeseries.
    """
    n34 = '-170, -5, -120, 5'
    name = 'ONI Index'
    return _generic_index_calculation(ds, var, n34, 3, file, name, threshold)
def _generic_index_calculation(ds: xr.Dataset,
                               var: VarName.TYPE,
                               region: PolygonLike.TYPE,
                               window: int,
                               file: str,
                               name: str,
                               threshold: float = None) -> pd.DataFrame:
    """
    A generic index calculation. Where an index is defined as an anomaly
    against the given reference of a moving average of the given window size of
    the given region of the given variable of the given dataset.

    :param ds: Dataset from which to calculate the index
    :param var: Variable from which to calculate index
    :param region: Spatial subset from which to calculate the index
    :param window: Window size for the moving average
    :param file: Path to the reference file
    :param threshold: Absolute threshold that indicates an ENSO event
    :param name: Name of the index
    :return: A pandas DataFrame with the index timeseries and, when a
        threshold is given, boolean 'El Nino'/'La Nina' columns.
    """
    var = VarName.convert(var)
    region = PolygonLike.convert(region)
    ds = select_var(ds, var)
    ds_subset = subset_spatial(ds, region)
    # Anomalies are computed against the external reference file.
    anom = anomaly_external(ds_subset, file)
    # Collapse the region to a single timeseries by spatial averaging.
    ts = anom.mean(dim=['lat', 'lon'])
    df = pd.DataFrame(data=ts[var].values, columns=[name], index=ts.time)
    # Centered running mean; dropna() trims the half-window edges where the
    # rolling mean is undefined.
    retval = df.rolling(window=window, center=True).mean().dropna()
    if threshold is None:
        return retval
    retval['El Nino'] = pd.Series((retval[name] > threshold),
                                  index=retval.index)
    retval['La Nina'] = pd.Series((retval[name] < -threshold),
                                  index=retval.index)
    return retval
|
"""Django forms for hs_core module."""
import copy
from django.forms import ModelForm, BaseFormSet
from django.contrib.admin.widgets import forms
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Fieldset, HTML
from crispy_forms.bootstrap import Field
from .hydroshare import utils
from .models import Party, Creator, Contributor, validate_user_url, Relation, Source, Identifier, \
FundingAgency, Description
class Helper(object):
    """Render reusable elements to use in Django forms."""

    @classmethod
    def get_element_add_modal_form(cls, element_name, modal_form_context_name):
        """Apply a modal UI element to a given form.

        Used in netCDF and modflow_modelinstance apps

        :param element_name: display name of the metadata element; lowercased
            to build the modal's DOM id and the crispy template tag.
        :param modal_form_context_name: template context variable whose
            ``action`` attribute the modal form submits to.
        :return: a crispy-forms Layout for the populated modal.
        """
        modal_title = "Add %s" % element_name.title()
        # Generic modal skeleton; entries 0, 1, 3 and 5 are placeholders
        # that are replaced with element-specific markup below.
        layout = Layout(
            HTML('<div class="modal fade" id="add-element-dialog" tabindex="-1" '
                 'role="dialog" aria-labelledby="myModalLabel" aria-hidden="true">'
                 '<div class="modal-dialog">'
                 '<div class="modal-content">'),
            HTML('<form action="{{ form.action }}" '
                 'method="POST" enctype="multipart/form-data"> '),
            HTML('{% csrf_token %} '
                 '<input name="resource-mode" type="hidden" value="edit"/>'
                 '<div class="modal-header">'
                 '<button type="button" class="close" '
                 'data-dismiss="modal" aria-hidden="true">×'
                 '</button>'),
            HTML('<h4 class="modal-title" id="myModalLabel"> Add Element </h4>'),
            HTML('</div>'
                 '<div class="modal-body">'
                 '{% csrf_token %}'
                 '<div class="form-group">'),
            HTML('{% load crispy_forms_tags %} {% crispy add_creator_modal_form %} '),
            HTML('</div>'
                 '</div>'
                 '<div class="modal-footer">'
                 '<button type="button" class="btn btn-default" '
                 'data-dismiss="modal">Close</button>'
                 '<button type="submit" class="btn btn-primary">'
                 'Save changes</button>'
                 '</div>'
                 '</form>'
                 '</div>'
                 '</div>'
                 '</div>')
        )
        # Swap in the element-specific wrapper div, form action, title, and
        # crispy form tag.
        layout[0] = HTML('<div class="modal fade" id="add-%s-dialog" tabindex="-1" role="dialog" '
                         'aria-labelledby="myModalLabel" aria-hidden="true">'
                         '<div class="modal-dialog">'
                         '<div class="modal-content">' % element_name.lower())
        layout[1] = HTML('<form action="{{ %s.action }}" method="POST" '
                         'enctype="multipart/form-data"> ' % modal_form_context_name)
        layout[3] = HTML('<h4 class="modal-title" id="myModalLabel"> {title} '
                         '</h4>'.format(title=modal_title),)
        html_str = '{% load crispy_forms_tags %} {% crispy' + ' add_{element}_modal_form'.format(
            element=element_name.lower()) + ' %}'
        layout[5] = HTML(html_str)
        return layout
# the 1st and the 3rd HTML layout objects get replaced in MetaDataElementDeleteForm class
def _get_modal_confirm_delete_matadata_element():
    """Build the generic 'confirm metadata element delete' modal layout.

    Entries 0 (the modal wrapper div) and 2 (the Delete link) are
    placeholders that MetaDataElementDeleteForm replaces with
    element-specific markup. ('matadata' in the name is a historical
    typo kept so existing callers keep working.)
    """
    layout = Layout(
        HTML('<div class="modal fade" id="delete-metadata-element-dialog" '
             'tabindex="-1" role="dialog" aria-labelledby="myModalLabel" '
             'aria-hidden="true">'),
        HTML('<div class="modal-dialog">'
             '<div class="modal-content">'
             '<div class="modal-header">'
             '<button type="button" class="close" data-dismiss="modal" '
             'aria-hidden="true">×</button>'
             '<h4 class="modal-title" id="myModalLabel">'
             'Delete metadata element</h4>'
             '</div>'
             '<div class="modal-body">'
             '<strong>Are you sure you want to delete this metadata '
             'element?</strong>'
             '</div>'
             '<div class="modal-footer">'
             '<button type="button" class="btn btn-default" '
             'data-dismiss="modal">Cancel</button>'),
        HTML('<a type="button" class="btn btn-danger" href="">Delete</a>'),
        HTML('</div>'
             '</div>'
             '</div>'
             '</div>'),
    )
    return layout
class MetaDataElementDeleteForm(forms.Form):
    """Render a modal that confirms element deletion."""

    def __init__(self, res_short_id, element_name, element_id, *args, **kwargs):
        """Render a modal that confirms element deletion.

        uses _get_modal_confirm_delete_matadata_element

        :param res_short_id: short id of the resource that owns the element
        :param element_name: metadata element type name (used in the DOM id
            and the delete URL)
        :param element_id: id of the element to delete
        """
        super(MetaDataElementDeleteForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper()
        # The surrounding double quotes are intentional: the value is
        # interpolated verbatim as the href attribute below.
        self.delete_element_action = '"/hsapi/_internal/%s/%s/%s/delete-metadata/"' % \
                                     (res_short_id, element_name, element_id)
        self.helper.layout = _get_modal_confirm_delete_matadata_element()
        # Replace the placeholder wrapper div (entry 0) and Delete link
        # (entry 2) with element-specific markup.
        self.helper.layout[0] = HTML('<div class="modal fade" id="delete-%s-element-dialog_%s" '
                                     'tabindex="-1" role="dialog" aria-labelledby="myModalLabel" '
                                     'aria-hidden="true">' % (element_name, element_id))
        self.helper.layout[2] = HTML('<a type="button" class="btn btn-danger" '
                                     'href=%s>Delete</a>' % self.delete_element_action)
        self.helper.form_tag = False
class ExtendedMetadataForm(forms.Form):
    """Render an extensible metadata form via the extended_metadata_layout kwarg."""

    def __init__(self, resource_mode='edit', extended_metadata_layout=None, *args, **kwargs):
        """Attach the supplied crispy layout to a tag-less form helper.

        NOTE(review): ``resource_mode`` is currently unused here — confirm
        whether callers still rely on passing it.
        """
        super(ExtendedMetadataForm, self).__init__(*args, **kwargs)
        helper = FormHelper()
        helper.form_tag = False
        helper.layout = extended_metadata_layout
        self.helper = helper
class CreatorFormSetHelper(FormHelper):
    """Render a creator form with custom HTML5 validation and error display."""

    def __init__(self, *args, **kwargs):
        """Render a creator form with custom HTML5 validation and error display."""
        super(CreatorFormSetHelper, self).__init__(*args, **kwargs)
        # the order in which the model fields are listed for the FieldSet is the order
        # these fields will be displayed
        field_width = 'form-control input-sm'
        self.form_tag = False
        self.form_show_errors = True
        self.error_text_inline = True
        self.html5_required = True
        self.layout = Layout(
            Fieldset('Creator',
                     Field('name', css_class=field_width),
                     Field('description', css_class=field_width),
                     Field('organization', css_class=field_width),
                     Field('email', css_class=field_width),
                     Field('address', css_class=field_width),
                     Field('phone', css_class=field_width),
                     Field('homepage', css_class=field_width),
                     # 'order' is creator-specific; it controls list position.
                     Field('order', css_class=field_width),
                     ),
        )
class PartyForm(ModelForm):
    """Render form for creating and editing Party models, aka people."""

    def __init__(self, *args, **kwargs):
        """Render form for creating and editing Party models, aka people.

        Removes profile link formset and renders proper description URL
        """
        initial = kwargs.get('initial')
        # Turn a site-relative description (user profile path) into an
        # absolute URL before the form is bound.
        if initial and initial.get('description'):
            initial['description'] = utils.current_site_url() + initial['description']
        super(PartyForm, self).__init__(*args, **kwargs)
        self.profile_link_formset = None
        self.number = 0

    class Meta:
        """Describe meta properties of PartyForm.

        Fields that will be displayed are specified here - but not necessarily in the same order
        """

        model = Party
        fields = ['name', 'description', 'organization', 'email', 'address', 'phone', 'homepage']
        # TODO: field labels and widgets types to be specified
        labels = {'description': 'HydroShare User Identifier (URL)'}
class CreatorForm(PartyForm):
    """Render form for creating and editing Creator models, as in creators of resources."""

    def __init__(self, allow_edit=True, res_short_id=None, element_id=None, *args, **kwargs):
        """Render form for creating and editing Creator models, as in creators of resources.

        :param allow_edit: when False, every field is rendered read-only.
        :param res_short_id: short id of the resource; determines the form's
            submit action.
        :param element_id: id of the element being edited (unused here; kept
            for a uniform metadata-form signature).
        """
        super(CreatorForm, self).__init__(*args, **kwargs)
        self.helper = CreatorFormSetHelper()
        self.delete_modal_form = None
        if res_short_id:
            self.action = "/hsapi/_internal/%s/creator/add-metadata/" % res_short_id
        else:
            self.action = ""
        if not allow_edit:
            for fld_name in self.Meta.fields:
                self.fields[fld_name].widget.attrs['readonly'] = True
                self.fields[fld_name].widget.attrs['style'] = "background-color:white;"
            self.fields['order'].widget.attrs['readonly'] = True
            self.fields['order'].widget.attrs['style'] = "background-color:white;"
        else:
            if 'add-metadata' in self.action:
                # 'order' is assigned automatically for newly added creators.
                del self.fields['order']

    @property
    def form_id(self):
        """Render proper form id by prepending 'id_creator_'."""
        form_id = 'id_creator_%s' % self.number
        return form_id

    @property
    def form_id_button(self):
        """Render proper form id with quotes around it."""
        form_id = 'id_creator_%s' % self.number
        return "'" + form_id + "'"

    class Meta:
        """Describe meta properties of CreatorForm."""

        model = Creator
        # Build a NEW list instead of append()-ing to PartyForm.Meta.fields:
        # the old in-place append mutated the list shared with PartyForm (and
        # every other subclass) as a side effect of class definition.
        fields = PartyForm.Meta.fields + ["order"]
        labels = PartyForm.Meta.labels
class PartyValidationForm(forms.Form):
    """Validate form for Party models."""

    description = forms.CharField(required=False, validators=[validate_user_url])
    name = forms.CharField(required=False, max_length=100)
    organization = forms.CharField(max_length=200, required=False)
    email = forms.EmailField(required=False)
    address = forms.CharField(max_length=250, required=False)
    phone = forms.CharField(max_length=25, required=False)
    homepage = forms.URLField(required=False)
    identifiers = forms.CharField(required=False)

    def clean_description(self):
        """Reduce an absolute profile URL to a site-relative /user/<id>/ path."""
        user_absolute_url = self.cleaned_data['description']
        if not user_absolute_url:
            return user_absolute_url
        segments = user_absolute_url.split('/')
        if len(segments) <= 4:
            return user_absolute_url
        return '/user/{user_id}/'.format(user_id=segments[4])

    def clean_identifiers(self):
        """Delegate identifier validation to the Party model."""
        return Party.validate_identifiers(self.cleaned_data['identifiers'])

    def clean(self):
        """Validate that name and/or organization are present in form data."""
        cleaned_data = super(PartyValidationForm, self).clean()
        name = cleaned_data.get('name', None)
        missing_name = not name or len(name.strip()) == 0
        if not cleaned_data.get('organization', None) and missing_name:
            self._errors['name'] = ["A value for name or organization is required but both "
                                    "are missing"]
        return self.cleaned_data
class CreatorValidationForm(PartyValidationForm):
    """Validate form for Creator models. Extends PartyValidationForm."""

    # Position of the creator in the resource's ordered creator list.
    order = forms.IntegerField(required=False)
class ContributorValidationForm(PartyValidationForm):
    """Validate form for Contributor models. Extends PartyValidationForm."""

    # Identical to PartyValidationForm; contributors add no extra fields.
    pass
class BaseCreatorFormSet(BaseFormSet):
    """Render BaseFormSet for working with Creator models."""

    def add_fields(self, form, index):
        """Pass through add_fields function to super."""
        super(BaseCreatorFormSet, self).add_fields(form, index)

    def get_metadata(self):
        """Return a list of {'creator': {...}} dicts for each non-empty form.

        :return: list of creator metadata dicts, skipping forms whose
            cleaned_data is empty.
        """
        # dict(...) copies cleaned_data directly; the previous
        # `{k: v for k, v in list(form.cleaned_data.items())}` did the same
        # with two redundant intermediate structures.
        return [{'creator': dict(form.cleaned_data)}
                for form in self.forms if form.cleaned_data]
class ContributorFormSetHelper(FormHelper):
    """Render layout for Contributor model form and activate required fields."""

    def __init__(self, *args, **kwargs):
        """Render layout for Contributor model form and activate required fields."""
        super(ContributorFormSetHelper, self).__init__(*args, **kwargs)
        # the order in which the model fields are listed for the FieldSet is the order
        # these fields will be displayed
        field_width = 'form-control input-sm'
        self.form_tag = False
        self.layout = Layout(
            Fieldset('Contributor',
                     Field('name', css_class=field_width),
                     Field('description', css_class=field_width),
                     Field('organization', css_class=field_width),
                     Field('email', css_class=field_width),
                     Field('address', css_class=field_width),
                     Field('phone', css_class=field_width),
                     Field('homepage', css_class=field_width),
                     ),
        )
        # Previously "= True," — the trailing comma assigned the tuple
        # (True,), which only worked because any non-empty tuple is truthy.
        self.render_required_fields = True
class ContributorForm(PartyForm):
    """Edit form for Contributor metadata elements."""

    def __init__(self, allow_edit=True, res_short_id=None, element_id=None, *args, **kwargs):
        """Set up helper/action; make fields read-only when editing is not allowed.

        :param allow_edit: when False, all fields are rendered read-only
        :param res_short_id: short id of the resource this element belongs to
        :param element_id: unused here; kept for signature parity with sibling forms
        """
        super(ContributorForm, self).__init__(*args, **kwargs)
        self.helper = ContributorFormSetHelper()
        self.delete_modal_form = None
        if res_short_id:
            self.action = "/hsapi/_internal/%s/contributor/add-metadata/" % res_short_id
        else:
            self.action = ""
        if not allow_edit:
            for fld_name in self.Meta.fields:
                self.fields[fld_name].widget.attrs['readonly'] = True
                self.fields[fld_name].widget.attrs['style'] = "background-color:white;"

    @property
    def form_id(self):
        """Return the DOM id for this form ('id_contributor_<n>')."""
        return 'id_contributor_%s' % self.number

    @property
    def form_id_button(self):
        """Return the DOM id wrapped in single quotes for use in inline JS."""
        return "'" + 'id_contributor_%s' % self.number + "'"

    class Meta:
        """Meta options: reuse PartyForm fields/labels minus the 'order' field."""

        model = Contributor
        # BUG FIX: copy the list before mutating it; the original aliased
        # PartyForm.Meta.fields, so remove() would have mutated the shared list.
        fields = list(PartyForm.Meta.fields)
        labels = PartyForm.Meta.labels
        if 'order' in fields:
            fields.remove('order')
class BaseContributorFormSet(BaseFormSet):
    """Formset used to edit the Contributor elements of a resource."""

    def add_fields(self, form, index):
        """Delegate extra-field setup to the base formset."""
        super(BaseContributorFormSet, self).add_fields(form, index)

    def get_metadata(self):
        """Return a list of {'contributor': {...}} dicts, one per non-empty form."""
        metadata = []
        for contributor_form in self.forms:
            data = dict(contributor_form.cleaned_data)
            if data:
                metadata.append({'contributor': data})
        return metadata
class RelationFormSetHelper(FormHelper):
    """Crispy-forms helper for the Relation form: layout plus inline error display."""

    def __init__(self, *args, **kwargs):
        """Configure error rendering and the type/value fieldset layout."""
        super(RelationFormSetHelper, self).__init__(*args, **kwargs)
        self.form_tag = False
        self.form_show_errors = True
        self.error_text_inline = True
        self.html5_required = False
        # fields appear in the order they are listed in the Fieldset
        css = 'form-control input-sm'
        self.layout = Layout(
            Fieldset(
                'Relation',
                Field('type', css_class=css),
                Field('value', css_class=css),
            ),
        )
class RelationForm(ModelForm):
    """Edit form for Relation metadata elements."""

    def __init__(self, allow_edit=True, res_short_id=None, *args, **kwargs):
        """Set up helper/action; make fields read-only when editing is not allowed."""
        super(RelationForm, self).__init__(*args, **kwargs)
        self.helper = RelationFormSetHelper()
        self.number = 0
        self.delete_modal_form = None
        self.action = ("/hsapi/_internal/%s/relation/add-metadata/" % res_short_id
                       if res_short_id else "")
        if not allow_edit:
            for fld_name in self.Meta.fields:
                widget = self.fields[fld_name].widget
                widget.attrs['readonly'] = True
                widget.attrs['style'] = "background-color:white;"

    @property
    def form_id(self):
        """Return the DOM id for this form ('id_relation_<n>')."""
        return 'id_relation_%s' % self.number

    @property
    def form_id_button(self):
        """Return the DOM id wrapped in single quotes for use in inline JS."""
        return "'" + self.form_id + "'"

    class Meta:
        """Meta options for RelationForm."""

        model = Relation
        # fields that will be displayed are specified here - but not necessarily in the same order
        fields = ['type', 'value']
        labels = {'type': 'Relation type', 'value': 'Related to'}
class RelationValidationForm(forms.Form):
    """Server-side validation of a submitted Relation element."""

    type = forms.CharField(max_length=100)
    value = forms.CharField(max_length=500)
class SourceFormSetHelper(FormHelper):
    """Crispy-forms helper for the Source form: layout plus inline error display."""

    def __init__(self, *args, **kwargs):
        """Configure error rendering and the derived_from fieldset layout."""
        super(SourceFormSetHelper, self).__init__(*args, **kwargs)
        self.form_tag = False
        self.form_show_errors = True
        self.error_text_inline = True
        self.html5_required = False
        # fields appear in the order they are listed in the Fieldset
        self.layout = Layout(
            Fieldset(
                'Source',
                Field('derived_from', css_class='form-control input-sm'),
            ),
        )
class SourceForm(ModelForm):
    """Edit form for Source metadata elements."""

    def __init__(self, allow_edit=True, res_short_id=None, *args, **kwargs):
        """Set up helper/action; make derived_from read-only when editing is not allowed."""
        super(SourceForm, self).__init__(*args, **kwargs)
        self.helper = SourceFormSetHelper()
        self.number = 0
        self.delete_modal_form = None
        self.allow_edit = allow_edit
        self.action = ("/hsapi/_internal/%s/source/add-metadata/" % res_short_id
                       if res_short_id else "")
        if not allow_edit:
            widget = self.fields['derived_from'].widget
            widget.attrs['readonly'] = True
            widget.attrs['style'] = "background-color:white;"

    @property
    def form_id(self):
        """Return the DOM id for this form ('id_source_<n>')."""
        return 'id_source_%s' % self.number

    @property
    def form_id_button(self):
        """Return the DOM id wrapped in single quotes for use in inline JS."""
        return "'" + self.form_id + "'"

    class Meta:
        """Meta options for SourceForm."""

        model = Source
        # fields that will be displayed are specified here - but not necessarily in the same order
        fields = ['derived_from']
class SourceValidationForm(forms.Form):
    """Server-side validation of a submitted Source element."""

    derived_from = forms.CharField(max_length=300)
class IdentifierFormSetHelper(FormHelper):
    """Crispy-forms helper for the Identifier form (HTML5-required name/url)."""

    def __init__(self, *args, **kwargs):
        """Configure error rendering and the name/url fieldset layout."""
        super(IdentifierFormSetHelper, self).__init__(*args, **kwargs)
        self.form_tag = False
        self.form_show_errors = True
        self.error_text_inline = True
        self.html5_required = True
        # fields appear in the order they are listed in the Fieldset
        css = 'form-control input-sm'
        self.layout = Layout(
            Fieldset(
                'Identifier',
                Field('name', css_class=css),
                Field('url', css_class=css),
            ),
        )
class IdentifierForm(ModelForm):
    """Display-only form for Identifier metadata elements (name/url read-only)."""

    def __init__(self, res_short_id=None, *args, **kwargs):
        """Set up helper/action and render both fields read-only."""
        super(IdentifierForm, self).__init__(*args, **kwargs)
        for fld_name in ('name', 'url'):
            self.fields[fld_name].widget.attrs['readonly'] = True
            self.fields[fld_name].widget.attrs['style'] = "background-color:white;"
        self.helper = IdentifierFormSetHelper()
        self.number = 0
        self.delete_modal_form = None
        if res_short_id:
            self.action = "/hsapi/_internal/%s/identifier/add-metadata/" % res_short_id
        else:
            self.action = ""

    class Meta:
        """Meta options for IdentifierForm."""

        model = Identifier
        # fields that will be displayed are specified here - but not necessarily in the same order
        fields = ['name', 'url']

    def clean(self):
        """Reject the reserved identifier name 'hydroshareidentifier' (any case)."""
        data = self.cleaned_data
        # ROBUSTNESS FIX: use .get() so a field-level validation failure (which
        # leaves 'name' out of cleaned_data) cannot raise KeyError here.
        name = data.get('name', '')
        if name.lower() == 'hydroshareidentifier':
            raise forms.ValidationError("Identifier name attribute can't have a value "
                                        "of '{}'.".format(name))
        return data
class FundingAgencyFormSetHelper(FormHelper):
    """Crispy-forms helper laying out the FundingAgency form fields."""

    def __init__(self, *args, **kwargs):
        """Configure error rendering and the funding-agency fieldset layout."""
        super(FundingAgencyFormSetHelper, self).__init__(*args, **kwargs)
        self.form_tag = False
        self.form_show_errors = True
        self.error_text_inline = True
        self.html5_required = False
        # fields appear in the order they are listed in the Fieldset
        css = 'form-control input-sm'
        self.layout = Layout(
            Fieldset(
                'Funding Agency',
                Field('agency_name', css_class=css),
                Field('award_title', css_class=css),
                Field('award_number', css_class=css),
                Field('agency_url', css_class=css),
            ),
        )
class FundingAgencyForm(ModelForm):
    """Edit form for FundingAgency metadata elements."""

    def __init__(self, allow_edit=True, res_short_id=None, *args, **kwargs):
        """Set up helper/action; make fields read-only when editing is not allowed."""
        super(FundingAgencyForm, self).__init__(*args, **kwargs)
        self.helper = FundingAgencyFormSetHelper()
        self.number = 0
        self.delete_modal_form = None
        self.action = ("/hsapi/_internal/%s/fundingagency/add-metadata/" % res_short_id
                       if res_short_id else "")
        if not allow_edit:
            for fld_name in self.Meta.fields:
                widget = self.fields[fld_name].widget
                widget.attrs['readonly'] = True
                widget.attrs['style'] = "background-color:white;"

    @property
    def form_id(self):
        """Return the DOM id for this form ('id_fundingagency_<n>')."""
        return 'id_fundingagency_%s' % self.number

    @property
    def form_id_button(self):
        """Return the DOM id wrapped in single quotes for use in inline JS."""
        return "'" + self.form_id + "'"

    class Meta:
        """Meta options for FundingAgencyForm."""

        model = FundingAgency
        # fields that will be displayed are specified here - but not necessarily in the same order
        fields = ['agency_name', 'award_title', 'award_number', 'agency_url']
        labels = {'agency_name': 'Funding agency name', 'award_title': 'Title of the award',
                  'award_number': 'Award number', 'agency_url': 'Agency website'}
class FundingAgencyValidationForm(forms.Form):
    """Server-side validation of a submitted FundingAgency element."""

    agency_name = forms.CharField(required=True)
    award_title = forms.CharField(required=False)
    award_number = forms.CharField(required=False)
    agency_url = forms.URLField(required=False)
class BaseFormHelper(FormHelper):
    """Render non-repeatable element related forms."""

    def __init__(self, allow_edit=True, res_short_id=None, element_id=None, element_name=None,
                 element_layout=None, *args, **kwargs):
        """Render non-repeatable element related forms.

        :param allow_edit: when False, the layout omits the "Save changes" button
        :param res_short_id: resource short id; when present a real <form> tag is
            emitted posting to the internal add/update metadata endpoints
        :param element_id: id of an existing element (update) or None (add)
        :param element_name: metadata element name (used for form id, action URL
            and fieldset legend)
        :param element_layout: crispy Layout holding the element's fields
        """
        # pop helper-specific kwargs so they are not passed on to FormHelper
        coverage_type = kwargs.pop('coverage', None)
        element_name_label = kwargs.pop('element_name_label', None)
        super(BaseFormHelper, self).__init__(*args, **kwargs)
        if res_short_id:
            self.form_method = 'post'
            self.form_tag = True
            # coverage forms include the coverage type in the form id so the
            # spatial and temporal forms on one page get distinct ids
            if element_name.lower() == 'coverage':
                if coverage_type:
                    self.form_id = 'id-%s-%s' % (element_name.lower(), coverage_type)
                else:
                    self.form_id = 'id-%s' % element_name.lower()
            else:
                self.form_id = 'id-%s' % element_name.lower()
            if element_id:
                self.form_action = "/hsapi/_internal/%s/%s/%s/update-metadata/" % \
                                   (res_short_id, element_name.lower(), element_id)
            else:
                self.form_action = "/hsapi/_internal/%s/%s/add-metadata/" % (res_short_id,
                                                                             element_name)
        else:
            self.form_tag = False
        # change the first character to uppercase of the element name
        element_name = element_name.title()
        if element_name_label:
            element_name = element_name_label
        # display-name overrides where the UI label differs from the element name
        if element_name == "Subject":
            element_name = "Keywords"
        elif element_name == "Description":
            element_name = "Abstract"
        if res_short_id and allow_edit:
            self.layout = Layout(
                Fieldset(element_name,
                         element_layout,
                         HTML('<div style="margin-top:10px">'),
                         # NOTE(review): this button markup looks malformed --
                         # 'return false;' is not inside an onclick attribute;
                         # confirm the intended click handler.
                         HTML('<button type="button" '
                              'class="btn btn-primary pull-right btn-form-submit" '
                              'return false;">Save changes</button>'),
                         HTML('</div>')
                         ),
            )  # TODO: TESTING
        else:
            self.form_tag = False
            self.layout = Layout(
                Fieldset(element_name,
                         element_layout,
                         ),
            )
class TitleValidationForm(forms.Form):
    """Server-side validation of a submitted Title element."""

    value = forms.CharField(max_length=300)
class SubjectsFormHelper(BaseFormHelper):
    """Crispy-forms helper for the subjects (keywords) form.

    Subjects are edited through a single comma-separated text input rather
    than a formset, so one helper covers all subject elements.
    """

    def __init__(self, allow_edit=True, res_short_id=None, element_id=None, element_name=None,
                 *args, **kwargs):
        """Build the single-field layout and delegate to BaseFormHelper."""
        layout = Layout(
            Field('value', css_class='form-control input-sm'),
        )
        super(SubjectsFormHelper, self).__init__(allow_edit, res_short_id, element_id,
                                                 element_name, layout, *args, **kwargs)
class SubjectsForm(forms.Form):
    """Form for editing resource subjects (keywords) as comma-separated text."""

    value = forms.CharField(max_length=500,
                            label='',
                            widget=forms.TextInput(attrs={'placeholder': 'Keywords'}),
                            help_text='Enter each keyword separated by a comma.')

    def __init__(self, allow_edit=True, res_short_id=None, element_id=None, *args, **kwargs):
        """Set up helper/action; make the field read-only when editing is not allowed."""
        super(SubjectsForm, self).__init__(*args, **kwargs)
        self.helper = SubjectsFormHelper(allow_edit, res_short_id, element_id,
                                         element_name='subject')
        self.number = 0
        self.delete_modal_form = None
        self.action = ("/hsapi/_internal/%s/subject/add-metadata/" % res_short_id
                       if res_short_id else "")
        if not allow_edit:
            for field in self.fields.values():
                field.widget.attrs['readonly'] = True
                field.widget.attrs['style'] = "background-color:white;"
class AbstractFormHelper(BaseFormHelper):
    """Crispy-forms helper for the abstract (description) form."""

    def __init__(self, allow_edit=True, res_short_id=None, element_id=None, element_name=None,
                 *args, **kwargs):
        """Build the single-field layout and delegate to BaseFormHelper."""
        layout = Layout(
            Field('abstract', css_class='form-control input-sm'),
        )
        super(AbstractFormHelper, self).__init__(allow_edit, res_short_id, element_id,
                                                 element_name, layout, *args, **kwargs)
class AbstractForm(ModelForm):
    """Edit form for the resource abstract (Description element)."""

    def __init__(self, allow_edit=True, res_short_id=None, element_id=None, *args, **kwargs):
        """Set up helper; disable the abstract field when editing is not allowed."""
        super(AbstractForm, self).__init__(*args, **kwargs)
        self.helper = AbstractFormHelper(allow_edit, res_short_id, element_id,
                                         element_name='description')
        if not allow_edit:
            widget = self.fields['abstract'].widget
            widget.attrs['disabled'] = True
            widget.attrs['style'] = "background-color:white;"

    class Meta:
        """Meta options for AbstractForm."""

        model = Description
        fields = ['abstract']
        exclude = ['content_object']
        labels = {'abstract': ''}
class AbstractValidationForm(forms.Form):
    """Server-side validation of a submitted abstract (Description element)."""

    abstract = forms.CharField(max_length=5000)
class RightsValidationForm(forms.Form):
    """Server-side validation of a submitted Rights element."""

    statement = forms.CharField(required=False)
    url = forms.URLField(required=False, max_length=500)

    def clean(self):
        """Require at least one of statement/url to be present."""
        cleaned_data = super(RightsValidationForm, self).clean()
        has_statement = bool(cleaned_data.get('statement', None))
        has_url = bool(cleaned_data.get('url', None))
        if not (has_statement or has_url):
            self._errors['statement'] = ["A value for statement is missing"]
            self._errors['url'] = ["A value for Url is missing"]
        return self.cleaned_data
class CoverageTemporalFormHelper(BaseFormHelper):
    """Crispy-forms helper for the temporal coverage form."""

    def __init__(self, allow_edit=True, res_short_id=None, element_id=None, element_name=None,
                 *args, **kwargs):
        """Build the start/end layout and delegate to BaseFormHelper."""
        file_type = kwargs.pop('file_type', False)
        crispy_fields = get_crispy_form_fields(['start', 'end'], file_type=file_type)
        layout = Layout(*crispy_fields)
        # tells BaseFormHelper to build a coverage-specific form id
        kwargs['coverage'] = 'temporal'
        super(CoverageTemporalFormHelper, self).__init__(allow_edit, res_short_id, element_id,
                                                         element_name, layout, *args, **kwargs)
class CoverageTemporalForm(forms.Form):
    """Form for editing the temporal coverage (start/end dates) of a resource."""

    start = forms.DateField(label='Start Date')
    end = forms.DateField(label='End Date')

    def __init__(self, allow_edit=True, res_short_id=None, element_id=None, *args, **kwargs):
        """Set up helper/action; make fields read-only when editing is not allowed."""
        file_type = kwargs.pop('file_type', False)
        super(CoverageTemporalForm, self).__init__(*args, **kwargs)
        self.helper = CoverageTemporalFormHelper(allow_edit, res_short_id, element_id,
                                                 element_name='Temporal Coverage',
                                                 file_type=file_type)
        self.number = 0
        self.delete_modal_form = None
        if res_short_id:
            self.action = "/hsapi/_internal/%s/coverage/add-metadata/" % res_short_id
        else:
            self.action = ""
        if not allow_edit:
            for field in list(self.fields.values()):
                field.widget.attrs['readonly'] = True

    def clean(self):
        """Validate the date pair and reshape cleaned_data into coverage format.

        On success cleaned_data ends up as
        {'type': 'period', 'value': {'start': <iso>, 'end': <iso>[, 'name': ...]}}.
        """
        is_form_errors = False
        super(CoverageTemporalForm, self).clean()
        start_date = self.cleaned_data.get('start', None)
        end_date = self.cleaned_data.get('end', None)
        if not start_date:
            self._errors['start'] = ["Data for start date is missing"]
            is_form_errors = True
        if not end_date:
            self._errors['end'] = ["Data for end date is missing"]
            is_form_errors = True
        # BUG FIX: the original compared the dates unconditionally, which raised
        # TypeError (None vs date) when either date was missing or invalid.
        if start_date and end_date and start_date > end_date:
            self._errors['end'] = ["End date should be date after the start date"]
            is_form_errors = True
        if is_form_errors:
            return self.cleaned_data
        # drop an empty optional name before packaging the value dict
        if 'name' in self.cleaned_data:
            if len(self.cleaned_data['name']) == 0:
                del self.cleaned_data['name']
        self.cleaned_data['start'] = self.cleaned_data['start'].isoformat()
        self.cleaned_data['end'] = self.cleaned_data['end'].isoformat()
        # snapshot into 'value' before adding 'type' so 'type' is not nested
        self.cleaned_data['value'] = copy.deepcopy(self.cleaned_data)
        self.cleaned_data['type'] = 'period'
        if 'name' in self.cleaned_data:
            del self.cleaned_data['name']
        del self.cleaned_data['start']
        del self.cleaned_data['end']
        return self.cleaned_data
class CoverageSpatialFormHelper(BaseFormHelper):
    """Crispy-forms helper for the spatial coverage form."""

    def __init__(self, allow_edit=True, res_short_id=None, element_id=None, element_name=None,
                 *args, **kwargs):
        """Build the spatial-coverage layout and delegate to BaseFormHelper."""
        file_type = kwargs.pop('file_type', False)
        # fields appear in the order they are appended to the layout
        type_field_id = "id_type_filetype" if file_type else "id_type"
        layout = Layout(Field('type', id=type_field_id))
        field_names = ['name', 'projection', 'east', 'north', 'northlimit', 'eastlimit',
                       'southlimit', 'westlimit', 'units']
        for crispy_field in get_crispy_form_fields(field_names, file_type=file_type):
            layout.append(crispy_field)
        # tells BaseFormHelper to build a coverage-specific form id
        kwargs['coverage'] = 'spatial'
        super(CoverageSpatialFormHelper, self).__init__(allow_edit, res_short_id, element_id,
                                                        element_name, layout, *args, **kwargs)
class CoverageSpatialForm(forms.Form):
    """Form for editing the spatial coverage (point or box) of a resource."""

    TYPE_CHOICES = (
        ('box', 'Box'),
        ('point', 'Point')
    )
    # geometry fields: east/north apply to 'point', the *limit fields to 'box'
    type = forms.ChoiceField(choices=TYPE_CHOICES,
                             widget=forms.RadioSelect(attrs={'class': 'inline'}), label='')
    name = forms.CharField(max_length=200, required=False, label='Place/Area Name')
    projection = forms.CharField(max_length=100, required=False,
                                 label='Coordinate System/Geographic Projection')
    east = forms.DecimalField(label='Longitude', widget=forms.TextInput())
    north = forms.DecimalField(label='Latitude', widget=forms.TextInput())
    units = forms.CharField(max_length=50, label='Coordinate Units')
    northlimit = forms.DecimalField(label='North Latitude', widget=forms.TextInput())
    eastlimit = forms.DecimalField(label='East Longitude', widget=forms.TextInput())
    southlimit = forms.DecimalField(label='South Latitude', widget=forms.TextInput())
    westlimit = forms.DecimalField(label='West Longitude', widget=forms.TextInput())

    def __init__(self, allow_edit=True, res_short_id=None, element_id=None, *args, **kwargs):
        """Set up helper/action, fixed projection/units and map-edit attributes."""
        file_type = kwargs.pop('file_type', False)
        super(CoverageSpatialForm, self).__init__(*args, **kwargs)
        self.helper = CoverageSpatialFormHelper(allow_edit, res_short_id, element_id,
                                                element_name='Spatial Coverage',
                                                file_type=file_type)
        self.number = 0
        self.delete_modal_form = None
        # field-level errors are discarded; clean() re-validates per coverage type
        if self.errors:
            self.errors.clear()
        if res_short_id:
            self.action = "/hsapi/_internal/%s/coverage/add-metadata/" % res_short_id
        else:
            self.action = ""
        if len(self.initial) > 0:
            self.initial['projection'] = 'WGS 84 EPSG:4326'
            self.initial['units'] = 'Decimal degrees'
        else:
            self.fields['type'].widget.attrs['checked'] = 'checked'
            self.fields['projection'].widget.attrs['value'] = 'WGS 84 EPSG:4326'
            self.fields['units'].widget.attrs['value'] = 'Decimal degrees'
        if not allow_edit:
            for field in list(self.fields.values()):
                field.widget.attrs['readonly'] = True
        else:
            # projection and units carry fixed values even in edit mode
            self.fields['projection'].widget.attrs['readonly'] = True
            self.fields['units'].widget.attrs['readonly'] = True
        if file_type:
            # add the 'data-map-item' attribute so that map interface can be used for editing
            # these fields
            self.fields['north'].widget.attrs['data-map-item'] = 'latitude'
            self.fields['east'].widget.attrs['data-map-item'] = 'longitude'
            self.fields['northlimit'].widget.attrs['data-map-item'] = 'northlimit'
            self.fields['eastlimit'].widget.attrs['data-map-item'] = 'eastlimit'
            self.fields['southlimit'].widget.attrs['data-map-item'] = 'southlimit'
            self.fields['westlimit'].widget.attrs['data-map-item'] = 'westlimit'

    def clean(self):
        """Validate per coverage type and reshape cleaned_data into coverage format.

        On success cleaned_data becomes {'type': <type>, 'value': {...}} where the
        value dict holds stringified coordinates plus optional name/projection.
        """
        super(CoverageSpatialForm, self).clean()
        temp_cleaned_data = copy.deepcopy(self.cleaned_data)
        spatial_coverage_type = temp_cleaned_data['type']
        is_form_errors = False
        if self.errors:
            self.errors.clear()
        if spatial_coverage_type == 'point':
            north = temp_cleaned_data.get('north', None)
            east = temp_cleaned_data.get('east', None)
            # allow a value of 0 to go through
            if not north and north != 0:
                self._errors['north'] = ["Data for north is missing"]
                is_form_errors = True
                del self.cleaned_data['north']
            if not east and east != 0:
                self._errors['east'] = ["Data for east is missing"]
                is_form_errors = True
                del self.cleaned_data['east']
            if is_form_errors:
                return self.cleaned_data
            # a point coverage carries no box limits
            if 'northlimit' in temp_cleaned_data:
                del temp_cleaned_data['northlimit']
            # CONSISTENCY FIX: the original tested self.cleaned_data here while
            # deleting from temp_cleaned_data; both dicts still hold the same
            # keys at this point, but test the dict actually being mutated.
            if 'eastlimit' in temp_cleaned_data:
                del temp_cleaned_data['eastlimit']
            if 'southlimit' in temp_cleaned_data:
                del temp_cleaned_data['southlimit']
            if 'westlimit' in temp_cleaned_data:
                del temp_cleaned_data['westlimit']
            if 'uplimit' in temp_cleaned_data:
                del temp_cleaned_data['uplimit']
            if 'downlimit' in temp_cleaned_data:
                del temp_cleaned_data['downlimit']
            temp_cleaned_data['north'] = str(temp_cleaned_data['north'])
            temp_cleaned_data['east'] = str(temp_cleaned_data['east'])
        else:  # box type coverage
            if 'north' in temp_cleaned_data:
                del temp_cleaned_data['north']
            if 'east' in temp_cleaned_data:
                del temp_cleaned_data['east']
            if 'elevation' in temp_cleaned_data:
                del temp_cleaned_data['elevation']
            for limit in ('northlimit', 'eastlimit', 'southlimit', 'westlimit'):
                limit_data = temp_cleaned_data.get(limit, None)
                # allow value of 0 to go through
                if not limit_data and limit_data != 0:
                    self._errors[limit] = ["Data for %s is missing" % limit]
                    is_form_errors = True
                    del self.cleaned_data[limit]
            if is_form_errors:
                return self.cleaned_data
            temp_cleaned_data['northlimit'] = str(temp_cleaned_data['northlimit'])
            temp_cleaned_data['eastlimit'] = str(temp_cleaned_data['eastlimit'])
            temp_cleaned_data['southlimit'] = str(temp_cleaned_data['southlimit'])
            temp_cleaned_data['westlimit'] = str(temp_cleaned_data['westlimit'])
        del temp_cleaned_data['type']
        # drop empty optional fields from the value dict
        if 'projection' in temp_cleaned_data:
            if len(temp_cleaned_data['projection']) == 0:
                del temp_cleaned_data['projection']
        if 'name' in temp_cleaned_data:
            if len(temp_cleaned_data['name']) == 0:
                del temp_cleaned_data['name']
        self.cleaned_data['value'] = copy.deepcopy(temp_cleaned_data)
        # strip everything except 'type' and 'value' from the top-level dict
        if 'northlimit' in self.cleaned_data:
            del self.cleaned_data['northlimit']
        if 'eastlimit' in self.cleaned_data:
            del self.cleaned_data['eastlimit']
        if 'southlimit' in self.cleaned_data:
            del self.cleaned_data['southlimit']
        if 'westlimit' in self.cleaned_data:
            del self.cleaned_data['westlimit']
        if 'uplimit' in self.cleaned_data:
            del self.cleaned_data['uplimit']
        if 'downlimit' in self.cleaned_data:
            del self.cleaned_data['downlimit']
        if 'north' in self.cleaned_data:
            del self.cleaned_data['north']
        if 'east' in self.cleaned_data:
            del self.cleaned_data['east']
        if 'elevation' in self.cleaned_data:
            del self.cleaned_data['elevation']
        if 'name' in self.cleaned_data:
            del self.cleaned_data['name']
        if 'units' in self.cleaned_data:
            del self.cleaned_data['units']
        if 'zunits' in self.cleaned_data:
            del self.cleaned_data['zunits']
        if 'projection' in self.cleaned_data:
            del self.cleaned_data['projection']
        return self.cleaned_data
class LanguageValidationForm(forms.Form):
    """Server-side validation of a submitted Language element (3-char code)."""

    code = forms.CharField(max_length=3)
class ValidDateValidationForm(forms.Form):
    """Validate a 'valid' date-range element (start_date/end_date pair)."""

    start_date = forms.DateField()
    end_date = forms.DateField()

    def clean(self):
        """Require both dates or neither; tag complete pairs with type 'valid'."""
        cleaned_data = super(ValidDateValidationForm, self).clean()
        start_date = cleaned_data.get('start_date', None)
        end_date = cleaned_data.get('end_date', None)
        if start_date and not end_date:
            self._errors['end_date'] = ["End date is missing"]
        if end_date and not start_date:
            self._errors['start_date'] = ["Start date is missing"]
        if not start_date and not end_date:
            # the pair is optional as a whole: clear the field-level errors
            # when both dates were omitted.
            # ROBUSTNESS FIX: pop() instead of del so a missing key cannot
            # raise KeyError here.
            self._errors.pop('start_date', None)
            self._errors.pop('end_date', None)
        if start_date and end_date:
            self.cleaned_data['type'] = 'valid'
        return self.cleaned_data
def get_crispy_form_fields(field_names, file_type=False):
    """Return a list of objects of type Field.

    :param field_names: list of form field names
    :param file_type: if true, then this is a metadata form for file type, otherwise, a form
    for resource
    :return: a list of Field objects
    """
    id_template = "id_{}_filetype" if file_type else "id_{}"
    return [Field(name, css_class='form-control input-sm', id=id_template.format(name))
            for name in field_names]
# [#4156] improved error message for coverage metadata
"""Django forms for hs_core module."""
import copy
from django.forms import ModelForm, BaseFormSet
from django.contrib.admin.widgets import forms
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Fieldset, HTML
from crispy_forms.bootstrap import Field
from .hydroshare import utils
from .models import Party, Creator, Contributor, validate_user_url, Relation, Source, Identifier, \
FundingAgency, Description
class Helper(object):
    """Render resusable elements to use in Django forms."""

    @classmethod
    def get_element_add_modal_form(cls, element_name, modal_form_context_name):
        """Apply a modal UI element to a given form.

        Used in netCDF and modflow_modelinstance apps

        :param element_name: element name used in the modal container id, title
            and crispy template include
        :param modal_form_context_name: template context variable whose
            ``action`` attribute the modal's <form> posts to
        :return: a crispy Layout wrapping the element form in a Bootstrap modal
        """
        modal_title = "Add %s" % element_name.title()
        # generic modal skeleton; element-specific pieces are patched by index below
        layout = Layout(
            HTML('<div class="modal fade" id="add-element-dialog" tabindex="-1" '
                 'role="dialog" aria-labelledby="myModalLabel" aria-hidden="true">'
                 '<div class="modal-dialog">'
                 '<div class="modal-content">'),
            HTML('<form action="{{ form.action }}" '
                 'method="POST" enctype="multipart/form-data"> '),
            HTML('{% csrf_token %} '
                 '<input name="resource-mode" type="hidden" value="edit"/>'
                 '<div class="modal-header">'
                 '<button type="button" class="close" '
                 'data-dismiss="modal" aria-hidden="true">×'
                 '</button>'),
            HTML('<h4 class="modal-title" id="myModalLabel"> Add Element </h4>'),
            HTML('</div>'
                 '<div class="modal-body">'
                 '{% csrf_token %}'
                 '<div class="form-group">'),
            HTML('{% load crispy_forms_tags %} {% crispy add_creator_modal_form %} '),
            HTML('</div>'
                 '</div>'
                 '<div class="modal-footer">'
                 '<button type="button" class="btn btn-default" '
                 'data-dismiss="modal">Close</button>'
                 '<button type="submit" class="btn btn-primary">'
                 'Save changes</button>'
                 '</div>'
                 '</form>'
                 '</div>'
                 '</div>'
                 '</div>')
        )
        # patch the container div (index 0) with the element-specific dialog id
        layout[0] = HTML('<div class="modal fade" id="add-%s-dialog" tabindex="-1" role="dialog" '
                         'aria-labelledby="myModalLabel" aria-hidden="true">'
                         '<div class="modal-dialog">'
                         '<div class="modal-content">' % element_name.lower())
        # patch the form action (index 1) to use the caller-supplied context variable
        layout[1] = HTML('<form action="{{ %s.action }}" method="POST" '
                         'enctype="multipart/form-data"> ' % modal_form_context_name)
        # patch the modal title (index 3)
        layout[3] = HTML('<h4 class="modal-title" id="myModalLabel"> {title} '
                         '</h4>'.format(title=modal_title),)
        # patch the crispy include (index 5) to render this element's modal form
        html_str = '{% load crispy_forms_tags %} {% crispy' + ' add_{element}_modal_form'.format(
            element=element_name.lower()) + ' %}'
        layout[5] = HTML(html_str)
        return layout
# the 1st and the 3rd HTML layout objects get replaced in MetaDataElementDeleteForm class
def _get_modal_confirm_delete_matadata_element():
    """Return the Bootstrap modal Layout used to confirm metadata element deletion.

    NOTE(review): the 'matadata' typo in the name is kept because callers
    reference this exact name.
    """
    layout = Layout(
        HTML('<div class="modal fade" id="delete-metadata-element-dialog" '
             'tabindex="-1" role="dialog" aria-labelledby="myModalLabel" '
             'aria-hidden="true">'),
        HTML('<div class="modal-dialog">'
             '<div class="modal-content">'
             '<div class="modal-header">'
             '<button type="button" class="close" data-dismiss="modal" '
             'aria-hidden="true">×</button>'
             '<h4 class="modal-title" id="myModalLabel">'
             'Delete metadata element</h4>'
             '</div>'
             '<div class="modal-body">'
             '<strong>Are you sure you want to delete this metadata '
             'element?</strong>'
             '</div>'
             '<div class="modal-footer">'
             '<button type="button" class="btn btn-default" '
             'data-dismiss="modal">Cancel</button>'),
        # placeholder delete link; the href is filled in by the caller
        HTML('<a type="button" class="btn btn-danger" href="">Delete</a>'),
        HTML('</div>'
             '</div>'
             '</div>'
             '</div>'),
    )
    return layout
class MetaDataElementDeleteForm(forms.Form):
    """Render a modal that confirms element deletion."""

    def __init__(self, res_short_id, element_name, element_id, *args, **kwargs):
        """Render a modal that confirms element deletion.

        uses _get_modal_confirm_delete_matadata_element

        :param res_short_id: short id of the resource owning the element
        :param element_name: metadata element name (used in dialog id and URL)
        :param element_id: id of the element to delete
        """
        super(MetaDataElementDeleteForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper()
        # pre-quoted URL string; it is substituted directly into the href below
        self.delete_element_action = '"/hsapi/_internal/%s/%s/%s/delete-metadata/"' % \
                                     (res_short_id, element_name, element_id)
        self.helper.layout = _get_modal_confirm_delete_matadata_element()
        # patch the dialog container (index 0) so each element gets its own modal id
        self.helper.layout[0] = HTML('<div class="modal fade" id="delete-%s-element-dialog_%s" '
                                     'tabindex="-1" role="dialog" aria-labelledby="myModalLabel" '
                                     'aria-hidden="true">' % (element_name, element_id))
        # patch the delete link (index 2) with the actual delete-metadata URL
        self.helper.layout[2] = HTML('<a type="button" class="btn btn-danger" '
                                     'href=%s>Delete</a>' % self.delete_element_action)
        self.helper.form_tag = False
class ExtendedMetadataForm(forms.Form):
    """Form whose layout is supplied by the caller via extended_metadata_layout."""

    def __init__(self, resource_mode='edit', extended_metadata_layout=None, *args, **kwargs):
        """Attach a crispy helper rendering the supplied layout without a form tag."""
        super(ExtendedMetadataForm, self).__init__(*args, **kwargs)
        helper = FormHelper()
        helper.form_tag = False
        helper.layout = extended_metadata_layout
        self.helper = helper
class CreatorFormSetHelper(FormHelper):
    """Crispy-forms helper laying out the Creator form fields (HTML5-required)."""

    def __init__(self, *args, **kwargs):
        """Configure error rendering and the creator fieldset layout."""
        super(CreatorFormSetHelper, self).__init__(*args, **kwargs)
        self.form_tag = False
        self.form_show_errors = True
        self.error_text_inline = True
        self.html5_required = True
        # fields appear in the order they are listed here
        css = 'form-control input-sm'
        field_names = ('name', 'description', 'organization', 'email', 'address',
                       'phone', 'homepage', 'order')
        self.layout = Layout(
            Fieldset('Creator', *[Field(n, css_class=css) for n in field_names]),
        )
class PartyForm(ModelForm):
    """Base model form for Party (person) metadata elements."""

    def __init__(self, *args, **kwargs):
        """Initialize the form.

        A non-empty initial description (user identifier URL) is rewritten
        as an absolute URL on the current site. The profile link formset is
        not used by this base form.
        """
        if 'initial' in kwargs:
            initial = kwargs['initial']
            if initial.get('description'):
                initial['description'] = utils.current_site_url() + initial['description']
        super(PartyForm, self).__init__(*args, **kwargs)
        self.profile_link_formset = None
        self.number = 0

    class Meta:
        """Declare model, displayed fields, and labels for PartyForm."""

        model = Party
        fields = ['name', 'description', 'organization', 'email', 'address', 'phone', 'homepage']
        # TODO: field labels and widgets types to be specified
        labels = {'description': 'HydroShare User Identifier (URL)'}
class CreatorForm(PartyForm):
    """Model form for Creator metadata elements (creators of resources).

    Bug fix: Meta.fields previously aliased PartyForm.Meta.fields and
    appended 'order' in place, mutating the shared parent list as an
    import-time side effect (which ContributorForm then had to undo).
    A copied list is used instead.
    """

    def __init__(self, allow_edit=True, res_short_id=None, element_id=None, *args, **kwargs):
        """Set up crispy helper, form action URL, and field editability.

        :param allow_edit: when False all fields are rendered read-only
        :param res_short_id: short id of the owning resource; selects the
            creator add-metadata action when present
        :param element_id: id of an existing creator element (unused here)
        """
        super(CreatorForm, self).__init__(*args, **kwargs)
        self.helper = CreatorFormSetHelper()
        self.delete_modal_form = None
        if res_short_id:
            self.action = "/hsapi/_internal/%s/creator/add-metadata/" % res_short_id
        else:
            self.action = ""
        if not allow_edit:
            for fld_name in self.Meta.fields:
                self.fields[fld_name].widget.attrs['readonly'] = True
                self.fields[fld_name].widget.attrs['style'] = "background-color:white;"
            self.fields['order'].widget.attrs['readonly'] = True
            self.fields['order'].widget.attrs['style'] = "background-color:white;"
        else:
            # 'order' is assigned server-side when a new creator is added
            if 'add-metadata' in self.action:
                del self.fields['order']

    @property
    def form_id(self):
        """Return the DOM id for this form ('id_creator_<number>')."""
        form_id = 'id_creator_%s' % self.number
        return form_id

    @property
    def form_id_button(self):
        """Return the DOM id wrapped in single quotes for template use."""
        form_id = 'id_creator_%s' % self.number
        return "'" + form_id + "'"

    class Meta:
        """Describe meta properties of CreatorForm."""

        model = Creator
        # Copy the parent field list instead of mutating it in place.
        fields = PartyForm.Meta.fields + ["order"]
        labels = PartyForm.Meta.labels
class PartyValidationForm(forms.Form):
    """Validation form shared by Party-based metadata elements."""

    description = forms.CharField(required=False, validators=[validate_user_url])
    name = forms.CharField(required=False, max_length=100)
    organization = forms.CharField(max_length=200, required=False)
    email = forms.EmailField(required=False)
    address = forms.CharField(max_length=250, required=False)
    phone = forms.CharField(max_length=25, required=False)
    homepage = forms.URLField(required=False)
    identifiers = forms.CharField(required=False)

    def clean_description(self):
        """Normalize a user's absolute URL to a site-relative /user/<id>/ path."""
        url = self.cleaned_data['description']
        if not url:
            return url
        parts = url.split('/')
        if len(parts) > 4:
            return '/user/{user_id}/'.format(user_id=parts[4])
        return url

    def clean_identifiers(self):
        """Delegate identifier validation to the Party model."""
        return Party.validate_identifiers(self.cleaned_data['identifiers'])

    def clean(self):
        """Require at least one of name or organization to be present."""
        cleaned_data = super(PartyValidationForm, self).clean()
        has_org = bool(cleaned_data.get('organization', None))
        name = cleaned_data.get('name', None)
        has_name = bool(name and name.strip())
        if not has_org and not has_name:
            self._errors['name'] = ["A value for name or organization is required but both "
                                    "are missing"]
        return self.cleaned_data
class CreatorValidationForm(PartyValidationForm):
    """Validation form for Creator elements; adds the optional order field."""

    order = forms.IntegerField(required=False)
class ContributorValidationForm(PartyValidationForm):
    """Validation form for Contributor elements; no fields beyond Party's."""
class BaseCreatorFormSet(BaseFormSet):
    """Formset for editing a resource's Creator elements."""

    def add_fields(self, form, index):
        """Delegate to BaseFormSet.add_fields unchanged."""
        super(BaseCreatorFormSet, self).add_fields(form, index)

    def get_metadata(self):
        """Return one {'creator': <cleaned data>} dict per non-empty form."""
        return [{'creator': dict(form.cleaned_data)}
                for form in self.forms if form.cleaned_data]
class ContributorFormSetHelper(FormHelper):
    """Crispy helper for contributor forms."""

    def __init__(self, *args, **kwargs):
        """Build the Contributor fieldset layout and require all fields.

        Fields render in the order listed in the Fieldset.
        """
        super(ContributorFormSetHelper, self).__init__(*args, **kwargs)
        field_width = 'form-control input-sm'
        self.form_tag = False
        self.layout = Layout(
            Fieldset('Contributor',
                     Field('name', css_class=field_width),
                     Field('description', css_class=field_width),
                     Field('organization', css_class=field_width),
                     Field('email', css_class=field_width),
                     Field('address', css_class=field_width),
                     Field('phone', css_class=field_width),
                     Field('homepage', css_class=field_width),
                     ),
        )
        # Bug fix: a trailing comma previously assigned the tuple (True,)
        # instead of the boolean True. It happened to evaluate truthy, but
        # the crispy-forms attribute is documented as a boolean.
        self.render_required_fields = True
class ContributorForm(PartyForm):
    """Render Contributor model form with appropriate attributes."""
    def __init__(self, allow_edit=True, res_short_id=None, element_id=None, *args, **kwargs):
        """Render Contributor model form with appropriate attributes.

        :param allow_edit: when False all fields are rendered read-only
        :param res_short_id: short id of the owning resource; selects the
            contributor add-metadata action when present
        :param element_id: id of an existing contributor element (unused here)
        """
        super(ContributorForm, self).__init__(*args, **kwargs)
        self.helper = ContributorFormSetHelper()
        self.delete_modal_form = None
        if res_short_id:
            self.action = "/hsapi/_internal/%s/contributor/add-metadata/" % res_short_id
        else:
            self.action = ""
        if not allow_edit:
            for fld_name in self.Meta.fields:
                self.fields[fld_name].widget.attrs['readonly'] = True
                self.fields[fld_name].widget.attrs['style'] = "background-color:white;"
    @property
    def form_id(self):
        """Return the DOM id for this form ('id_contributor_<number>')."""
        form_id = 'id_contributor_%s' % self.number
        return form_id
    @property
    def form_id_button(self):
        """Return the DOM id wrapped in single quotes for template use."""
        form_id = 'id_contributor_%s' % self.number
        return "'" + form_id + "'"
    class Meta:
        """Describe meta properties of ContributorForm, removing 'order' field."""
        model = Contributor
        # NOTE(review): this aliases PartyForm.Meta.fields (no copy) and then
        # removes 'order' in place -- undoing the in-place append performed by
        # CreatorForm.Meta earlier in the module. The net effect depends on
        # class definition order; copying the list would be safer.
        fields = PartyForm.Meta.fields
        labels = PartyForm.Meta.labels
        if 'order' in fields:
            fields.remove('order')
class BaseContributorFormSet(BaseFormSet):
    """Formset for editing a resource's Contributor elements."""

    def add_fields(self, form, index):
        """Delegate to BaseFormSet.add_fields unchanged."""
        super(BaseContributorFormSet, self).add_fields(form, index)

    def get_metadata(self):
        """Return one {'contributor': <cleaned data>} dict per non-empty form."""
        return [{'contributor': dict(form.cleaned_data)}
                for form in self.forms if form.cleaned_data]
class RelationFormSetHelper(FormHelper):
    """Crispy helper for relation forms (inline errors, no HTML5 validation)."""

    def __init__(self, *args, **kwargs):
        """Configure error display and build the Relation fieldset layout."""
        super(RelationFormSetHelper, self).__init__(*args, **kwargs)
        self.form_tag = False
        self.form_show_errors = True
        self.error_text_inline = True
        self.html5_required = False
        # Fields render in the order listed here.
        css = 'form-control input-sm'
        self.layout = Layout(
            Fieldset('Relation',
                     *[Field(name, css_class=css) for name in ('type', 'value')]),
        )
class RelationForm(ModelForm):
    """Model form for Relation metadata elements."""

    def __init__(self, allow_edit=True, res_short_id=None, *args, **kwargs):
        """Set up crispy helper, form action URL, and field editability."""
        super(RelationForm, self).__init__(*args, **kwargs)
        self.helper = RelationFormSetHelper()
        self.number = 0
        self.delete_modal_form = None
        self.action = (
            "/hsapi/_internal/%s/relation/add-metadata/" % res_short_id
            if res_short_id else "")
        if not allow_edit:
            for fld_name in self.Meta.fields:
                widget = self.fields[fld_name].widget
                widget.attrs['readonly'] = True
                widget.attrs['style'] = "background-color:white;"

    @property
    def form_id(self):
        """Return the DOM id for this form ('id_relation_<number>')."""
        return 'id_relation_{}'.format(self.number)

    @property
    def form_id_button(self):
        """Return the DOM id wrapped in single quotes for template use."""
        return "'{}'".format(self.form_id)

    class Meta:
        """Declare model, displayed fields, and labels for RelationForm."""

        model = Relation
        # fields that will be displayed are listed here, though not
        # necessarily rendered in this order
        fields = ['type', 'value']
        labels = {'type': 'Relation type', 'value': 'Related to'}
class RelationValidationForm(forms.Form):
    """Validate a relation's type and value strings."""

    type = forms.CharField(max_length=100)
    value = forms.CharField(max_length=500)
class SourceFormSetHelper(FormHelper):
    """Crispy helper for source forms (inline errors, no HTML5 validation)."""

    def __init__(self, *args, **kwargs):
        """Configure error display and build the Source fieldset layout."""
        super(SourceFormSetHelper, self).__init__(*args, **kwargs)
        self.form_tag = False
        self.form_show_errors = True
        self.error_text_inline = True
        self.html5_required = False
        self.layout = Layout(
            Fieldset('Source',
                     Field('derived_from', css_class='form-control input-sm'),
                     ),
        )
class SourceForm(ModelForm):
    """Model form for Source metadata elements."""

    def __init__(self, allow_edit=True, res_short_id=None, *args, **kwargs):
        """Set up crispy helper, form action URL, and field editability."""
        super(SourceForm, self).__init__(*args, **kwargs)
        self.helper = SourceFormSetHelper()
        self.number = 0
        self.delete_modal_form = None
        self.allow_edit = allow_edit
        self.action = (
            "/hsapi/_internal/%s/source/add-metadata/" % res_short_id
            if res_short_id else "")
        if not allow_edit:
            widget = self.fields['derived_from'].widget
            widget.attrs['readonly'] = True
            widget.attrs['style'] = "background-color:white;"

    @property
    def form_id(self):
        """Return the DOM id for this form ('id_source_<number>')."""
        return 'id_source_{}'.format(self.number)

    @property
    def form_id_button(self):
        """Return the DOM id wrapped in single quotes for template use."""
        return "'{}'".format(self.form_id)

    class Meta:
        """Declare model and displayed fields for SourceForm."""

        model = Source
        fields = ['derived_from']
class SourceValidationForm(forms.Form):
    """Validate a source's derived_from string."""

    derived_from = forms.CharField(max_length=300)
class IdentifierFormSetHelper(FormHelper):
    """Crispy helper for identifier forms (HTML5 validation, inline errors)."""

    def __init__(self, *args, **kwargs):
        """Configure error display and build the Identifier fieldset layout."""
        super(IdentifierFormSetHelper, self).__init__(*args, **kwargs)
        self.form_tag = False
        self.form_show_errors = True
        self.error_text_inline = True
        self.html5_required = True
        # Fields render in the order listed here.
        css = 'form-control input-sm'
        self.layout = Layout(
            Fieldset('Identifier',
                     *[Field(name, css_class=css) for name in ('name', 'url')]),
        )
class IdentifierForm(ModelForm):
    """Read-only model form for Identifier metadata elements."""

    def __init__(self, res_short_id=None, *args, **kwargs):
        """Set up crispy helper and mark both fields read-only."""
        super(IdentifierForm, self).__init__(*args, **kwargs)
        for field_name in ('name', 'url'):
            widget = self.fields[field_name].widget
            widget.attrs['readonly'] = True
            widget.attrs['style'] = "background-color:white;"
        self.helper = IdentifierFormSetHelper()
        self.number = 0
        self.delete_modal_form = None
        self.action = (
            "/hsapi/_internal/%s/identifier/add-metadata/" % res_short_id
            if res_short_id else "")

    class Meta:
        """Declare model and displayed fields for IdentifierForm."""

        model = Identifier
        fields = ['name', 'url']

    def clean(self):
        """Reject the reserved identifier name 'hydroshareidentifier'."""
        data = self.cleaned_data
        if data['name'].lower() == 'hydroshareidentifier':
            raise forms.ValidationError("Identifier name attribute can't have a value "
                                        "of '{}'.".format(data['name']))
        return data
class FundingAgencyFormSetHelper(FormHelper):
    """Crispy helper for funding agency forms."""

    def __init__(self, *args, **kwargs):
        """Configure error display and build the Funding Agency fieldset."""
        super(FundingAgencyFormSetHelper, self).__init__(*args, **kwargs)
        self.form_tag = False
        self.form_show_errors = True
        self.error_text_inline = True
        self.html5_required = False
        # Fields render in the order listed here.
        css = 'form-control input-sm'
        field_names = ('agency_name', 'award_title', 'award_number', 'agency_url')
        self.layout = Layout(
            Fieldset('Funding Agency',
                     *[Field(name, css_class=css) for name in field_names]),
        )
class FundingAgencyForm(ModelForm):
    """Model form for FundingAgency metadata elements."""

    def __init__(self, allow_edit=True, res_short_id=None, *args, **kwargs):
        """Set up crispy helper, form action URL, and field editability."""
        super(FundingAgencyForm, self).__init__(*args, **kwargs)
        self.helper = FundingAgencyFormSetHelper()
        self.number = 0
        self.delete_modal_form = None
        self.action = (
            "/hsapi/_internal/%s/fundingagency/add-metadata/" % res_short_id
            if res_short_id else "")
        if not allow_edit:
            for fld_name in self.Meta.fields:
                widget = self.fields[fld_name].widget
                widget.attrs['readonly'] = True
                widget.attrs['style'] = "background-color:white;"

    @property
    def form_id(self):
        """Return the DOM id for this form ('id_fundingagency_<number>')."""
        return 'id_fundingagency_{}'.format(self.number)

    @property
    def form_id_button(self):
        """Return the DOM id wrapped in single quotes for template use."""
        return "'{}'".format(self.form_id)

    class Meta:
        """Declare model, displayed fields, and labels for FundingAgencyForm."""

        model = FundingAgency
        fields = ['agency_name', 'award_title', 'award_number', 'agency_url']
        labels = {'agency_name': 'Funding agency name', 'award_title': 'Title of the award',
                  'award_number': 'Award number', 'agency_url': 'Agency website'}
class FundingAgencyValidationForm(forms.Form):
    """Validate funding agency fields; only agency_name is required."""

    agency_name = forms.CharField(required=True)
    award_title = forms.CharField(required=False)
    award_number = forms.CharField(required=False)
    agency_url = forms.URLField(required=False)
class BaseFormHelper(FormHelper):
    """Crispy helper shared by the non-repeatable metadata element forms."""

    def __init__(self, allow_edit=True, res_short_id=None, element_id=None, element_name=None,
                 element_layout=None, *args, **kwargs):
        """Configure form id, action URL, and layout for one metadata element.

        :param allow_edit: when True (with a resource id) the layout gains a
            "Save changes" button
        :param res_short_id: short id of the resource; when absent the helper
            renders without a <form> tag
        :param element_id: id of an existing element; selects the
            update-metadata action instead of add-metadata
        :param element_name: metadata element name, also used for display
        :param element_layout: crispy layout for the element's fields
        :keyword coverage: coverage type ('spatial'/'temporal'), suffixing the
            form id so the two coverage forms do not collide
        :keyword element_name_label: display label overriding element_name
        """
        coverage_type = kwargs.pop('coverage', None)
        element_name_label = kwargs.pop('element_name_label', None)
        super(BaseFormHelper, self).__init__(*args, **kwargs)
        if res_short_id:
            self.form_method = 'post'
            self.form_tag = True
            if element_name.lower() == 'coverage' and coverage_type:
                self.form_id = 'id-%s-%s' % (element_name.lower(), coverage_type)
            else:
                self.form_id = 'id-%s' % element_name.lower()
            if element_id:
                self.form_action = "/hsapi/_internal/%s/%s/%s/update-metadata/" % \
                                   (res_short_id, element_name.lower(), element_id)
            else:
                self.form_action = "/hsapi/_internal/%s/%s/add-metadata/" % (res_short_id,
                                                                             element_name)
        else:
            self.form_tag = False
        # Title-case the element name for display, then apply display overrides.
        element_name = element_name.title()
        if element_name_label:
            element_name = element_name_label
        if element_name == "Subject":
            element_name = "Keywords"
        elif element_name == "Description":
            element_name = "Abstract"
        if res_short_id and allow_edit:
            self.layout = Layout(
                Fieldset(element_name,
                         element_layout,
                         HTML('<div style="margin-top:10px">'),
                         # Bug fix: the tag previously contained a dangling
                         # 'return false;">' fragment (residue of a removed
                         # onclick handler) that rendered as junk attributes.
                         HTML('<button type="button" '
                              'class="btn btn-primary pull-right btn-form-submit">'
                              'Save changes</button>'),
                         HTML('</div>')
                         ),
            )
        else:
            self.form_tag = False
            self.layout = Layout(
                Fieldset(element_name,
                         element_layout,
                         ),
            )
class TitleValidationForm(forms.Form):
    """Validate a resource title string."""

    value = forms.CharField(max_length=300)
class SubjectsFormHelper(BaseFormHelper):
    """Crispy helper for the subjects (keywords) form.

    Multiple subjects are handled by a single comma-separated input rather
    than a formset, so one 'value' field covers all keywords.
    """

    def __init__(self, allow_edit=True, res_short_id=None, element_id=None, element_name=None,
                 *args, **kwargs):
        """Build a one-field layout and delegate to BaseFormHelper."""
        layout = Layout(
            Field('value', css_class='form-control input-sm'),
        )
        super(SubjectsFormHelper, self).__init__(allow_edit, res_short_id, element_id,
                                                 element_name, layout, *args, **kwargs)
class SubjectsForm(forms.Form):
    """Form for editing a resource's keywords (subject elements)."""

    value = forms.CharField(max_length=500,
                            label='',
                            widget=forms.TextInput(attrs={'placeholder': 'Keywords'}),
                            help_text='Enter each keyword separated by a comma.')

    def __init__(self, allow_edit=True, res_short_id=None, element_id=None, *args, **kwargs):
        """Set up crispy helper, form action URL, and field editability."""
        super(SubjectsForm, self).__init__(*args, **kwargs)
        self.helper = SubjectsFormHelper(allow_edit, res_short_id, element_id,
                                         element_name='subject')
        self.number = 0
        self.delete_modal_form = None
        self.action = (
            "/hsapi/_internal/%s/subject/add-metadata/" % res_short_id
            if res_short_id else "")
        if not allow_edit:
            for field in self.fields.values():
                field.widget.attrs['readonly'] = True
                field.widget.attrs['style'] = "background-color:white;"
class AbstractFormHelper(BaseFormHelper):
    """Crispy helper for the abstract (description) form."""

    def __init__(self, allow_edit=True, res_short_id=None, element_id=None, element_name=None,
                 *args, **kwargs):
        """Build a one-field layout and delegate to BaseFormHelper."""
        layout = Layout(
            Field('abstract', css_class='form-control input-sm'),
        )
        super(AbstractFormHelper, self).__init__(allow_edit, res_short_id, element_id,
                                                 element_name, layout, *args, **kwargs)
class AbstractForm(ModelForm):
    """Model form for the Description (abstract) metadata element."""

    def __init__(self, allow_edit=True, res_short_id=None, element_id=None, *args, **kwargs):
        """Set up the crispy helper; disable the field when editing is off."""
        super(AbstractForm, self).__init__(*args, **kwargs)
        self.helper = AbstractFormHelper(allow_edit, res_short_id, element_id,
                                         element_name='description')
        if not allow_edit:
            widget = self.fields['abstract'].widget
            widget.attrs['disabled'] = True
            widget.attrs['style'] = "background-color:white;"

    class Meta:
        """Declare model, displayed fields, and labels for AbstractForm."""

        model = Description
        fields = ['abstract']
        exclude = ['content_object']
        labels = {'abstract': ''}
class AbstractValidationForm(forms.Form):
    """Validate a resource abstract string."""

    abstract = forms.CharField(max_length=5000)
class RightsValidationForm(forms.Form):
    """Validate rights metadata; at least one of statement/url is required."""

    statement = forms.CharField(required=False)
    url = forms.URLField(required=False, max_length=500)

    def clean(self):
        """Flag both fields when neither a statement nor a URL was given."""
        cleaned_data = super(RightsValidationForm, self).clean()
        if not cleaned_data.get('statement', None) and not cleaned_data.get('url', None):
            self._errors['statement'] = ["A value for statement is missing"]
            self._errors['url'] = ["A value for Url is missing"]
        return self.cleaned_data
class CoverageTemporalFormHelper(BaseFormHelper):
    """Crispy helper for the temporal coverage form."""

    def __init__(self, allow_edit=True, res_short_id=None, element_id=None, element_name=None,
                 *args, **kwargs):
        """Build a start/end layout and delegate to BaseFormHelper.

        Fields render in the listed order.
        """
        file_type = kwargs.pop('file_type', False)
        layout = Layout(
            *get_crispy_form_fields(['start', 'end'], file_type=file_type))
        kwargs['coverage'] = 'temporal'
        super(CoverageTemporalFormHelper, self).__init__(allow_edit, res_short_id, element_id,
                                                         element_name, layout, *args, **kwargs)
class CoverageTemporalForm(forms.Form):
    """Render Coverage Temporal Form."""
    start = forms.DateField(label='Start Date')
    end = forms.DateField(label='End Date')
    def __init__(self, allow_edit=True, res_short_id=None, element_id=None, *args, **kwargs):
        """Render Coverage Temporal Form.

        :param allow_edit: when False all fields are rendered read-only
        :param res_short_id: short id of the resource; when given the form
            posts to the coverage add-metadata endpoint
        :param element_id: id of an existing coverage element
        :keyword file_type: True when editing file-type (aggregation)
            metadata rather than resource metadata
        """
        file_type = kwargs.pop('file_type', False)
        super(CoverageTemporalForm, self).__init__(*args, **kwargs)
        self.helper = CoverageTemporalFormHelper(allow_edit, res_short_id, element_id,
                                                 element_name='Temporal Coverage',
                                                 file_type=file_type)
        self.number = 0
        self.delete_modal_form = None
        if res_short_id:
            self.action = "/hsapi/_internal/%s/coverage/add-metadata/" % res_short_id
        else:
            self.action = ""
        if not allow_edit:
            for field in list(self.fields.values()):
                field.widget.attrs['readonly'] = True
    def clean(self):
        """Modify the form's cleaned_data dictionary.

        Replaces Django's default field errors with custom messages, checks
        that start is not after end, then folds the ISO-formatted dates into
        cleaned_data['value'] and sets cleaned_data['type'] = 'period'.
        """
        is_form_errors = False
        super(CoverageTemporalForm, self).clean()
        start_date = self.cleaned_data.get('start', None)
        end_date = self.cleaned_data.get('end', None)
        if self.errors:
            # drop default required/invalid errors; re-reported below
            self.errors.clear()
        if start_date is None:
            self.add_error('start', "Data for start date is missing")
            is_form_errors = True
        if end_date is None:
            self.add_error('end', "Data for end date is missing")
            is_form_errors = True
        if not is_form_errors:
            if start_date > end_date:
                self.add_error('end', "End date should be a date after the start date")
                is_form_errors = True
        if is_form_errors:
            return self.cleaned_data
        if 'name' in self.cleaned_data:
            # an empty coverage name is dropped rather than stored
            if len(self.cleaned_data['name']) == 0:
                del self.cleaned_data['name']
        self.cleaned_data['start'] = self.cleaned_data['start'].isoformat()
        self.cleaned_data['end'] = self.cleaned_data['end'].isoformat()
        # 'value' snapshots start/end (and name, if present); 'type' is set
        # after the copy so it stays out of 'value'
        self.cleaned_data['value'] = copy.deepcopy(self.cleaned_data)
        self.cleaned_data['type'] = 'period'
        if 'name' in self.cleaned_data:
            del self.cleaned_data['name']
        del self.cleaned_data['start']
        del self.cleaned_data['end']
        return self.cleaned_data
class CoverageSpatialFormHelper(BaseFormHelper):
    """Crispy helper for the spatial coverage form."""

    def __init__(self, allow_edit=True, res_short_id=None, element_id=None, element_name=None,
                 *args, **kwargs):
        """Build the spatial coverage layout and delegate to BaseFormHelper.

        The 'type' radio field gets an explicit id; the remaining fields
        render in the order listed below.
        """
        file_type = kwargs.pop('file_type', False)
        type_field_id = 'id_type_filetype' if file_type else 'id_type'
        layout = Layout(Field('type', id=type_field_id))
        form_field_names = ['name', 'projection', 'east', 'north', 'northlimit', 'eastlimit',
                            'southlimit', 'westlimit', 'units']
        for crispy_field in get_crispy_form_fields(form_field_names, file_type=file_type):
            layout.append(crispy_field)
        kwargs['coverage'] = 'spatial'
        super(CoverageSpatialFormHelper, self).__init__(allow_edit, res_short_id, element_id,
                                                        element_name, layout, *args, **kwargs)
class CoverageSpatialForm(forms.Form):
    """Form for editing a resource's spatial coverage (point or box)."""

    TYPE_CHOICES = (
        ('box', 'Box'),
        ('point', 'Point')
    )
    type = forms.ChoiceField(choices=TYPE_CHOICES,
                             widget=forms.RadioSelect(attrs={'class': 'inline'}), label='')
    name = forms.CharField(max_length=200, required=False, label='Place/Area Name')
    projection = forms.CharField(max_length=100, required=False,
                                 label='Coordinate System/Geographic Projection')
    east = forms.DecimalField(label='Longitude', widget=forms.TextInput())
    north = forms.DecimalField(label='Latitude', widget=forms.TextInput())
    units = forms.CharField(max_length=50, label='Coordinate Units')
    northlimit = forms.DecimalField(label='North Latitude', widget=forms.TextInput())
    eastlimit = forms.DecimalField(label='East Longitude', widget=forms.TextInput())
    southlimit = forms.DecimalField(label='South Latitude', widget=forms.TextInput())
    westlimit = forms.DecimalField(label='West Longitude', widget=forms.TextInput())

    def __init__(self, allow_edit=True, res_short_id=None, element_id=None, *args, **kwargs):
        """Set up the crispy helper, action URL, defaults, and editability.

        :keyword file_type: True when editing file-type (aggregation)
            metadata; adds 'data-map-item' attrs for the map interface
        """
        file_type = kwargs.pop('file_type', False)
        super(CoverageSpatialForm, self).__init__(*args, **kwargs)
        self.helper = CoverageSpatialFormHelper(allow_edit, res_short_id, element_id,
                                                element_name='Spatial Coverage',
                                                file_type=file_type)
        self.number = 0
        self.delete_modal_form = None
        if res_short_id:
            self.action = "/hsapi/_internal/%s/coverage/add-metadata/" % res_short_id
        else:
            self.action = ""
        if len(self.initial) > 0:
            # existing coverage: pin the projection/units shown to the user
            self.initial['projection'] = 'WGS 84 EPSG:4326'
            self.initial['units'] = 'Decimal degrees'
        else:
            # new coverage: preselect the first type and default the widgets
            self.fields['type'].widget.attrs['checked'] = 'checked'
            self.fields['projection'].widget.attrs['value'] = 'WGS 84 EPSG:4326'
            self.fields['units'].widget.attrs['value'] = 'Decimal degrees'
        if not allow_edit:
            for field in list(self.fields.values()):
                field.widget.attrs['readonly'] = True
        else:
            # projection and units stay fixed even when editing is allowed
            self.fields['projection'].widget.attrs['readonly'] = True
            self.fields['units'].widget.attrs['readonly'] = True
            if file_type:
                # add the 'data-map-item' attribute so that map interface can be used
                # for editing these fields
                self.fields['north'].widget.attrs['data-map-item'] = 'latitude'
                self.fields['east'].widget.attrs['data-map-item'] = 'longitude'
                self.fields['northlimit'].widget.attrs['data-map-item'] = 'northlimit'
                self.fields['eastlimit'].widget.attrs['data-map-item'] = 'eastlimit'
                self.fields['southlimit'].widget.attrs['data-map-item'] = 'southlimit'
                self.fields['westlimit'].widget.attrs['data-map-item'] = 'westlimit'

    def clean(self):
        """Normalize cleaned_data into the metadata 'value' dict.

        Point coverage keeps north/east; box coverage keeps the four limits.
        Numeric values are stringified and collected under
        cleaned_data['value']; the individual coordinate keys are then
        removed from cleaned_data (only 'type' and 'value' remain).
        """
        super(CoverageSpatialForm, self).clean()
        temp_cleaned_data = copy.deepcopy(self.cleaned_data)
        spatial_coverage_type = temp_cleaned_data['type']
        is_form_errors = False
        if self.errors:
            # field-level errors are replaced by the custom messages below
            self.errors.clear()
        if spatial_coverage_type == 'point':
            north = temp_cleaned_data.get('north', None)
            east = temp_cleaned_data.get('east', None)
            # allow a value of 0 to go through
            if not north and north != 0:
                # bug fix: 'north' is the latitude (see field label); the
                # original message reported longitude
                self.add_error('north', "Data for latitude is missing")
                is_form_errors = True
            if not east and east != 0:
                # bug fix: 'east' is the longitude; the original message
                # reported latitude
                self.add_error('east', "Data for longitude is missing")
                is_form_errors = True
            if is_form_errors:
                return self.cleaned_data
            # point coverage carries no box limits
            for key in ('northlimit', 'eastlimit', 'southlimit', 'westlimit',
                        'uplimit', 'downlimit'):
                temp_cleaned_data.pop(key, None)
            temp_cleaned_data['north'] = str(temp_cleaned_data['north'])
            temp_cleaned_data['east'] = str(temp_cleaned_data['east'])
        else:  # box type coverage
            for key in ('north', 'east', 'elevation'):
                temp_cleaned_data.pop(key, None)
            box_fields_map = {"northlimit": "north latitude", "southlimit": "south latitude",
                              "eastlimit": "east longitude", "westlimit": "west longitude"}
            for limit in box_fields_map:
                limit_data = temp_cleaned_data.get(limit, None)
                # allow a value of 0 to go through
                if not limit_data and limit_data != 0:
                    self.add_error(limit, "Data for %s is missing" % box_fields_map[limit])
                    is_form_errors = True
            if is_form_errors:
                return self.cleaned_data
            for limit in ('northlimit', 'eastlimit', 'southlimit', 'westlimit'):
                temp_cleaned_data[limit] = str(temp_cleaned_data[limit])
        del temp_cleaned_data['type']
        # empty optional strings are dropped rather than stored
        if not temp_cleaned_data.get('projection'):
            temp_cleaned_data.pop('projection', None)
        if not temp_cleaned_data.get('name'):
            temp_cleaned_data.pop('name', None)
        self.cleaned_data['value'] = copy.deepcopy(temp_cleaned_data)
        # strip the raw coordinate/metadata keys now folded into 'value'
        for key in ('northlimit', 'eastlimit', 'southlimit', 'westlimit',
                    'uplimit', 'downlimit', 'north', 'east', 'elevation',
                    'name', 'units', 'zunits', 'projection'):
            self.cleaned_data.pop(key, None)
        return self.cleaned_data
class LanguageValidationForm(forms.Form):
    """Validate a language code of at most three characters."""

    code = forms.CharField(max_length=3)
class ValidDateValidationForm(forms.Form):
    """Validate DateValidationForm with start_date and end_date attribute."""
    start_date = forms.DateField()
    end_date = forms.DateField()
    def clean(self):
        """Modify the form's cleaned data dictionary.

        Both dates, or neither, must be supplied: a lone start or end date is
        reported as an error, a completely empty form validates (the default
        required-field errors are discarded), and when both dates are present
        a 'type' of 'valid' is added to the cleaned data.
        """
        cleaned_data = super(ValidDateValidationForm, self).clean()
        start_date = cleaned_data.get('start_date', None)
        end_date = cleaned_data.get('end_date', None)
        if start_date and not end_date:
            self._errors['end_date'] = ["End date is missing"]
        if end_date and not start_date:
            self._errors['start_date'] = ["Start date is missing"]
        if not start_date and not end_date:
            # Drop the default required-field errors so an empty form
            # validates. NOTE(review): this also swallows errors when both
            # dates were supplied but failed to parse -- confirm intended.
            del self._errors['start_date']
            del self._errors['end_date']
        if start_date and end_date:
            self.cleaned_data['type'] = 'valid'
        return self.cleaned_data
def get_crispy_form_fields(field_names, file_type=False):
    """Build crispy Field objects for the given form field names.

    :param field_names: list of form field names
    :param file_type: if True, build fields for a file-type metadata form
        (field ids get a '_filetype' suffix); otherwise for a resource form
    :return: a list of Field objects
    """
    id_suffix = '_filetype' if file_type else ''
    return [Field(name, css_class='form-control input-sm',
                  id='id_{}{}'.format(name, id_suffix))
            for name in field_names]
# --- boundary between concatenated files: gauge test suite follows ---
# -*- coding: utf-8 -*-
from contextlib import contextmanager
import gc
import math
import operator
import pickle
import random
from random import Random
import time
import pytest
import gauge
from gauge import Gauge, Momentum
from gauge.constants import ADD, CLAMP, inf, NONE, OK, ONCE, REMOVE
from gauge.deterministic import (
Boundary, Determination, Horizon, Line, Ray, Segment)
PRECISION = 8
TIME, VALUE = 0, 1
class FakeGauge(Gauge):
    """Gauge test double whose determination is a hand-written point list."""
    def __init__(self, determination):
        super(FakeGauge, self).__init__(0, 0, 0, 0)
        # Allocate a Determination without running its __init__, then fill
        # it with the canned (time, value) points.
        self._determination = Determination.__new__(Determination)
        self._determination.extend(determination)
    @property
    def determination(self):
        """Return the canned determination instead of computing one."""
        return self._determination
def round_(x):
    """Round *x* to the suite-wide comparison precision."""
    return round(x, ndigits=PRECISION)
@contextmanager
def t(timestamp):
    """Freeze the gauge module's clock at *timestamp* for the duration
    of the ``with`` block, restoring the real clock afterwards."""
    frozen = float(timestamp)
    gauge.core.now = lambda: frozen
    try:
        yield
    finally:
        gauge.core.now = time.time
def round_determination(determination, precision=0):
    """Round every (time, value) pair in *determination* to *precision* digits."""
    rounded = []
    for when, value in determination:
        rounded.append((round(when, precision), round(value, precision)))
    return rounded
def is_gauge(x):
    """Return True if *x* is an instance of :class:`Gauge`."""
    return isinstance(x, Gauge)
def shift_gauge(gauge, delta=0):
    """Return a copy of *gauge* whose base value and both bounds are offset
    by *delta*; hyper-gauge bounds are shifted recursively."""
    max_ = (shift_gauge(gauge.max_gauge, delta)
            if gauge.max_gauge is not None
            else gauge.max_value + delta)
    min_ = (shift_gauge(gauge.min_gauge, delta)
            if gauge.min_gauge is not None
            else gauge.min_value + delta)
    shifted = Gauge(gauge.base[VALUE] + delta, max_, min_, gauge.base[TIME])
    for momentum in gauge.momenta:
        shifted.add_momentum(momentum)
    return shifted
def test_momenta_in_range():
    """Momenta act only between their since/until bounds."""
    g = Gauge(12, 100, at=0)
    g.add_momentum(+1, since=1, until=6)
    g.add_momentum(-1, since=3, until=8)
    assert g.determination == [
        (0, 12), (1, 12), (3, 14), (6, 14), (8, 12)]
def test_over_max():
    """Values at or above max clamp at the ceiling; outbound values drift back."""
    g = Gauge(8, 10, at=0)
    g.add_momentum(+1, since=0, until=4)
    assert g.determination == [(0, 8), (2, 10), (4, 10)]
    g = Gauge(12, 10, at=0)
    g.add_momentum(-1, since=0, until=4)
    assert g.determination == [(0, 12), (2, 10), (4, 8)]
    g = Gauge(12, 10, at=0)
    g.add_momentum(+1, since=0, until=4)
    g.add_momentum(-2, since=0, until=4)
    assert g.determination == [(0, 12), (1, 10), (4, 7)]
    g = Gauge(12, 10, at=0)
    g.add_momentum(+1, since=1, until=6)
    g.add_momentum(-1, since=3, until=8)
    g.add_momentum(+1, since=10, until=14)
    g.add_momentum(-1, since=13, until=16)
    assert g.determination == [
        (0, 12), (1, 12), (3, 12), (5, 10), (6, 10), (8, 8),
        (10, 8), (12, 10), (13, 10), (14, 10), (16, 8)]
def test_under_min():
    """Mirror of test_over_max: values clamp at the floor."""
    g = Gauge(2, 10, at=0)
    g.add_momentum(-1, since=0, until=4)
    assert g.determination == [(0, 2), (2, 0), (4, 0)]
    g = Gauge(-2, 10, at=0)
    g.add_momentum(+1, since=0, until=4)
    assert g.determination == [(0, -2), (2, 0), (4, 2)]
    g = Gauge(-2, 10, at=0)
    g.add_momentum(-1, since=0, until=4)
    g.add_momentum(+2, since=0, until=4)
    assert g.determination == [(0, -2), (1, 0), (4, 3)]
    g = Gauge(-2, 10, at=0)
    g.add_momentum(-1, since=1, until=6)
    g.add_momentum(+1, since=3, until=8)
    g.add_momentum(-1, since=10, until=14)
    g.add_momentum(+1, since=13, until=16)
    assert g.determination == [
        (0, -2), (1, -2), (3, -2), (5, 0), (6, 0), (8, 2),
        (10, 2), (12, 0), (13, 0), (14, 0), (16, 2)]
def test_permanent():
    """Momenta with open since/until bounds apply forever on that side."""
    g = Gauge(10, 10, at=0)
    g.add_momentum(-1)
    assert g.determination == [(0, 10), (10, 0)]
    g = Gauge(0, 10, at=0)
    g.add_momentum(+1)
    assert g.determination == [(0, 0), (10, 10)]
    g = Gauge(12, 10, at=0)
    g.add_momentum(-1)
    assert g.determination == [(0, 12), (2, 10), (12, 0)]
    g = Gauge(5, 10, at=0)
    g.add_momentum(+1, since=3)
    assert g.determination == [(0, 5), (3, 5), (8, 10)]
    g = Gauge(5, 10, at=0)
    g.add_momentum(+1, until=8)
    assert g.determination == [(0, 5), (5, 10), (8, 10)]
def test_life():
    """A decaying life bar read through the frozen-clock helper ``t``."""
    with t(0):
        life = Gauge(100, 100)
        life.add_momentum(-1)
        assert life.get() == 100
    with t(1):
        assert life.get() == 99
    with t(2):
        assert life.get() == 98
    with t(10):
        assert life.get() == 90
        life.incr(1)
        assert life.get() == 91
    with t(11):
        assert life.get() == 90
def test_no_momentum():
    """Without momenta the determination is the single base point."""
    g = Gauge(1, 10, at=0)
    assert g.determination == [(0, 1)]
    assert g.get() == 1
def test_ok_outbound():
    """outbound=OK permits explicit out-of-range sets; default raises."""
    g = Gauge(1, 10)
    with pytest.raises(ValueError):
        g.set(11)
    with pytest.raises(ValueError):
        g.incr(100)
    with pytest.raises(ValueError):
        g.decr(100)
    g.set(10)
    assert g.get() == 10
    g.set(11, outbound=OK)
    assert g.get() == 11
def test_once_outbound():
    """outbound=ONCE allows leaving the range once; a second step raises."""
    g = Gauge(1, 10)
    assert g.incr(5, outbound=ONCE) == 6
    assert g.incr(5, outbound=ONCE) == 11
    with pytest.raises(ValueError):
        g.incr(1, outbound=ONCE)
def test_clamp_outbound():
    """outbound=CLAMP pins results to the range instead of raising."""
    g = Gauge(1, 10)
    g.set(11, outbound=CLAMP)
    assert g.get() == 10
    g.incr(100, outbound=CLAMP)
    assert g.get() == 10
    g.decr(100, outbound=CLAMP)
    assert g.get() == 0
    g.incr(3, outbound=CLAMP)
    assert g.get() == 3
    g.decr(1, outbound=CLAMP)
    assert g.get() == 2
    g.set(100, outbound=OK)
    g.incr(3, outbound=CLAMP)
    assert g.get() == 100
    g.decr(3, outbound=CLAMP)
    assert g.get() == 97
    g.set(98, outbound=CLAMP)
    assert g.get() == 97
    g.set(97, outbound=CLAMP)
    assert g.get() == 97
    g.set(96, outbound=CLAMP)
    assert g.get() == 96
def test_set_min_max():
    """Changing min/max re-clamps the value and redetermines momenta."""
    # without momentum
    g = Gauge(5, 10)
    assert g.get_max() == 10
    assert g.get_min() == 0
    assert g.get() == 5
    g.set_range(max=100, min=10)
    assert g.get_max() == 100
    assert g.get_min() == 10
    assert g.get() == 10
    g.set_min(10)
    assert g.get() == 10
    g.set_min(5)
    assert g.get() == 10
    g.set_range(max=5, min=0)
    assert g.get_max() == 5
    assert g.get_min() == 0
    assert g.get() == 5
    # with momentum
    g = Gauge(5, 10, at=0)
    g.add_momentum(+1)
    assert g.determination == [(0, 5), (5, 10)]
    g.set_max(50, at=0)
    assert g.determination == [(0, 5), (45, 50)]
    g.set_min(40, at=0)
    assert g.determination == [(0, 40), (10, 50)]
def test_pickle():
    """A heavily-loaded gauge round-trips through pickle unchanged."""
    g = Gauge(0, 10, at=0)
    r = Random(17171771)
    for x in range(10000):
        since = r.randrange(1000)
        until = since + 1 + r.randrange(1000)
        g.add_momentum(r.uniform(-10, +10), since=since, until=until)
    data = pickle.dumps(g)
    g2 = pickle.loads(data)
    assert g.determination == g2.determination
def test_make_momentum():
    """add_momentum returns a Momentum; a Momentum plus kwargs is rejected."""
    g = Gauge(0, 10, at=0)
    m = g.add_momentum(+1)
    assert isinstance(m, Momentum)
    with pytest.raises(TypeError):
        g.add_momentum(m, since=1)
    with pytest.raises(TypeError):
        g.add_momentum(m, until=2)
def test_clear_momenta():
    """clear_momenta rebases the gauge at its current (possibly outbound) value."""
    g = Gauge(0, 10, at=0)
    g.add_momentum(+1)
    g.clear_momenta(at=5)
    assert g.get(5) == 5
    assert g.determination == [(5, 5)]
    # clear momenta when the value is out of the range
    g.add_momentum(+1)
    g.set(15, outbound=OK, at=10)
    g.clear_momenta(at=10)
    assert g.get(10) == 15
    assert g.determination == [(10, 15)]
    # rebase by Gauge.clear_momenta()
    g.clear_momenta(100)
    assert g.get() == 100
def test_when():
    """when(value) returns the first time the gauge reaches that value."""
    g = Gauge(0, 10, at=0)
    assert g.when(0) == 0
    with pytest.raises(ValueError):
        g.when(10)
    g.add_momentum(+1)
    assert g.when(10) == 10
    g.add_momentum(+1, since=3, until=5)
    assert g.when(10) == 8
    g.add_momentum(-2, since=4, until=8)
    assert g.when(0) == 0
    assert g.when(1) == 1
    assert g.when(2) == 2
    assert g.when(3) == 3
    assert g.when(4) == 3.5
    assert g.when(5) == 4
    assert g.when(6) == 12
    assert g.when(7) == 13
    assert g.when(8) == 14
    assert g.when(9) == 15
    assert g.when(10) == 16
    with pytest.raises(ValueError):
        g.when(11)
def test_whenever():
    """when(value, after=n) / whenever(value) enumerate repeated crossings."""
    g = Gauge(0, 10, at=0)
    g.add_momentum(+1)
    g.add_momentum(-2, since=3, until=4)
    g.add_momentum(-2, since=5, until=6)
    g.add_momentum(-2, since=7, until=8)
    assert g.when(3) == 3
    assert g.when(3, after=1) == 5
    assert g.when(3, after=2) == 7
    assert g.when(3, after=3) == 9
    with pytest.raises(ValueError):
        g.when(3, after=4)
    whenever = g.whenever(3)
    assert list(whenever) == [3, 5, 7, 9]
    # inverse
    g = Gauge(10, 10, at=0)
    g.add_momentum(-1)
    g.add_momentum(+2, since=3, until=4)
    g.add_momentum(+2, since=5, until=6)
    g.add_momentum(+2, since=7, until=8)
    assert g.when(7) == 3
    assert g.when(7, after=1) == 5
def test_since_gte_until():
    """A momentum whose since >= until is invalid."""
    g = Gauge(0, 10, at=0)
    with pytest.raises(ValueError):
        g.add_momentum(+1, since=1, until=1)
    with pytest.raises(ValueError):
        g.add_momentum(+1, since=2, until=1)
def test_repr():
    """repr formats for Gauge, Momentum, Horizon, and Ray."""
    g = Gauge(0, 10, at=0)
    assert repr(g) == '<Gauge 0.00/10.00>'
    g.set_min(-10, at=0)
    assert repr(g) == '<Gauge 0.00 between -10.00~10.00>'
    g.set_max(Gauge(10, 10), at=0)
    assert repr(g) == '<Gauge 0.00 between -10.00~<Gauge 10.00/10.00>>'
    m = Momentum(+100, since=10, until=20)
    assert repr(m) == '<Momentum +100.00/s 10.00~20.00>'
    m = Momentum(+100, since=10)
    assert repr(m) == '<Momentum +100.00/s 10.00~>'
    m = Momentum(+100, until=20)
    assert repr(m) == '<Momentum +100.00/s ~20.00>'
    h = Horizon(10, 20, 30)
    assert repr(h) == '<Line[HORIZON] 30.00 for 10.00~20.00>'
    r = Ray(10, 20, 30, 40)
    assert repr(r) == '<Line[RAY] 30.00+40.00/s for 10.00~20.00>'
def test_case1():
    """Regression: overlapping +/- momenta with a gap before the last one."""
    g = Gauge(0, 5, at=0)
    g.add_momentum(+1)
    g.add_momentum(-2, since=1, until=3)
    g.add_momentum(+1, since=5, until=7)
    assert g.determination == [
        (0, 0), (1, 1), (2, 0), (3, 0), (5, 2), (6.5, 5), (7, 5)]
def test_case2():
    """Regression: outbound start with net-positive then mixed momenta."""
    g = Gauge(12, 10, at=0)
    g.add_momentum(+2, since=2, until=10)
    g.add_momentum(-1, since=4, until=8)
    assert g.determination == [
        (0, 12), (2, 12), (4, 12), (6, 10), (8, 10), (10, 10)]
def test_case3():
    """Regression: incr past max with outbound=OK, then decay back to max."""
    g = Gauge(0, 10, at=0)
    assert g.get(0) == 0
    g.add_momentum(+1, since=0)
    assert g.get(10) == 10
    g.incr(3, outbound=OK, at=11)
    assert g.get(11) == 13
    g.add_momentum(-1, since=13)
    assert g.get(13) == 13
    assert g.get(14) == 12
    assert g.get(15) == 11
    assert g.get(16) == 10
    assert g.get(17) == 10
def test_case4():
    """Regression: two identical momenta sum their velocities."""
    g = Gauge(0, 10, at=0)
    g.add_momentum(+1)
    g.add_momentum(+1)
    assert g.determination == [(0, 0), (5, 10)]
def test_case5():
    """Regression: accumulated float error must still respect the floor."""
    g = Gauge(1, 1, 0, at=0)
    for x in range(11):
        g.add_momentum(-0.1, since=x, until=x + 1)
    assert g.get(11) == 0 # adjusted by min=0
def test_case6():
    """Regression: a momentum starting before the base time must converge to 0."""
    g = Gauge(1, 10, at=1417868986.94428)
    g.add_momentum(+0.167)
    g.add_momentum(-0.417, since=1417863954.884099)
    assert g.determination[-1][VALUE] == 0
def test_remove_momentum():
    """remove_momentum matches by value and removes one instance at a time."""
    g = Gauge(0, 10, at=0)
    m1 = g.add_momentum(+1)
    m2 = g.add_momentum(Momentum(+1))
    g.add_momentum(+2, since=10)
    g.add_momentum(-3, until=100)
    assert len(g.momenta) == 4
    # m1 == m2 (same velocity and bounds), so removing m2 once leaves the
    # equal twin still present.
    assert g.remove_momentum(m2) == m2
    assert len(g.momenta) == 3
    assert m1 in g.momenta
    assert m2 in g.momenta
    assert g.remove_momentum(m2) == m2
    assert len(g.momenta) == 2
    assert m1 not in g.momenta
    assert m2 not in g.momenta
    with pytest.raises(ValueError):
        g.remove_momentum(+2)
    assert g.remove_momentum(+2, since=10) == (+2, 10, +inf)
    assert len(g.momenta) == 1
    assert g.remove_momentum(Momentum(-3, until=100)) == (-3, -inf, 100)
    assert not g.momenta
def test_remove_momentum_event_on_remove_momentum():
    """Removing momenta must also remove their internal events."""
    g = Gauge(0, 10, at=0)
    g.add_momentum(+1)
    assert g.determination == [(0, 0), (10, 10)]
    g.remove_momentum(+1)
    g.add_momentum(+1)
    assert g.determination == [(0, 0), (10, 10)]
    g.remove_momentum(+1)
    g.add_momentum(+1)
    g.add_momentum(+1)
    assert g.determination == [(0, 0), (5, 10)]
    g.clear_momenta(at=0)
    g.add_momentum(+1)
    assert g.determination == [(0, 0), (10, 10)]
def test_momenta_order():
    """Determination is independent of the order momenta were added."""
    g = Gauge(0, 50, at=0)
    g.add_momentum(+3, since=0, until=5)
    g.add_momentum(+2, since=1, until=4)
    g.add_momentum(+1, since=2, until=3)
    assert g.get(0) == 0
    assert g.get(1) == 3
    assert g.get(2) == 8
    assert g.get(3) == 14
    g.decr(1, at=3)
    assert g.get(3) == 13
    assert g.get(4) == 18
    assert g.get(5) == 21
def test_forget_past():
    """forget_past drops momenta that ended before the given time."""
    g = Gauge(0, 50, at=0)
    g.add_momentum(+1, since=0, until=5)
    g.add_momentum(0, since=0)
    g.add_momentum(0, until=999)
    assert g.get(0) == 0
    assert g.get(1) == 1
    assert g.get(2) == 2
    assert g.get(3) == 3
    assert g.get(4) == 4
    assert g.get(5) == 5
    assert g.get(10) == 5
    assert g.get(20) == 5
    assert len(g.momenta) == 3
    g.forget_past(at=30)
    assert len(g.momenta) == 2
def test_extensibility_of_make_momentum():
    """Subclasses may override _make_momentum to reinterpret arguments."""
    class MyGauge(Gauge):
        def _make_momentum(self, *args):
            args = args[::-1]
            return super(MyGauge, self)._make_momentum(*args)
    g = MyGauge(0, 10, at=0)
    m = g.add_momentum(3, 2, 1)
    assert m == (1, 2, 3)
def test_just_one_momentum():
    """A single momentum yields the same curve for equivalent bound spellings."""
    def gen_gauge(since=None, until=None):
        g = Gauge(5, 10, at=0)
        g.add_momentum(+0.1, since, until)
        return g
    # None ~ None
    g = gen_gauge()
    assert g.determination == [(0, 5), (50, 10)]
    # 0 ~ None
    g = gen_gauge(since=0)
    assert g.determination == [(0, 5), (50, 10)]
    # None ~ 100
    g = gen_gauge(until=100)
    assert g.determination == [(0, 5), (50, 10), (100, 10)]
    # 0 ~ 100
    g = gen_gauge(since=0, until=100)
    assert g.determination == [(0, 5), (50, 10), (100, 10)]
    # -100 ~ 100
    g = gen_gauge(since=-100, until=100)
    assert g.determination == [(0, 5), (50, 10), (100, 10)]
def test_velocity():
    """velocity(at=t) sums the active momenta; zero while clamped at max."""
    g = Gauge(0, 10, at=0)
    g.add_momentum(+1, since=2)
    g.add_momentum(+1, since=4, until=6)
    assert g.velocity(at=0) == 0
    assert g.velocity(at=2) == +1
    assert g.velocity(at=3) == +1
    assert g.velocity(at=4) == +2
    assert g.velocity(at=5) == +2
    assert g.velocity(at=6) == +1
    assert g.velocity(at=7) == +1
    assert g.velocity(at=8) == +1
    assert g.velocity(at=9) == +1
    assert g.velocity(at=10) == 0
def test_lines():
    """Horizon/Ray/Segment primitives: get, guess, and intersect."""
    horizon = Horizon(0, 10, 1234)
    assert horizon.get(0) == 1234
    assert horizon.get(10) == 1234
    assert horizon.guess(100) == 1234
    assert horizon.guess(-100) == 1234
    ray = Ray(0, 10, 0, velocity=+1)
    assert ray.get(0) == 0
    assert ray.get(5) == 5
    # get() is only defined inside the line's time span; guess() extrapolates
    # by clamping to the nearest endpoint value.
    with pytest.raises(ValueError):
        ray.get(-1)
    with pytest.raises(ValueError):
        ray.get(11)
    assert ray.guess(-1) == 0
    assert ray.guess(11) == 10
    assert ray.intersect(Horizon(0, 10, 5)) == (5, 5)
    assert ray.intersect(Horizon(0, 10, 10)) == (10, 10)
    assert ray.intersect(Horizon(0, +inf, 5)) == (5, 5)
    with pytest.raises(ValueError):
        ray.intersect(Horizon(0, 10, 15))
    with pytest.raises(ValueError):
        ray.intersect(Horizon(6, 10, 5))
    with pytest.raises(ValueError):
        ray.intersect(Horizon(-inf, +inf, 5))
    ray = Ray(0, +inf, 0, velocity=+1)
    assert ray.get(100) == 100
    assert ray.get(100000) == 100000
    seg = Segment(0, 10, -50.05804016454045, 12.780503036230357)
    assert seg.get(10) == 12.780503036230357
    assert seg.guess(100) == 12.780503036230357
    assert seg.guess(-100) == -50.05804016454045
def test_boundary():
    """Boundary walks its lines in order and compares via its cmp operator."""
    # walk
    lines = [Horizon(0, 10, 0),
             Ray(10, 20, 0, velocity=+1),
             Ray(20, 30, 10, velocity=-1)]
    boundary = Boundary(lines)
    assert boundary.line is lines[0]
    boundary.walk()
    assert boundary.line is lines[1]
    boundary.walk()
    assert boundary.line is lines[2]
    with pytest.raises(StopIteration):
        boundary.walk()
    # cmp
    assert boundary.cmp(1, 2)
    assert not boundary.cmp(2, 1)
    assert boundary.cmp_eq(1, 2)
    assert boundary.cmp_eq(1, 1)
    assert not boundary.cmp_eq(2, 1)
    assert boundary.cmp_inv(2, 1)
    assert not boundary.cmp_inv(1, 2)
    assert not boundary.cmp_inv(1, 1)
    # best
    zero_line = Segment(0, 0, 0, 0)
    ceil = Boundary([zero_line], operator.lt)
    floor = Boundary([zero_line], operator.gt)
    assert ceil.best is min
    assert floor.best is max
    # repr
    assert repr(ceil) == ('<Boundary line={0}, cmp=<built-in function lt>>'
                          ''.format(zero_line))
@pytest.fixture
def zigzag():
    """Gauge whose max and min bounds oscillate in opposite directions
    while the gauge itself zigzags with growing periods."""
    g = Gauge(1, Gauge(2, 3, 2, at=0), Gauge(1, 1, 0, at=0), at=0)
    for x in range(6):
        g.max_gauge.add_momentum(+1, since=x * 2, until=x * 2 + 1)
        g.max_gauge.add_momentum(-1, since=x * 2 + 1, until=x * 2 + 2)
        g.min_gauge.add_momentum(-1, since=x * 2, until=x * 2 + 1)
        g.min_gauge.add_momentum(+1, since=x * 2 + 1, until=x * 2 + 2)
    for x in range(3):
        # NOTE: this local `t` shadows the module-level clock helper.
        t = sum(y * 2 for y in range(x + 1))
        g.add_momentum(+1, since=t, until=t + (x + 1))
        g.add_momentum(-1, since=t + (x + 1), until=t + 2 * (x + 1))
    return g
@pytest.fixture
def bidir():
    """Gauge driven alternately up and down between two moving hyper-bounds."""
    g = Gauge(5, Gauge(10, 10, at=0), Gauge(0, 10, at=0), at=0)
    g.add_momentum(+1, since=0, until=3)
    g.add_momentum(-1, since=3, until=6)
    g.add_momentum(+1, since=6, until=9)
    g.add_momentum(-1, since=9, until=12)
    g.max_gauge.add_momentum(-1, since=0, until=4)
    g.max_gauge.add_momentum(+1, since=6, until=7)
    g.min_gauge.add_momentum(+1, since=1, until=6)
    g.min_gauge.add_momentum(-1, since=6, until=8)
    return g
def test_hypergauge_case1():
    """A shrinking hyper max gauge pushes the value down with it."""
    g = Gauge(12, 100, at=0)
    g.add_momentum(+1, since=1, until=6)
    g.add_momentum(-1, since=3, until=8)
    g.set_max(Gauge(15, 15, at=0), at=0)
    g.max_gauge.add_momentum(-1, until=5)
    assert g.determination == [
        (0, 12), (1, 12), (2, 13), (3, 12), (5, 10), (6, 10), (8, 8)]
    assert g.max_gauge.determination == [(0, 15), (5, 10)]
def test_hypergauge_case2():
    """A hyper max gauge that dips then recovers."""
    g = Gauge(12, 100, at=0)
    g.add_momentum(+1, since=1, until=6)
    g.add_momentum(-1, since=3, until=8)
    g.set_max(Gauge(15, 15, at=0), at=0)
    g.max_gauge.add_momentum(-1, until=4)
    g.max_gauge.add_momentum(+1, since=4, until=6)
    assert g.determination == [
        (0, 12), (1, 12), (2, 13), (3, 12), (4, 11), (6, 11), (8, 9)]
def test_hypergauge_case3():
    """A constant hyper max gauge behaves like a plain numeric max."""
    g = Gauge(12, 100, at=0)
    g.add_momentum(+1, since=1, until=6)
    g.add_momentum(-1, since=3, until=8)
    g.set_max(10, at=0)
    g.set(12, outbound=OK, at=0)
    assert g.determination == [
        (0, 12), (1, 12), (3, 12), (5, 10), (6, 10), (8, 8)]
    g.set_max(Gauge(10, 100, at=0), at=0)
    assert g.determination == [
        (0, 12), (1, 12), (3, 12), (5, 10), (6, 10), (8, 8)]
def test_hypergauge_case4():
    """A permanently falling hyper max and fully bidirectional hyper-bounds."""
    g = Gauge(12, 100, at=0)
    g.add_momentum(+1, since=1, until=6)
    g.add_momentum(-1, since=3, until=8)
    g.set_max(Gauge(15, 15, at=0), at=0)
    g.max_gauge.add_momentum(-1)
    assert g.determination == [
        (0, 12), (1, 12), (2, 13), (3, 12), (6, 9), (8, 7), (15, 0)]
    # bidirectional hyper-gauge
    g_max = Gauge(10, 10, at=0)
    g_max.add_momentum(-1, since=0, until=4)
    g_max.add_momentum(+1, since=6, until=7)
    g_min = Gauge(0, 10, at=0)
    g_min.add_momentum(+1, since=1, until=6)
    g_min.add_momentum(-1, since=6, until=8)
    g = Gauge(5, g_max, g_min, at=0)
    g.add_momentum(+1, since=0, until=3)
    g.add_momentum(-1, since=3, until=6)
    g.add_momentum(+1, since=6, until=9)
    g.add_momentum(-1, since=9, until=12)
    assert g.determination == [
        (0, 5), (2.5, 7.5), (3, 7), (4, 6), (5.5, 4.5), (6, 5), (8, 7),
        (9, 7), (12, 4)]
    g_min.incr(1, at=5)
    assert g.determination == [(5, 5), (6, 6), (7, 7), (9, 7), (12, 4)]
def test_hypergauge_zigzag1(zigzag):
    """Expected curve for the zigzag fixture."""
    assert zigzag.determination == [
        (0, 1), (1, 2), (2, 1), (3.5, 2.5), (4, 2), (5.5, 0.5), (6, 1),
        (7.5, 2.5), (8, 2), (9, 3), (10, 2), (11.5, 0.5), (12, 1)]
def test_hypergauge_zigzag2():
    """A wider zigzag with period-4 oscillating bounds."""
    g = Gauge(2, Gauge(3, 5, 3, at=0), Gauge(2, 2, 0, at=0), at=0)
    for x in range(5):
        g.max_gauge.add_momentum(+1, since=x * 4, until=x * 4 + 2)
        g.max_gauge.add_momentum(-1, since=x * 4 + 2, until=x * 4 + 4)
        g.min_gauge.add_momentum(-1, since=x * 4, until=x * 4 + 2)
        g.min_gauge.add_momentum(+1, since=x * 4 + 2, until=x * 4 + 4)
    for x in range(4):
        t = sum(y * 2 for y in range(x + 1))
        g.add_momentum(+1, since=t, until=t + (x + 1))
        g.add_momentum(-1, since=t + (x + 1), until=t + 2 * (x + 1))
    assert g.determination == [
        (0, 2), (1, 3), (2, 2), (3.5, 3.5), (4, 3), (6, 1), (8, 3), (9, 4),
        (11.5, 1.5), (12, 2), (14.5, 4.5), (16, 3), (18.5, 0.5), (20, 2)]
def test_hypergauge_hybrid1():
    # hybrid 1: same velocity of `g` and `g.max_gauge`.
    # (suggested by @hybrid0)
    g = Gauge(0, Gauge(1, 5, at=0), at=0)
    g.add_momentum(+1)
    g.max_gauge.add_momentum(+1, since=1)
    assert g.determination == [(0, 0), (1, 1), (5, 5)]
def test_hypergauge_hybrid2():
    # hybrid 2: velocity of `g.max_gauge` is faster than `g`'s.
    g = Gauge(0, Gauge(1, 5, at=0), at=0)
    g.add_momentum(+1)
    g.max_gauge.add_momentum(+2, since=1)
    assert g.determination == [(0, 0), (1, 1), (5, 5)]
def test_hypergauge_hybrid3():
    # hybrid 3: velocity of `g.max_gauge` is slower than `g`'s.
    g = Gauge(0, Gauge(1, 5, at=0), at=0)
    g.add_momentum(+1)
    g.max_gauge.add_momentum(+0.5, since=1)
    assert g.determination == [(0, 0), (1, 1), (9, 5)]
def test_hyper_hypergauge(zigzag, bidir):
    """Hyper-gauges whose bounds are themselves hyper-gauges."""
    # under zigzag 1
    g = Gauge(1, zigzag, at=0)
    g.add_momentum(+0.5)
    assert round_determination(g.determination, precision=2) == [
        (0, 1), (1.33, 1.67), (2, 1), (4, 2), (5.5, 0.5), (9.5, 2.5),
        (10, 2), (11.5, 0.5), (12.5, 1)]
    # between zigzag 1 ~ bidirectional hyper-gauge
    g = Gauge(3, bidir, zigzag, at=0)
    g.add_momentum(+3, since=0, until=3)
    g.add_momentum(-3, since=3, until=6)
    g.add_momentum(+3, since=6, until=9)
    g.add_momentum(-3, since=9, until=12)
    assert round_determination(g.determination, precision=2) == [
        (0, 3), (1, 6), (2.5, 7.5), (3, 7), (5, 1), (5.5, 0.5), (6, 1),
        (8, 7), (9, 7), (11, 1), (11.5, 0.5), (12, 1)]
def test_hypergauge_with_different_base_time():
    """A gauge and its hyper max may have different base times."""
    g = Gauge(0, Gauge(10, 100, at=100), at=0)
    g.add_momentum(+1)
    assert g.max_gauge.get(0) == 10
    assert g.get(10) == 10
    g = Gauge(0, Gauge(10, 100, at=0), at=100)
    g.add_momentum(+1)
    assert g.max_gauge.get(100) == 10
    assert g.get(110) == 10
def test_limited_gauges():
    """A gauge registers with its hyper max gauge and unlinks cleanly.

    Fixes two defects in the weakref-collection retry loop: it never
    triggered a collection (pointless on PyPy, which the NOTE targets and
    which motivated the otherwise-unused ``gc`` import), and it swallowed
    the AssertionError on every retry so the test could never fail here.
    """
    max_g = Gauge(10, 100, at=0)
    g = Gauge(0, max_g, at=0)
    assert g in max_g.limited_gauges()
    g.set_max(10, at=0)
    assert g not in max_g.limited_gauges()
    # clear dead links.
    g.set_max(max_g, at=0)
    assert len(max_g.limited_gauges()) == 1
    del g
    # NOTE: Weak references could not be collected by GC immediately in PyPy.
    for x in range(10):
        gc.collect()
        if len(max_g.limited_gauges()) == 0:
            break
    # Fail loudly if the dead link never cleared.
    assert len(max_g.limited_gauges()) == 0
def test_over_max_on_hypergauge():
    """Setting above a hyper max raises unless outbound=OK or max caught up."""
    g = Gauge(1, Gauge(10, 20, at=0), at=0)
    g.max_gauge.add_momentum(+1)
    with pytest.raises(ValueError):
        g.set(20, at=0)
    g.set(20, at=0, outbound=OK)
    assert g.get(at=0) == 20
    g.set(20, at=10)
    assert g.get(at=10) == 20
    assert g.get(at=0) == 20 # past was forgot
def test_pickle_hypergauge():
    """Pickling preserves the hyper max link and its back-reference."""
    # case 1 from :func:`test_hypergauge`.
    g = Gauge(12, 100, at=0)
    g.add_momentum(+1, since=1, until=6)
    g.add_momentum(-1, since=3, until=8)
    g.set_max(Gauge(15, 15, at=0), at=0)
    g.max_gauge.add_momentum(-1, until=5)
    assert g.determination == [
        (0, 12), (1, 12), (2, 13), (3, 12), (5, 10), (6, 10), (8, 8)]
    assert g.max_gauge.determination == [(0, 15), (5, 10)]
    data = pickle.dumps(g)
    g2 = pickle.loads(data)
    assert g2.max_gauge is not None
    assert g2.determination == [
        (0, 12), (1, 12), (2, 13), (3, 12), (5, 10), (6, 10), (8, 8)]
    assert g2.max_gauge.determination == [(0, 15), (5, 10)]
    assert g2 in g2.max_gauge.limited_gauges()
def test_thin_momenta():
    """Many nearly-instantaneous spikes still keep the value in range."""
    g = Gauge(0, 100, at=0)
    for x in range(1000):
        g.add_momentum(+1000000000, since=x, until=x + 1e-10)
    assert_all_in_range(g)
    assert g.get(0) == 0
    assert g.get(1001) == 100
    for x, y in zip(range(9999), range(1, 10000)):
        assert 0 <= g.get(x / 10.) <= g.get(y / 10.) <= 100
def test_clear_momentum_events():
    """Removing a momentum removes its ADD/REMOVE events."""
    g = Gauge(0, 10, at=0)
    m = g.add_momentum(+1, since=10, until=20)
    assert list(g.momentum_events()) == \
        [(0, NONE, None), (10, ADD, m), (20, REMOVE, m), (+inf, NONE, None)]
    # assert len(g._events) == 2
    g.remove_momentum(m)
    assert list(g.momentum_events()) == [(0, NONE, None), (+inf, NONE, None)]
    # assert len(g._events) == 0
def test_decr_max():
    """Lowering max rebases the gauge; skewed base times are handled."""
    # normal gauge
    g = Gauge(0, 10, at=0)
    g.add_momentum(+2)
    g.add_momentum(-1)
    assert g.base[TIME] == 0
    assert g.get(10) == 10
    g.set_max(5, at=10)
    g.set(10, outbound=OK, at=10)
    assert g.base[TIME] == 10
    assert g.get(10) == 10
    assert g.get(15) == 5
    assert g.get(20) == 5
    # hyper-gauge
    g = Gauge(0, Gauge(10, 100, at=0), at=0)
    g.add_momentum(+2)
    g.add_momentum(-1)
    assert g.base[TIME] == 0
    assert g.get(10) == 10
    g.max_gauge.decr(5, at=10)
    assert g.base[TIME] == 10
    assert g.get(10) == 5
    assert g.get(20) == 5
    # skewed hyper-gauge
    g = Gauge(0, Gauge(10, 100, at=10), at=0)
    g.add_momentum(+2)
    g.add_momentum(-1)
    # NOTE(review): the disabled base-time assertions below predate a
    # behavior change around forgetting the past before the base time.
    # assert g.base[TIME] == 0
    assert g.get(10) == 10
    g.max_gauge.decr(5, at=10)
    # assert g.base[TIME] == 0
    assert g.get(10) == 5
    assert g.get(20) == 5
    # decr max earlier than the gauge's base time.
    g = Gauge(0, Gauge(10, 100, at=10), at=5)
    g.add_momentum(+1)
    assert g.determination == [(5, 0), (15, 10)]
    g.max_gauge.decr(5, at=0)
    assert g.determination == [(5, 0), (10, 5)]
    g.max_gauge.incr(10, at=10)
    assert g.determination == [(10, 5), (20, 15)]
def test_hypergauge_past_bugs(zigzag, bidir):
    """Regression testing for hyper-gauge."""
    # just one momentum
    g1 = Gauge(5, Gauge(5, 10, at=0), Gauge(5, 10, at=0), at=0)
    g1.max_gauge.add_momentum(+1)
    g1.min_gauge.add_momentum(-1)
    assert g1.determination == [(0, 5)]
    g1.add_momentum(+0.1, until=100)
    assert g1.determination == [(0, 5), (50, 10), (100, 10)]
    # floating-point inaccuracy problem 1
    g1 = Gauge(3, bidir, zigzag, at=0)
    g1.add_momentum(+6, since=0, until=1)
    g1.add_momentum(-6, since=1, until=2)
    g1.add_momentum(+6, since=2, until=3)
    g1.add_momentum(-6, since=3, until=4)
    g1.add_momentum(+6, since=4, until=5)
    g1.add_momentum(-6, since=5, until=6)
    g1.add_momentum(+6, since=6, until=7)
    g1.add_momentum(-6, since=7, until=8)
    g1.add_momentum(+6, since=8, until=9)
    g1.add_momentum(-6, since=9, until=10)
    g1.add_momentum(+6, since=10, until=11)
    g1.add_momentum(-6, since=11, until=12)
    assert round_determination(g1.determination, precision=2) == [
        (0, 3), (0.4, 5.4), (1, 6), (1.8, 1.2), (2, 1), (3, 7), (3.8, 2.2),
        (4, 2), (4.57, 5.43), (5, 5), (5.71, 0.71), (6, 1), (6.8, 5.8), (7, 6),
        (7.6, 2.4), (8, 2), (8.83, 7), (9, 7), (9.8, 2.2), (10, 2),
        (10.57, 5.43), (11, 5), (11.71, 0.71), (12, 1)]
    # float problem 2
    g2 = Gauge(0, Gauge(1, 1, at=0), at=0)
    for x in range(10):
        g2.add_momentum(+0.1, since=x, until=x + 1)
    g2.max_gauge.add_momentum(-0.1, since=0, until=6)
    g2.max_gauge.add_momentum(+0.5, since=6, until=10)
    assert round(g2.get(5), 1) == 0.5
    assert round(g2.get(6), 1) == 0.4
    assert round(g2.get(7), 1) == 0.5
    assert round(g2.get(8), 1) == 0.6
    assert round(g2.get(9), 1) == 0.7
    assert round(g2.get(10), 1) == 0.8
    # float problem 3
    g3_max_max = Gauge(3, bidir, zigzag, at=0)
    g3_max_max.add_momentum(+6, since=0, until=1)
    g3_max_max.add_momentum(-6, since=1, until=2)
    g3_max_max.add_momentum(+6, since=2, until=3)
    g3_max_max.add_momentum(-6, since=3, until=4)
    g3_max_max.add_momentum(+6, since=4, until=5)
    g3_max_max.add_momentum(-6, since=5, until=6)
    g3_max_max.add_momentum(+6, since=6, until=7)
    g3_max_max.add_momentum(-6, since=7, until=8)
    g3_max_max.add_momentum(+6, since=8, until=9)
    g3_max_max.add_momentum(-6, since=9, until=10)
    g3_max_max.add_momentum(+6, since=10, until=11)
    g3_max_max.add_momentum(-6, since=11, until=12)
    g3_max = Gauge(0, g3_max_max, at=0)
    for x in range(10):
        g3_max.add_momentum(+0.1, since=x)
    r = random.Random(10)
    g3 = Gauge(0, shift_gauge(zigzag, +3), g3_max, at=0)
    for x in range(10):
        g3.add_momentum(r.uniform(-10, 10), since=x, until=x + 1)
    assert round(g3.get(9), 1) == 2.9 # not 2.4133871928
    # bound at first
    g4 = Gauge(0, 10, Gauge(0, 10, at=1), at=0)
    g4.min_gauge.add_momentum(+1, until=11)
    g4.add_momentum(-1, until=10)
    assert g4.get(10) == 9 # not -10
    assert g4.determination == [(0, 0), (1, 0), (10, 9), (11, 10)]
    # floor is dense than ceil
    r = random.Random(2810856076715324514)
    g5 = Gauge(0, shift_gauge(zigzag, +3), g3, at=0)
    for x in range(4):
        g5.add_momentum(r.uniform(-10, 10), since=x, until=x + 1)
    assert round(g5.get(4), 1) == 5.0 # not 11.8
def assert_all_in_range(g, message=None):
    """Fail unless every determined point of *g* lies inside its bounds.

    A leading run of out-of-range points is tolerated (a gauge may start
    outbound); once any point is observed in range, a later excursion fails.
    """
    outbound = True
    for t, v in g.determination:
        # Check both the determined value and the value re-computed at t.
        for v in [v, g.get(t)]:
            in_range = g.get_min(t) <= v <= g.get_max(t)
            if in_range:
                outbound = False
                continue
            elif outbound:
                # Still in the initial outbound phase: tolerated.
                continue
            # from gaugeplot import show_gauge
            # show_gauge(g)
            report = ('[{0!r}] {1!r} <= {2!r} <= {3!r}'
                      ''.format(t, g.get_min(t), v, g.get_max(t)))
            if message is None:
                message = report
            else:
                message = '\n'.join([message, report])
            pytest.fail(message)
def random_gauge1(random=random, far=10, near=3, until=20):
    """Build a randomly driven gauge with hyper-bounds:
    (-far ~ -near) <= g <= (near ~ far).

    NOTE: the order of ``random.uniform`` calls is part of the contract —
    seeded regression tests replay it exactly.
    """
    upper = Gauge(random.uniform(near, far), far, near, at=0)
    lower = Gauge(random.uniform(-far, -near), -near, -far, at=0)
    g = Gauge(random.uniform(lower.min_value, upper.max_value),
              upper, lower, at=0)
    # Draw momenta strictly in this order: upper bound, gauge, lower bound.
    for target, step in ((upper, 5), (g, 2), (lower, 1)):
        for since in range(0, until, step):
            target.add_momentum(random.uniform(-far, +far),
                                since=since, until=since + step)
    return g
def random_gauge2(random=random, far=1000, near=1, until=20):
    """Build a randomly driven gauge bounded above only:
    0 <= g <= (near ~ far).

    NOTE: the order of ``random.uniform`` calls is part of the contract —
    seeded regression tests replay it exactly.
    """
    upper = Gauge(random.uniform(near, far), far, near, at=0)
    g = Gauge(random.uniform(0, upper.max_value), upper, at=0)
    # Draw momenta strictly in this order: upper bound, then gauge.
    for target, step in ((upper, 5), (g, 2)):
        for since in range(0, until, step):
            target.add_momentum(random.uniform(-far, +far),
                                since=since, until=since + step)
    return g
def test_randomly():
    """Fuzz random_gauge1/2 with fresh seeds; the seed is in the message."""
    times = 100
    # Floor division: random.randrange() requires an int — `2 ** 64 / 2`
    # is a float on Python 3 and raises on modern interpreters.
    maxint = 2 ** 64 // 2
    for y in range(times):
        seed = random.randrange(maxint)
        g = random_gauge1(Random(seed))
        assert_all_in_range(g, 'random_gauge1(R({0}))'.format(seed))
    for y in range(times):
        seed = random.randrange(maxint)
        g = random_gauge1(Random(seed), far=1000)
        assert_all_in_range(g, 'random_gauge1(R({0}), far=1000)'.format(seed))
    for y in range(times):
        seed = random.randrange(maxint)
        g = random_gauge1(Random(seed), near=1e-10)
        assert_all_in_range(g, 'random_gauge1(R({0}), near=1e-10)'
                            ''.format(seed))
    for y in range(times):
        seed = random.randrange(maxint)
        g = random_gauge2(Random(seed), far=1e4)
        assert_all_in_range(g, 'random_gauge2(R({0}), far=1e4)'.format(seed))
@pytest.mark.parametrize('seed', [5425676250556669398, 5788334089912086268])
def test_cpu_hang(seed):
    """Seeds whose determination once hung; must terminate and stay in range."""
    g = random_gauge1(Random(seed))
    assert_all_in_range(g, 'random_gauge1(R({0}))'.format(seed))
def test_repaired_random_gauges():
    """Seeds that exposed past bugs; replay them as fixed regressions."""
    # from test_randomly()
    assert_all_in_range(random_gauge1(Random(1098651790867685487)))
    assert_all_in_range(random_gauge1(Random(957826144573409526)))
    assert_all_in_range(random_gauge1(Random(7276062123994486117), near=1e-10))
    assert_all_in_range(random_gauge1(Random(6867673013126676888), near=1e-10))
    assert_all_in_range(random_gauge1(Random(8038810374719555655), near=1e-10))
    assert_all_in_range(random_gauge1(Random(5925612648020704501), near=1e-10))
    assert_all_in_range(random_gauge1(Random(2881266403492433952), far=1000))
    assert_all_in_range(random_gauge1(Random(6468976982055982554), far=1000))
    assert_all_in_range(random_gauge2(Random(3373542927760325757), far=1e6))
    assert_all_in_range(random_gauge2(Random(7588425536572564538), far=1e4))
def test_clamp_on_get():
    """get() must respect the floor in a tiny neighborhood around a rebase."""
    g = random_gauge1(Random(6883875130559908307))
    at = 14.803740162409357
    e = 00.000000000000001
    g.clear_momenta(at=at)
    for x in range(-100, +100):
        t = at + x * e
        assert g.get_min(t) <= g.get(t)
def test_false_accusation():
    """A no-op incr at a near-boundary time must not change the value."""
    g = random_gauge1(Random(6883875130559908307))
    assert g.get(15) == -3
    g.incr(0, at=14.803740162409364)
    assert g.get(15) == -3
    g.incr(0, at=14.803740162409365)
    assert g.get(15) == -3
def test_goal():
    """goal() is the final determined value."""
    g = Gauge(100, 100, at=0)
    assert g.goal() == 100
    g.add_momentum(-1)
    assert g.goal() == 0
    g.add_momentum(+1)
    assert g.goal() == 100
    g.add_momentum(-1, since=10000, until=10001)
    assert g.goal() == 99
def test_clamped_by_max_gauge():
    """Changing a hyper max clamps in-range values but not outbound ones."""
    # in_range, decr max -> clamp
    g = Gauge(10, Gauge(20, 20, at=0), at=0)
    assert g.get(0) == 10
    g.max_gauge.set(5, at=0)
    assert g.get(0) == 5
    # in_range, incr max -> not clamp
    g.max_gauge.set(15, at=0)
    assert g.get(0) == 5
    # outbound, decr max -> not clamp
    g.set(20, outbound=OK, at=0)
    assert g.get(0) == 20
    g.max_gauge.set(10, at=0)
    assert g.get(0) == 20
    # time-skewed
    g = Gauge(10, Gauge(20, 20, at=0), at=0)
    g.max_gauge.set(5, at=10)
    assert g.base[TIME] == 10
    assert g.base[VALUE] == 5
def test_set_range():
    """set_range may swap numeric bounds for hyper-gauges on the fly."""
    g = Gauge(0, 100, at=0)
    g.add_momentum(+1)
    assert g.determination == [(0, 0), (100, 100)]
    g.set_range(Gauge(100, 100, at=0), Gauge(0, 100, at=0), at=0)
    g.max_gauge.add_momentum(-1, until=40)
    g.min_gauge.add_momentum(+1, until=40)
    assert g.determination == [(0, 0), (60, 60)]
    g.clear_momenta(at=30)
    g.add_momentum(-1)
    assert g.determination == [(30, 30), (40, 40)]
def test_in_range():
    """in_range(t) reports whether the value at t is inside the bounds."""
    g = Gauge(20, 10, at=0)
    assert not g.in_range(0)
    assert not g.in_range(20)
    g.add_momentum(-1)
    assert not g.in_range(0)
    assert g.in_range(20)
def test_clamp():
    """clamp(at=t) snaps an outbound value to the nearest bound."""
    g = Gauge(20, max=10, min=0, at=0)
    assert g.clamp(at=0) == 10
    g = Gauge(-10, max=10, min=0, at=0)
    assert g.clamp(at=0) == 0
def test_momentum_event_order():
    """An ADD event sorts before the REMOVE event at the same instant."""
    class MyGauge(Gauge):
        def _make_momentum(self, m):
            return m
    g = MyGauge(0, 100, at=0)
    m = Momentum(+1, since=10, until=10)
    g.add_momentum(m)
    assert \
        list(g.momentum_events()) == \
        [(0, NONE, None), (10, ADD, m), (10, REMOVE, m), (+inf, NONE, None)]
def test_case7():
    """Regression: a rising fake hyper max intersecting a falling gauge."""
    f = FakeGauge([(0, 0), (1, 1)])
    g = Gauge(3.5, f, at=-1)
    g.add_momentum(-2)
    g.add_momentum(+1)
    assert g.determination == [(-1, 3.5), (0.5, 0.5), (1, 0)]
def test_case7_reversed():
    """Mirror of test_case7 with a falling fake hyper min."""
    f = FakeGauge([(0, 0), (1, -1)])
    g = Gauge(-3.5, 0, f, at=-1)
    g.add_momentum(+2)
    g.add_momentum(-1)
    assert g.determination == [(-1, -3.5), (0.5, -0.5), (1, 0)]
def test_intersection_of_vertical_segment():
    """A subnormal-width segment (slope ~ inf) still intersects correctly."""
    assert 0 != 1e-309
    assert math.isinf(1 / 1e-309)
    f = FakeGauge([(0, 0), (1e-309, 1)])
    assert f.get(0.000000000000000000000) == 0
    assert f.get(0.000000000000000000001) == 1
    g = Gauge(2.5, f, at=-1)
    g.add_momentum(-2)
    g.add_momentum(+1)
    assert \
        round_determination(g.determination, precision=1) == \
        [(-1, 2.5), (0, 0.5), (0.5, 0)]
def test_intersection_of_vertical_segment_reversed():
    """Mirror of the vertical-segment case on the min side."""
    f = FakeGauge([(0, 0), (1e-309, -1)])
    g = Gauge(-2.5, 0, f, at=-1)
    g.add_momentum(+2)
    g.add_momentum(-1)
    assert \
        round_determination(g.determination, precision=1) == \
        [(-1, -2.5), (0, -0.5), (0.5, 0)]
def test_invalidate_returns():
    """invalidate() reports whether a cached determination was discarded."""
    g = Gauge(0, 100, at=0)
    assert not g.invalidate()
    g.get(0)
    assert g.invalidate()
    assert not g.invalidate()
# Adapt forget_past() before base time test cases
# -*- coding: utf-8 -*-
from contextlib import contextmanager
import gc
import math
import operator
import pickle
import random
from random import Random
import time
import pytest
from pytest import approx
import gauge
from gauge import Gauge, Momentum
from gauge.constants import ADD, CLAMP, inf, NONE, OK, ONCE, REMOVE
from gauge.deterministic import (
Boundary, Determination, Horizon, Line, Ray, Segment)
PRECISION = 8
TIME, VALUE = 0, 1
class FakeGauge(Gauge):
    """A gauge whose determination is pinned to the given list of
    (time, value) pairs instead of being computed from momenta.
    """
    def __init__(self, determination):
        super(FakeGauge, self).__init__(0, 0, 0, 0)
        # Allocate a Determination without running its normal __init__,
        # then fill it with the canned points.
        self._determination = Determination.__new__(Determination)
        self._determination.extend(determination)
    @property
    def determination(self):
        # Shadow Gauge.determination so the canned points are always
        # returned as-is.
        return self._determination
def round_(x):
    """Round *x* to the module-wide test precision (``PRECISION``)."""
    rounded = round(x, PRECISION)
    return rounded
@contextmanager
def t(timestamp):
    """Freeze ``gauge.core.now`` at *timestamp* for the duration of the
    ``with`` block; the real ``time.time`` is restored on exit.
    """
    gauge.core.now = lambda: float(timestamp)
    try:
        yield
    finally:
        # always restore, even if the block raised.
        gauge.core.now = time.time
def round_determination(determination, precision=0):
    """Round each (time, value) pair of *determination* to *precision*
    decimal places and return the result as a list of tuples.
    """
    rounded = []
    for moment, amount in determination:
        rounded.append((round(moment, precision), round(amount, precision)))
    return rounded
def is_gauge(x):
    """Whether the value is an instance of :class:`Gauge`.

    True for Gauge subclasses as well (e.g. :class:`FakeGauge`).
    """
    return isinstance(x, Gauge)
def shift_gauge(gauge, delta=0):
    """Adds the given delta to a gauge.

    Scalar bounds are shifted arithmetically; hyper-gauge bounds are
    shifted recursively.  The momenta are carried over unchanged.
    """
    if gauge.max_gauge is not None:
        max_ = shift_gauge(gauge.max_gauge, delta)
    else:
        max_ = gauge.max_value + delta
    if gauge.min_gauge is not None:
        min_ = shift_gauge(gauge.min_gauge, delta)
    else:
        min_ = gauge.min_value + delta
    shifted = Gauge(gauge.base[VALUE] + delta, max_, min_, gauge.base[TIME])
    for momentum in gauge.momenta:
        shifted.add_momentum(momentum)
    return shifted
def test_momenta_in_range():
g = Gauge(12, 100, at=0)
g.add_momentum(+1, since=1, until=6)
g.add_momentum(-1, since=3, until=8)
assert g.determination == [
(0, 12), (1, 12), (3, 14), (6, 14), (8, 12)]
def test_over_max():
g = Gauge(8, 10, at=0)
g.add_momentum(+1, since=0, until=4)
assert g.determination == [(0, 8), (2, 10), (4, 10)]
g = Gauge(12, 10, at=0)
g.add_momentum(-1, since=0, until=4)
assert g.determination == [(0, 12), (2, 10), (4, 8)]
g = Gauge(12, 10, at=0)
g.add_momentum(+1, since=0, until=4)
g.add_momentum(-2, since=0, until=4)
assert g.determination == [(0, 12), (1, 10), (4, 7)]
g = Gauge(12, 10, at=0)
g.add_momentum(+1, since=1, until=6)
g.add_momentum(-1, since=3, until=8)
g.add_momentum(+1, since=10, until=14)
g.add_momentum(-1, since=13, until=16)
assert g.determination == [
(0, 12), (1, 12), (3, 12), (5, 10), (6, 10), (8, 8),
(10, 8), (12, 10), (13, 10), (14, 10), (16, 8)]
def test_under_min():
g = Gauge(2, 10, at=0)
g.add_momentum(-1, since=0, until=4)
assert g.determination == [(0, 2), (2, 0), (4, 0)]
g = Gauge(-2, 10, at=0)
g.add_momentum(+1, since=0, until=4)
assert g.determination == [(0, -2), (2, 0), (4, 2)]
g = Gauge(-2, 10, at=0)
g.add_momentum(-1, since=0, until=4)
g.add_momentum(+2, since=0, until=4)
assert g.determination == [(0, -2), (1, 0), (4, 3)]
g = Gauge(-2, 10, at=0)
g.add_momentum(-1, since=1, until=6)
g.add_momentum(+1, since=3, until=8)
g.add_momentum(-1, since=10, until=14)
g.add_momentum(+1, since=13, until=16)
assert g.determination == [
(0, -2), (1, -2), (3, -2), (5, 0), (6, 0), (8, 2),
(10, 2), (12, 0), (13, 0), (14, 0), (16, 2)]
def test_permanent():
g = Gauge(10, 10, at=0)
g.add_momentum(-1)
assert g.determination == [(0, 10), (10, 0)]
g = Gauge(0, 10, at=0)
g.add_momentum(+1)
assert g.determination == [(0, 0), (10, 10)]
g = Gauge(12, 10, at=0)
g.add_momentum(-1)
assert g.determination == [(0, 12), (2, 10), (12, 0)]
g = Gauge(5, 10, at=0)
g.add_momentum(+1, since=3)
assert g.determination == [(0, 5), (3, 5), (8, 10)]
g = Gauge(5, 10, at=0)
g.add_momentum(+1, until=8)
assert g.determination == [(0, 5), (5, 10), (8, 10)]
def test_life():
with t(0):
life = Gauge(100, 100)
life.add_momentum(-1)
assert life.get() == 100
with t(1):
assert life.get() == 99
with t(2):
assert life.get() == 98
with t(10):
assert life.get() == 90
life.incr(1)
assert life.get() == 91
with t(11):
assert life.get() == 90
def test_no_momentum():
g = Gauge(1, 10, at=0)
assert g.determination == [(0, 1)]
assert g.get() == 1
def test_ok_outbound():
g = Gauge(1, 10)
with pytest.raises(ValueError):
g.set(11)
with pytest.raises(ValueError):
g.incr(100)
with pytest.raises(ValueError):
g.decr(100)
g.set(10)
assert g.get() == 10
g.set(11, outbound=OK)
assert g.get() == 11
def test_once_outbound():
g = Gauge(1, 10)
assert g.incr(5, outbound=ONCE) == 6
assert g.incr(5, outbound=ONCE) == 11
with pytest.raises(ValueError):
g.incr(1, outbound=ONCE)
def test_clamp_outbound():
g = Gauge(1, 10)
g.set(11, outbound=CLAMP)
assert g.get() == 10
g.incr(100, outbound=CLAMP)
assert g.get() == 10
g.decr(100, outbound=CLAMP)
assert g.get() == 0
g.incr(3, outbound=CLAMP)
assert g.get() == 3
g.decr(1, outbound=CLAMP)
assert g.get() == 2
g.set(100, outbound=OK)
g.incr(3, outbound=CLAMP)
assert g.get() == 100
g.decr(3, outbound=CLAMP)
assert g.get() == 97
g.set(98, outbound=CLAMP)
assert g.get() == 97
g.set(97, outbound=CLAMP)
assert g.get() == 97
g.set(96, outbound=CLAMP)
assert g.get() == 96
def test_set_min_max():
# without momentum
g = Gauge(5, 10)
assert g.get_max() == 10
assert g.get_min() == 0
assert g.get() == 5
g.set_range(max=100, min=10)
assert g.get_max() == 100
assert g.get_min() == 10
assert g.get() == 10
g.set_min(10)
assert g.get() == 10
g.set_min(5)
assert g.get() == 10
g.set_range(max=5, min=0)
assert g.get_max() == 5
assert g.get_min() == 0
assert g.get() == 5
# with momentum
g = Gauge(5, 10, at=0)
g.add_momentum(+1)
assert g.determination == [(0, 5), (5, 10)]
g.set_max(50, at=0)
assert g.determination == [(0, 5), (45, 50)]
g.set_min(40, at=0)
assert g.determination == [(0, 40), (10, 50)]
def test_pickle():
    # A gauge with many momenta must survive a pickle round-trip with
    # an identical determination.
    g = Gauge(0, 10, at=0)
    r = Random(17171771)  # fixed seed keeps the test deterministic
    for x in range(10000):
        since = r.randrange(1000)
        until = since + 1 + r.randrange(1000)  # guarantees until > since
        g.add_momentum(r.uniform(-10, +10), since=since, until=until)
    data = pickle.dumps(g)
    g2 = pickle.loads(data)
    assert g.determination == g2.determination
def test_make_momentum():
g = Gauge(0, 10, at=0)
m = g.add_momentum(+1)
assert isinstance(m, Momentum)
with pytest.raises(TypeError):
g.add_momentum(m, since=1)
with pytest.raises(TypeError):
g.add_momentum(m, until=2)
def test_clear_momenta():
g = Gauge(0, 10, at=0)
g.add_momentum(+1)
g.clear_momenta(at=5)
assert g.get(5) == 5
assert g.determination == [(5, 5)]
# clear momenta when the value is out of the range
g.add_momentum(+1)
g.set(15, outbound=OK, at=10)
g.clear_momenta(at=10)
assert g.get(10) == 15
assert g.determination == [(10, 15)]
# rebase by Gauge.clear_momenta()
g.clear_momenta(100)
assert g.get() == 100
def test_when():
g = Gauge(0, 10, at=0)
assert g.when(0) == 0
with pytest.raises(ValueError):
g.when(10)
g.add_momentum(+1)
assert g.when(10) == 10
g.add_momentum(+1, since=3, until=5)
assert g.when(10) == 8
g.add_momentum(-2, since=4, until=8)
assert g.when(0) == 0
assert g.when(1) == 1
assert g.when(2) == 2
assert g.when(3) == 3
assert g.when(4) == 3.5
assert g.when(5) == 4
assert g.when(6) == 12
assert g.when(7) == 13
assert g.when(8) == 14
assert g.when(9) == 15
assert g.when(10) == 16
with pytest.raises(ValueError):
g.when(11)
def test_whenever():
g = Gauge(0, 10, at=0)
g.add_momentum(+1)
g.add_momentum(-2, since=3, until=4)
g.add_momentum(-2, since=5, until=6)
g.add_momentum(-2, since=7, until=8)
assert g.when(3) == 3
assert g.when(3, after=1) == 5
assert g.when(3, after=2) == 7
assert g.when(3, after=3) == 9
with pytest.raises(ValueError):
g.when(3, after=4)
whenever = g.whenever(3)
assert list(whenever) == [3, 5, 7, 9]
# inverse
g = Gauge(10, 10, at=0)
g.add_momentum(-1)
g.add_momentum(+2, since=3, until=4)
g.add_momentum(+2, since=5, until=6)
g.add_momentum(+2, since=7, until=8)
assert g.when(7) == 3
assert g.when(7, after=1) == 5
def test_since_gte_until():
g = Gauge(0, 10, at=0)
with pytest.raises(ValueError):
g.add_momentum(+1, since=1, until=1)
with pytest.raises(ValueError):
g.add_momentum(+1, since=2, until=1)
def test_repr():
g = Gauge(0, 10, at=0)
assert repr(g) == '<Gauge 0.00/10.00>'
g.set_min(-10, at=0)
assert repr(g) == '<Gauge 0.00 between -10.00~10.00>'
g.set_max(Gauge(10, 10), at=0)
assert repr(g) == '<Gauge 0.00 between -10.00~<Gauge 10.00/10.00>>'
m = Momentum(+100, since=10, until=20)
assert repr(m) == '<Momentum +100.00/s 10.00~20.00>'
m = Momentum(+100, since=10)
assert repr(m) == '<Momentum +100.00/s 10.00~>'
m = Momentum(+100, until=20)
assert repr(m) == '<Momentum +100.00/s ~20.00>'
h = Horizon(10, 20, 30)
assert repr(h) == '<Line[HORIZON] 30.00 for 10.00~20.00>'
r = Ray(10, 20, 30, 40)
assert repr(r) == '<Line[RAY] 30.00+40.00/s for 10.00~20.00>'
def test_case1():
g = Gauge(0, 5, at=0)
g.add_momentum(+1)
g.add_momentum(-2, since=1, until=3)
g.add_momentum(+1, since=5, until=7)
assert g.determination == [
(0, 0), (1, 1), (2, 0), (3, 0), (5, 2), (6.5, 5), (7, 5)]
def test_case2():
g = Gauge(12, 10, at=0)
g.add_momentum(+2, since=2, until=10)
g.add_momentum(-1, since=4, until=8)
assert g.determination == [
(0, 12), (2, 12), (4, 12), (6, 10), (8, 10), (10, 10)]
def test_case3():
g = Gauge(0, 10, at=0)
assert g.get(0) == 0
g.add_momentum(+1, since=0)
assert g.get(10) == 10
g.incr(3, outbound=OK, at=11)
assert g.get(11) == 13
g.add_momentum(-1, since=13)
assert g.get(13) == 13
assert g.get(14) == 12
assert g.get(15) == 11
assert g.get(16) == 10
assert g.get(17) == 10
def test_case4():
g = Gauge(0, 10, at=0)
g.add_momentum(+1)
g.add_momentum(+1)
assert g.determination == [(0, 0), (5, 10)]
def test_case5():
g = Gauge(1, 1, 0, at=0)
for x in range(11):
g.add_momentum(-0.1, since=x, until=x + 1)
assert g.get(11) == 0 # adjusted by min=0
def test_case6():
g = Gauge(1, 10, at=1417868986.94428)
g.add_momentum(+0.167)
g.add_momentum(-0.417, since=1417863954.884099)
assert g.determination[-1][VALUE] == 0
def test_remove_momentum():
g = Gauge(0, 10, at=0)
m1 = g.add_momentum(+1)
m2 = g.add_momentum(Momentum(+1))
g.add_momentum(+2, since=10)
g.add_momentum(-3, until=100)
assert len(g.momenta) == 4
assert g.remove_momentum(m2) == m2
assert len(g.momenta) == 3
assert m1 in g.momenta
assert m2 in g.momenta
assert g.remove_momentum(m2) == m2
assert len(g.momenta) == 2
assert m1 not in g.momenta
assert m2 not in g.momenta
with pytest.raises(ValueError):
g.remove_momentum(+2)
assert g.remove_momentum(+2, since=10) == (+2, 10, +inf)
assert len(g.momenta) == 1
assert g.remove_momentum(Momentum(-3, until=100)) == (-3, -inf, 100)
assert not g.momenta
def test_remove_momentum_event_on_remove_momentum():
g = Gauge(0, 10, at=0)
g.add_momentum(+1)
assert g.determination == [(0, 0), (10, 10)]
g.remove_momentum(+1)
g.add_momentum(+1)
assert g.determination == [(0, 0), (10, 10)]
g.remove_momentum(+1)
g.add_momentum(+1)
g.add_momentum(+1)
assert g.determination == [(0, 0), (5, 10)]
g.clear_momenta(at=0)
g.add_momentum(+1)
assert g.determination == [(0, 0), (10, 10)]
def test_momenta_order():
g = Gauge(0, 50, at=0)
g.add_momentum(+3, since=0, until=5)
g.add_momentum(+2, since=1, until=4)
g.add_momentum(+1, since=2, until=3)
assert g.get(0) == 0
assert g.get(1) == 3
assert g.get(2) == 8
assert g.get(3) == 14
g.decr(1, at=3)
assert g.get(3) == 13
assert g.get(4) == 18
assert g.get(5) == 21
def test_forget_past():
g = Gauge(0, 50, at=0)
g.add_momentum(+1, since=0, until=5)
g.add_momentum(0, since=0)
g.add_momentum(0, until=999)
assert g.get(0) == 0
assert g.get(1) == 1
assert g.get(2) == 2
assert g.get(3) == 3
assert g.get(4) == 4
assert g.get(5) == 5
assert g.get(10) == 5
assert g.get(20) == 5
assert len(g.momenta) == 3
g.forget_past(at=30)
assert len(g.momenta) == 2
def test_forget_past_before_base_time():
g = Gauge(0, 100, at=100)
g.add_momentum(+1)
assert g.get(100) == 0
assert g.get(150) == 50
assert g.get(200) == 100
with pytest.raises(ValueError):
g.forget_past(at=50)
assert g.get(100) == 0
assert g.get(150) == 50
assert g.get(200) == 100
g.forget_past(at=150)
assert g.get(100) == 50
assert g.get(150) == 50
assert g.get(200) == 100
with pytest.raises(ValueError):
g.forget_past(0, at=100)
assert g.get(100) == 50
assert g.get(150) == 50
assert g.get(200) == 100
def test_extensibility_of_make_momentum():
class MyGauge(Gauge):
def _make_momentum(self, *args):
args = args[::-1]
return super(MyGauge, self)._make_momentum(*args)
g = MyGauge(0, 10, at=0)
m = g.add_momentum(3, 2, 1)
assert m == (1, 2, 3)
def test_just_one_momentum():
def gen_gauge(since=None, until=None):
g = Gauge(5, 10, at=0)
g.add_momentum(+0.1, since, until)
return g
# None ~ None
g = gen_gauge()
assert g.determination == [(0, 5), (50, 10)]
# 0 ~ None
g = gen_gauge(since=0)
assert g.determination == [(0, 5), (50, 10)]
# None ~ 100
g = gen_gauge(until=100)
assert g.determination == [(0, 5), (50, 10), (100, 10)]
# 0 ~ 100
g = gen_gauge(since=0, until=100)
assert g.determination == [(0, 5), (50, 10), (100, 10)]
# -100 ~ 100
g = gen_gauge(since=-100, until=100)
assert g.determination == [(0, 5), (50, 10), (100, 10)]
def test_velocity():
g = Gauge(0, 10, at=0)
g.add_momentum(+1, since=2)
g.add_momentum(+1, since=4, until=6)
assert g.velocity(at=0) == 0
assert g.velocity(at=2) == +1
assert g.velocity(at=3) == +1
assert g.velocity(at=4) == +2
assert g.velocity(at=5) == +2
assert g.velocity(at=6) == +1
assert g.velocity(at=7) == +1
assert g.velocity(at=8) == +1
assert g.velocity(at=9) == +1
assert g.velocity(at=10) == 0
def test_lines():
horizon = Horizon(0, 10, 1234)
assert horizon.get(0) == 1234
assert horizon.get(10) == 1234
assert horizon.guess(100) == 1234
assert horizon.guess(-100) == 1234
ray = Ray(0, 10, 0, velocity=+1)
assert ray.get(0) == 0
assert ray.get(5) == 5
with pytest.raises(ValueError):
ray.get(-1)
with pytest.raises(ValueError):
ray.get(11)
assert ray.guess(-1) == 0
assert ray.guess(11) == 10
assert ray.intersect(Horizon(0, 10, 5)) == (5, 5)
assert ray.intersect(Horizon(0, 10, 10)) == (10, 10)
assert ray.intersect(Horizon(0, +inf, 5)) == (5, 5)
with pytest.raises(ValueError):
ray.intersect(Horizon(0, 10, 15))
with pytest.raises(ValueError):
ray.intersect(Horizon(6, 10, 5))
with pytest.raises(ValueError):
ray.intersect(Horizon(-inf, +inf, 5))
ray = Ray(0, +inf, 0, velocity=+1)
assert ray.get(100) == 100
assert ray.get(100000) == 100000
seg = Segment(0, 10, -50.05804016454045, 12.780503036230357)
assert seg.get(10) == 12.780503036230357
assert seg.guess(100) == 12.780503036230357
assert seg.guess(-100) == -50.05804016454045
def test_boundary():
# walk
lines = [Horizon(0, 10, 0),
Ray(10, 20, 0, velocity=+1),
Ray(20, 30, 10, velocity=-1)]
boundary = Boundary(lines)
assert boundary.line is lines[0]
boundary.walk()
assert boundary.line is lines[1]
boundary.walk()
assert boundary.line is lines[2]
with pytest.raises(StopIteration):
boundary.walk()
# cmp
assert boundary.cmp(1, 2)
assert not boundary.cmp(2, 1)
assert boundary.cmp_eq(1, 2)
assert boundary.cmp_eq(1, 1)
assert not boundary.cmp_eq(2, 1)
assert boundary.cmp_inv(2, 1)
assert not boundary.cmp_inv(1, 2)
assert not boundary.cmp_inv(1, 1)
# best
zero_line = Segment(0, 0, 0, 0)
ceil = Boundary([zero_line], operator.lt)
floor = Boundary([zero_line], operator.gt)
assert ceil.best is min
assert floor.best is max
# repr
assert repr(ceil) == ('<Boundary line={0}, cmp=<built-in function lt>>'
''.format(zero_line))
@pytest.fixture
def zigzag():
    """A gauge whose max and min bounds zigzag with period 2 while the
    gauge itself bounces up and down with a growing period.
    """
    g = Gauge(1, Gauge(2, 3, 2, at=0), Gauge(1, 1, 0, at=0), at=0)
    for x in range(6):
        # max rises then falls each 2-unit window; min mirrors it.
        g.max_gauge.add_momentum(+1, since=x * 2, until=x * 2 + 1)
        g.max_gauge.add_momentum(-1, since=x * 2 + 1, until=x * 2 + 2)
        g.min_gauge.add_momentum(-1, since=x * 2, until=x * 2 + 1)
        g.min_gauge.add_momentum(+1, since=x * 2 + 1, until=x * 2 + 2)
    for x in range(3):
        # NOTE: this local `t` shadows the module-level t() helper.
        t = sum(y * 2 for y in range(x + 1))
        g.add_momentum(+1, since=t, until=t + (x + 1))
        g.add_momentum(-1, since=t + (x + 1), until=t + 2 * (x + 1))
    return g
@pytest.fixture
def bidir():
    """A gauge that alternates direction every 3 units while its max
    and min bounds also move, shrinking and restoring the range.
    """
    g = Gauge(5, Gauge(10, 10, at=0), Gauge(0, 10, at=0), at=0)
    g.add_momentum(+1, since=0, until=3)
    g.add_momentum(-1, since=3, until=6)
    g.add_momentum(+1, since=6, until=9)
    g.add_momentum(-1, since=9, until=12)
    g.max_gauge.add_momentum(-1, since=0, until=4)
    g.max_gauge.add_momentum(+1, since=6, until=7)
    g.min_gauge.add_momentum(+1, since=1, until=6)
    g.min_gauge.add_momentum(-1, since=6, until=8)
    return g
def test_hypergauge_case1():
g = Gauge(12, 100, at=0)
g.add_momentum(+1, since=1, until=6)
g.add_momentum(-1, since=3, until=8)
g.set_max(Gauge(15, 15, at=0), at=0)
g.max_gauge.add_momentum(-1, until=5)
assert g.determination == [
(0, 12), (1, 12), (2, 13), (3, 12), (5, 10), (6, 10), (8, 8)]
assert g.max_gauge.determination == [(0, 15), (5, 10)]
def test_hypergauge_case2():
g = Gauge(12, 100, at=0)
g.add_momentum(+1, since=1, until=6)
g.add_momentum(-1, since=3, until=8)
g.set_max(Gauge(15, 15, at=0), at=0)
g.max_gauge.add_momentum(-1, until=4)
g.max_gauge.add_momentum(+1, since=4, until=6)
assert g.determination == [
(0, 12), (1, 12), (2, 13), (3, 12), (4, 11), (6, 11), (8, 9)]
def test_hypergauge_case3():
g = Gauge(12, 100, at=0)
g.add_momentum(+1, since=1, until=6)
g.add_momentum(-1, since=3, until=8)
g.set_max(10, at=0)
g.set(12, outbound=OK, at=0)
assert g.determination == [
(0, 12), (1, 12), (3, 12), (5, 10), (6, 10), (8, 8)]
g.set_max(Gauge(10, 100, at=0), at=0)
assert g.determination == [
(0, 12), (1, 12), (3, 12), (5, 10), (6, 10), (8, 8)]
def test_hypergauge_case4():
g = Gauge(12, 100, at=0)
g.add_momentum(+1, since=1, until=6)
g.add_momentum(-1, since=3, until=8)
g.set_max(Gauge(15, 15, at=0), at=0)
g.max_gauge.add_momentum(-1)
assert g.determination == [
(0, 12), (1, 12), (2, 13), (3, 12), (6, 9), (8, 7), (15, 0)]
# bidirectional hyper-gauge
g_max = Gauge(10, 10, at=0)
g_max.add_momentum(-1, since=0, until=4)
g_max.add_momentum(+1, since=6, until=7)
g_min = Gauge(0, 10, at=0)
g_min.add_momentum(+1, since=1, until=6)
g_min.add_momentum(-1, since=6, until=8)
g = Gauge(5, g_max, g_min, at=0)
g.add_momentum(+1, since=0, until=3)
g.add_momentum(-1, since=3, until=6)
g.add_momentum(+1, since=6, until=9)
g.add_momentum(-1, since=9, until=12)
assert g.determination == [
(0, 5), (2.5, 7.5), (3, 7), (4, 6), (5.5, 4.5), (6, 5), (8, 7),
(9, 7), (12, 4)]
g_min.incr(1, at=5)
assert g.determination == [(5, 5), (6, 6), (7, 7), (9, 7), (12, 4)]
def test_hypergauge_zigzag1(zigzag):
assert zigzag.determination == [
(0, 1), (1, 2), (2, 1), (3.5, 2.5), (4, 2), (5.5, 0.5), (6, 1),
(7.5, 2.5), (8, 2), (9, 3), (10, 2), (11.5, 0.5), (12, 1)]
def test_hypergauge_zigzag2():
g = Gauge(2, Gauge(3, 5, 3, at=0), Gauge(2, 2, 0, at=0), at=0)
for x in range(5):
g.max_gauge.add_momentum(+1, since=x * 4, until=x * 4 + 2)
g.max_gauge.add_momentum(-1, since=x * 4 + 2, until=x * 4 + 4)
g.min_gauge.add_momentum(-1, since=x * 4, until=x * 4 + 2)
g.min_gauge.add_momentum(+1, since=x * 4 + 2, until=x * 4 + 4)
for x in range(4):
t = sum(y * 2 for y in range(x + 1))
g.add_momentum(+1, since=t, until=t + (x + 1))
g.add_momentum(-1, since=t + (x + 1), until=t + 2 * (x + 1))
assert g.determination == [
(0, 2), (1, 3), (2, 2), (3.5, 3.5), (4, 3), (6, 1), (8, 3), (9, 4),
(11.5, 1.5), (12, 2), (14.5, 4.5), (16, 3), (18.5, 0.5), (20, 2)]
def test_hypergauge_hybrid1():
# hybrid 1: same velocity of `g` and `g.max_gauge`.
# (suggested by @hybrid0)
g = Gauge(0, Gauge(1, 5, at=0), at=0)
g.add_momentum(+1)
g.max_gauge.add_momentum(+1, since=1)
assert g.determination == [(0, 0), (1, 1), (5, 5)]
def test_hypergauge_hybrid2():
# hybrid 2: velocity of `g.max_gauge` is faster than `g`'s.
g = Gauge(0, Gauge(1, 5, at=0), at=0)
g.add_momentum(+1)
g.max_gauge.add_momentum(+2, since=1)
assert g.determination == [(0, 0), (1, 1), (5, 5)]
def test_hypergauge_hybrid3():
# hybrid 3: velocity of `g.max_gauge` is slower than `g`'s.
g = Gauge(0, Gauge(1, 5, at=0), at=0)
g.add_momentum(+1)
g.max_gauge.add_momentum(+0.5, since=1)
assert g.determination == [(0, 0), (1, 1), (9, 5)]
def test_hyper_hypergauge(zigzag, bidir):
# under zigzag 1
g = Gauge(1, zigzag, at=0)
g.add_momentum(+0.5)
assert round_determination(g.determination, precision=2) == [
(0, 1), (1.33, 1.67), (2, 1), (4, 2), (5.5, 0.5), (9.5, 2.5),
(10, 2), (11.5, 0.5), (12.5, 1)]
# between zigzag 1 ~ bidirectional hyper-gauge
g = Gauge(3, bidir, zigzag, at=0)
g.add_momentum(+3, since=0, until=3)
g.add_momentum(-3, since=3, until=6)
g.add_momentum(+3, since=6, until=9)
g.add_momentum(-3, since=9, until=12)
assert round_determination(g.determination, precision=2) == [
(0, 3), (1, 6), (2.5, 7.5), (3, 7), (5, 1), (5.5, 0.5), (6, 1),
(8, 7), (9, 7), (11, 1), (11.5, 0.5), (12, 1)]
def test_hypergauge_with_different_base_time():
g = Gauge(0, Gauge(10, 100, at=100), at=0)
g.add_momentum(+1)
assert g.max_gauge.get(0) == 10
assert g.get(10) == 10
g = Gauge(0, Gauge(10, 100, at=0), at=100)
g.add_momentum(+1)
assert g.max_gauge.get(100) == 10
assert g.get(110) == 10
def test_limited_gauges():
max_g = Gauge(10, 100, at=0)
g = Gauge(0, max_g, at=0)
assert g in max_g.limited_gauges()
g.set_max(10, at=0)
assert g not in max_g.limited_gauges()
# clear dead links.
g.set_max(max_g, at=0)
assert len(max_g.limited_gauges()) == 1
del g
# NOTE: Weak references could not be collected by GC immediately in PyPy.
for x in range(10):
try:
assert len(max_g.limited_gauges()) == 0
except AssertionError:
continue
else:
break
def test_over_max_on_hypergauge():
g = Gauge(1, Gauge(10, 20, at=0), at=0)
g.max_gauge.add_momentum(+1)
with pytest.raises(ValueError):
g.set(20, at=0)
g.set(20, at=0, outbound=OK)
assert g.get(at=0) == 20
g.set(20, at=10)
assert g.get(at=10) == 20
assert g.get(at=0) == 20 # past was forgot
def test_pickle_hypergauge():
# case 1 from :func:`test_hypergauge`.
g = Gauge(12, 100, at=0)
g.add_momentum(+1, since=1, until=6)
g.add_momentum(-1, since=3, until=8)
g.set_max(Gauge(15, 15, at=0), at=0)
g.max_gauge.add_momentum(-1, until=5)
assert g.determination == [
(0, 12), (1, 12), (2, 13), (3, 12), (5, 10), (6, 10), (8, 8)]
assert g.max_gauge.determination == [(0, 15), (5, 10)]
data = pickle.dumps(g)
g2 = pickle.loads(data)
assert g2.max_gauge is not None
assert g2.determination == [
(0, 12), (1, 12), (2, 13), (3, 12), (5, 10), (6, 10), (8, 8)]
assert g2.max_gauge.determination == [(0, 15), (5, 10)]
assert g2 in g2.max_gauge.limited_gauges()
def test_thin_momenta():
g = Gauge(0, 100, at=0)
for x in range(1000):
g.add_momentum(+1000000000, since=x, until=x + 1e-10)
assert_all_in_range(g)
assert g.get(0) == 0
assert g.get(1001) == 100
for x, y in zip(range(9999), range(1, 10000)):
assert 0 <= g.get(x / 10.) <= g.get(y / 10.) <= 100
def test_clear_momentum_events():
g = Gauge(0, 10, at=0)
m = g.add_momentum(+1, since=10, until=20)
assert list(g.momentum_events()) == \
[(0, NONE, None), (10, ADD, m), (20, REMOVE, m), (+inf, NONE, None)]
# assert len(g._events) == 2
g.remove_momentum(m)
assert list(g.momentum_events()) == [(0, NONE, None), (+inf, NONE, None)]
# assert len(g._events) == 0
def test_decr_max_normal():
g = Gauge(0, 10, at=0)
g.add_momentum(+2)
g.add_momentum(-1)
assert g.base[TIME] == 0
assert g.get(10) == 10
g.set_max(5, at=10)
g.set(10, outbound=OK, at=10)
assert g.base[TIME] == 10
assert g.get(10) == 10
assert g.get(15) == 5
assert g.get(20) == 5
def test_decr_max_hyper():
g = Gauge(0, Gauge(10, 100, at=0), at=0)
g.add_momentum(+2)
g.add_momentum(-1)
assert g.base[TIME] == 0
assert g.get(10) == 10
g.max_gauge.decr(5, at=10)
assert g.base[TIME] == 10
assert g.get(10) == 5
assert g.get(20) == 5
def test_decr_max_skewed_hyper():
# skewed hyper-gauge
g = Gauge(0, Gauge(10, 100, at=10), at=0)
g.add_momentum(+2)
g.add_momentum(-1)
assert g.base[TIME] == 0
assert g.get(10) == 10
g.max_gauge.decr(5, at=10)
assert g.base[TIME] == 10
assert g.get(10) == 5
assert g.get(20) == 5
def test_decr_max_before_base_time():
# decr max earlier than the gauge's base time.
g = Gauge(0, Gauge(10, 100, at=10), at=5)
g.add_momentum(+1)
assert g.determination == [(5, 0), (15, 10)]
with pytest.raises(ValueError):
g.max_gauge.decr(5, at=0)
assert g.determination == [(5, 0), (15, 10)]
g.max_gauge.incr(10, at=10)
assert g.determination == [(10, 5), (25, 20)]
def test_hypergauge_past_bugs(zigzag, bidir):
"""Regression testing for hyper-gauge."""
# just one momentum
g1 = Gauge(5, Gauge(5, 10, at=0), Gauge(5, 10, at=0), at=0)
g1.max_gauge.add_momentum(+1)
g1.min_gauge.add_momentum(-1)
assert g1.determination == [(0, 5)]
g1.add_momentum(+0.1, until=100)
assert g1.determination == [(0, 5), (50, 10), (100, 10)]
# floating-point inaccuracy problem 1
g1 = Gauge(3, bidir, zigzag, at=0)
g1.add_momentum(+6, since=0, until=1)
g1.add_momentum(-6, since=1, until=2)
g1.add_momentum(+6, since=2, until=3)
g1.add_momentum(-6, since=3, until=4)
g1.add_momentum(+6, since=4, until=5)
g1.add_momentum(-6, since=5, until=6)
g1.add_momentum(+6, since=6, until=7)
g1.add_momentum(-6, since=7, until=8)
g1.add_momentum(+6, since=8, until=9)
g1.add_momentum(-6, since=9, until=10)
g1.add_momentum(+6, since=10, until=11)
g1.add_momentum(-6, since=11, until=12)
assert round_determination(g1.determination, precision=2) == [
(0, 3), (0.4, 5.4), (1, 6), (1.8, 1.2), (2, 1), (3, 7), (3.8, 2.2),
(4, 2), (4.57, 5.43), (5, 5), (5.71, 0.71), (6, 1), (6.8, 5.8), (7, 6),
(7.6, 2.4), (8, 2), (8.83, 7), (9, 7), (9.8, 2.2), (10, 2),
(10.57, 5.43), (11, 5), (11.71, 0.71), (12, 1)]
# float problem 2
g2 = Gauge(0, Gauge(1, 1, at=0), at=0)
for x in range(10):
g2.add_momentum(+0.1, since=x, until=x + 1)
g2.max_gauge.add_momentum(-0.1, since=0, until=6)
g2.max_gauge.add_momentum(+0.5, since=6, until=10)
assert round(g2.get(5), 1) == 0.5
assert round(g2.get(6), 1) == 0.4
assert round(g2.get(7), 1) == 0.5
assert round(g2.get(8), 1) == 0.6
assert round(g2.get(9), 1) == 0.7
assert round(g2.get(10), 1) == 0.8
# float problem 3
g3_max_max = Gauge(3, bidir, zigzag, at=0)
g3_max_max.add_momentum(+6, since=0, until=1)
g3_max_max.add_momentum(-6, since=1, until=2)
g3_max_max.add_momentum(+6, since=2, until=3)
g3_max_max.add_momentum(-6, since=3, until=4)
g3_max_max.add_momentum(+6, since=4, until=5)
g3_max_max.add_momentum(-6, since=5, until=6)
g3_max_max.add_momentum(+6, since=6, until=7)
g3_max_max.add_momentum(-6, since=7, until=8)
g3_max_max.add_momentum(+6, since=8, until=9)
g3_max_max.add_momentum(-6, since=9, until=10)
g3_max_max.add_momentum(+6, since=10, until=11)
g3_max_max.add_momentum(-6, since=11, until=12)
g3_max = Gauge(0, g3_max_max, at=0)
for x in range(10):
g3_max.add_momentum(+0.1, since=x)
r = random.Random(10)
g3 = Gauge(0, shift_gauge(zigzag, +3), g3_max, at=0)
for x in range(10):
g3.add_momentum(r.uniform(-10, 10), since=x, until=x + 1)
assert round(g3.get(9), 1) == 2.9 # not 2.4133871928
# bound at first
g4 = Gauge(0, 10, Gauge(0, 10, at=1), at=0)
g4.min_gauge.add_momentum(+1, until=11)
g4.add_momentum(-1, until=10)
assert g4.get(10) == 9 # not -10
assert g4.determination == [(0, 0), (1, 0), (10, 9), (11, 10)]
# floor is dense than ceil
r = random.Random(2810856076715324514)
g5 = Gauge(0, shift_gauge(zigzag, +3), g3, at=0)
for x in range(4):
g5.add_momentum(r.uniform(-10, 10), since=x, until=x + 1)
assert round(g5.get(4), 1) == 5.0 # not 11.8
def assert_all_in_range(g, message=None):
    """Fail unless every determination point of *g* lies between its
    min and max bounds, once the gauge has first entered the range.

    A leading out-of-range stretch is tolerated (``outbound`` starts
    True); after the first in-range point, any excursion fails the
    test with a report of the offending time and bounds.
    """
    outbound = True
    for t, v in g.determination:
        # check both the determined value and the re-computed get(t).
        for v in [v, g.get(t)]:
            in_range = g.get_min(t) <= v <= g.get_max(t)
            if in_range:
                outbound = False
                continue
            elif outbound:
                # still inside the tolerated out-of-range prefix.
                continue
            # from gaugeplot import show_gauge
            # show_gauge(g)
            report = ('[{0!r}] {1!r} <= {2!r} <= {3!r}'
                      ''.format(t, g.get_min(t), v, g.get_max(t)))
            if message is None:
                message = report
            else:
                message = '\n'.join([message, report])
            pytest.fail(message)
def random_gauge1(random=random, far=10, near=3, until=20):
    """Build a gauge bounded above and below by gauges with random
    momenta.

    The *random* parameter deliberately shadows the ``random`` module
    (its default); pass a seeded ``random.Random`` to reproduce a case.
    """
    # (-far ~ -near) <= g <= (near ~ far)
    g_max = Gauge(random.uniform(near, far), far, near, at=0)
    g_min = Gauge(random.uniform(-far, -near), -near, -far, at=0)
    value = random.uniform(g_min.min_value, g_max.max_value)
    g = Gauge(value, g_max, g_min, at=0)
    # the bounds and the gauge change on different periods (5, 2, 1).
    for x in range(0, until, 5):
        g_max.add_momentum(random.uniform(-far, +far), since=x, until=x + 5)
    for x in range(0, until, 2):
        g.add_momentum(random.uniform(-far, +far), since=x, until=x + 2)
    for x in range(0, until, 1):
        g_min.add_momentum(random.uniform(-far, +far), since=x, until=x + 1)
    return g
def random_gauge2(random=random, far=1000, near=1, until=20):
    """Build a gauge with only a random upper bound (min fixed at 0).

    The *random* parameter deliberately shadows the ``random`` module
    (its default); pass a seeded ``random.Random`` to reproduce a case.
    """
    # 0 <= g <= (near ~ far)
    g_max = Gauge(random.uniform(near, far), far, near, at=0)
    value = random.uniform(0, g_max.max_value)
    g = Gauge(value, g_max, at=0)
    for x in range(0, until, 5):
        g_max.add_momentum(random.uniform(-far, +far), since=x, until=x + 5)
    for x in range(0, until, 2):
        g.add_momentum(random.uniform(-far, +far), since=x, until=x + 2)
    return g
def test_randomly():
    """Fuzz test: randomly built gauges must keep every determination
    point inside their min/max bounds.

    Each failure message embeds the RNG seed so a failing case can be
    replayed deterministically (see test_repaired_random_gauges).
    """
    times = 100
    # Floor division keeps maxint an int: `2 ** 64 / 2` is a float on
    # Python 3, and random.randrange() rejects float arguments on
    # modern Python versions (TypeError).  `//` is also valid on
    # Python 2 with the same value.
    maxint = 2 ** 64 // 2
    for y in range(times):
        seed = random.randrange(maxint)
        g = random_gauge1(Random(seed))
        assert_all_in_range(g, 'random_gauge1(R({0}))'.format(seed))
    for y in range(times):
        seed = random.randrange(maxint)
        g = random_gauge1(Random(seed), far=1000)
        assert_all_in_range(g, 'random_gauge1(R({0}), far=1000)'.format(seed))
    for y in range(times):
        seed = random.randrange(maxint)
        g = random_gauge1(Random(seed), near=1e-10)
        assert_all_in_range(g, 'random_gauge1(R({0}), near=1e-10)'
                               ''.format(seed))
    for y in range(times):
        seed = random.randrange(maxint)
        g = random_gauge2(Random(seed), far=1e4)
        assert_all_in_range(g, 'random_gauge2(R({0}), far=1e4)'.format(seed))
@pytest.mark.parametrize('seed', [5425676250556669398, 5788334089912086268])
def test_cpu_hang(seed):
g = random_gauge1(Random(seed))
assert_all_in_range(g, 'random_gauge1(R({0}))'.format(seed))
def test_repaired_random_gauges():
# from test_randomly()
assert_all_in_range(random_gauge1(Random(1098651790867685487)))
assert_all_in_range(random_gauge1(Random(957826144573409526)))
assert_all_in_range(random_gauge1(Random(7276062123994486117), near=1e-10))
assert_all_in_range(random_gauge1(Random(6867673013126676888), near=1e-10))
assert_all_in_range(random_gauge1(Random(8038810374719555655), near=1e-10))
assert_all_in_range(random_gauge1(Random(5925612648020704501), near=1e-10))
assert_all_in_range(random_gauge1(Random(2881266403492433952), far=1000))
assert_all_in_range(random_gauge1(Random(6468976982055982554), far=1000))
assert_all_in_range(random_gauge2(Random(3373542927760325757), far=1e6))
assert_all_in_range(random_gauge2(Random(7588425536572564538), far=1e4))
def test_clamp_on_get():
    # Sweep times in 1e-15 steps around a troublesome instant and make
    # sure get() never reports a value below the min bound.
    g = random_gauge1(Random(6883875130559908307))
    at = 14.803740162409357
    e = 00.000000000000001  # i.e. 1e-15
    g.clear_momenta(at=at)
    for x in range(-100, +100):
        t = at + x * e
        assert g.get_min(t) <= g.get(t)
def test_false_accusation():
g = random_gauge1(Random(6883875130559908307))
assert g.get(15) == -3
g.incr(0, at=14.803740162409364)
assert g.get(15) == -3
g.incr(0, at=14.803740162409365)
assert g.get(15) == -3
def test_goal():
g = Gauge(100, 100, at=0)
assert g.goal() == 100
g.add_momentum(-1)
assert g.goal() == 0
g.add_momentum(+1)
assert g.goal() == 100
g.add_momentum(-1, since=10000, until=10001)
assert g.goal() == 99
def test_clamped_by_max_gauge():
# in_range, decr max -> clamp
g = Gauge(10, Gauge(20, 20, at=0), at=0)
assert g.get(0) == 10
g.max_gauge.set(5, at=0)
assert g.get(0) == 5
# in_range, incr max -> not clamp
g.max_gauge.set(15, at=0)
assert g.get(0) == 5
# outbound, decr max -> not clamp
g.set(20, outbound=OK, at=0)
assert g.get(0) == 20
g.max_gauge.set(10, at=0)
assert g.get(0) == 20
# time-skewed
g = Gauge(10, Gauge(20, 20, at=0), at=0)
g.max_gauge.set(5, at=10)
assert g.base[TIME] == 10
assert g.base[VALUE] == 5
def test_set_range():
    """set_range() replaces static limits with live gauges and the
    determination then tracks their momenta."""
    g = Gauge(0, 100, at=0)
    g.add_momentum(+1)
    assert g.determination == [(0, 0), (100, 100)]
    g.set_range(Gauge(100, 100, at=0), Gauge(0, 100, at=0), at=0)
    g.max_gauge.add_momentum(-1, until=40)
    g.min_gauge.add_momentum(+1, until=40)
    assert g.determination == [(0, 0), (60, 60)]
    g.clear_momenta(at=30)
    g.add_momentum(-1)
    assert g.determination == [(30, 30), (40, 40)]
def test_in_range():
    """in_range(at) reflects whether the value lies within the limits at
    the given time (value 20 vs. max 10 decays into range over time)."""
    g = Gauge(20, 10, at=0)
    assert not g.in_range(0)
    assert not g.in_range(20)
    g.add_momentum(-1)
    assert not g.in_range(0)
    assert g.in_range(20)
def test_clamp():
    """clamp() pulls an out-of-bounds value back to the nearest limit."""
    g = Gauge(20, max=10, min=0, at=0)
    assert g.clamp(at=0) == 10
    g = Gauge(-10, max=10, min=0, at=0)
    assert g.clamp(at=0) == 0
def test_momentum_event_order():
    """momentum_events() yields ADD before REMOVE for a zero-length
    momentum at one timestamp, bracketed by the NONE sentinels."""
    class MyGauge(Gauge):
        def _make_momentum(self, m):
            # pass the momentum through unchanged so the same object can
            # be compared by identity in the assertion below
            return m
    g = MyGauge(0, 100, at=0)
    m = Momentum(+1, since=10, until=10)
    g.add_momentum(m)
    assert \
        list(g.momentum_events()) == \
        [(0, NONE, None), (10, ADD, m), (10, REMOVE, m), (+inf, NONE, None)]
def test_case7():
    """Regression case 7: determination clamped by a rising FakeGauge
    used as the maximum."""
    f = FakeGauge([(0, 0), (1, 1)])
    g = Gauge(3.5, f, at=-1)
    g.add_momentum(-2)
    g.add_momentum(+1)
    assert g.determination == [(-1, 3.5), (0.5, 0.5), (1, 0)]
def test_case7_reversed():
    """Mirror of case 7: a falling FakeGauge used as the minimum."""
    f = FakeGauge([(0, 0), (1, -1)])
    g = Gauge(-3.5, 0, f, at=-1)
    g.add_momentum(+2)
    g.add_momentum(-1)
    assert g.determination == [(-1, -3.5), (0.5, -0.5), (1, 0)]
def test_case8():
    """Regression case 8: a hyper-gauge (a gauge serving as another
    gauge's maximum).  Applying the same effect twice used to push the
    underlying gauge out of its limited range.
    """
    m = Gauge(679, 679, at=1503918965.158631)
    m.add_momentum(+0.001157)
    g = Gauge(679, m, at=1503918965.158631)
    g.add_momentum(+1)
    # Gauge "g" should be always in the range of "m".
    def G_SHOULD_BE_FULLY_IN_RANGE():
        assert g.determination.in_range_since == g.base[TIME]
    G_SHOULD_BE_FULLY_IN_RANGE()
    # first effect ------------------------------------------------------------
    m.forget_past(at=1503919261.248346)
    G_SHOULD_BE_FULLY_IN_RANGE()
    m.add_momentum(0, since=1503919261.248346, until=1503919266.248346)
    m.forget_past(at=1503919261.248346)
    G_SHOULD_BE_FULLY_IN_RANGE()
    m.add_momentum(-0.2, since=1503919261.248346, until=1503919561.248346)
    G_SHOULD_BE_FULLY_IN_RANGE()
    # second effect -----------------------------------------------------------
    m.forget_past(at=1503919279.381339)
    G_SHOULD_BE_FULLY_IN_RANGE()
    m.forget_past(at=1503919279.381339)
    G_SHOULD_BE_FULLY_IN_RANGE()
    m.add_momentum(0, since=1503919279.381339, until=1503919284.381339)
    G_SHOULD_BE_FULLY_IN_RANGE()
    m.forget_past(at=1503919279.482356)
    m.remove_momentum(-0.2, since=1503919261.248346, until=1503919561.248346)
    G_SHOULD_BE_FULLY_IN_RANGE()
    with pytest.raises(ValueError):
        # rewinding forget_past() to an older timestamp is rejected
        m.forget_past(at=1503919279.381339)
    m.add_momentum(-0.2, since=1503919279.381339, until=1503919579.381339)
    G_SHOULD_BE_FULLY_IN_RANGE()  # failing!
    m.forget_past(at=1503919287.680848)
    G_SHOULD_BE_FULLY_IN_RANGE()  # failing!
def test_case8_simple():
    """Minimal case 8: forget_past() on the max gauge must not rewind to
    an older timestamp, and the clamped gauge still decays to 0."""
    max_ = Gauge(10, 10, at=0)
    max_.add_momentum(-1)
    g = Gauge(10, max_, at=0)
    max_.forget_past(at=2)
    with pytest.raises(ValueError):
        max_.forget_past(at=1)  # forget older past.
    assert g.get(99999) == approx(0)
def test_intersection_of_vertical_segment():
    """A near-vertical max-gauge segment (denormal width 1e-309, so its
    slope overflows to +inf) must still intersect the determination."""
    assert 0 != 1e-309
    assert math.isinf(1 / 1e-309)
    f = FakeGauge([(0, 0), (1e-309, 1)])
    assert f.get(0.000000000000000000000) == 0
    assert f.get(0.000000000000000000001) == 1
    g = Gauge(2.5, f, at=-1)
    g.add_momentum(-2)
    g.add_momentum(+1)
    assert \
        round_determination(g.determination, precision=1) == \
        [(-1, 2.5), (0, 0.5), (0.5, 0)]
def test_intersection_of_vertical_segment_reversed():
    """Mirror of the vertical-segment case with a falling min gauge."""
    f = FakeGauge([(0, 0), (1e-309, -1)])
    g = Gauge(-2.5, 0, f, at=-1)
    g.add_momentum(+2)
    g.add_momentum(-1)
    assert \
        round_determination(g.determination, precision=1) == \
        [(-1, -2.5), (0, -0.5), (0.5, 0)]
def test_invalidate_returns():
    """invalidate() reports whether a cached determination was dropped:
    falsy when nothing was cached, truthy after get() populated it."""
    g = Gauge(0, 100, at=0)
    assert not g.invalidate()
    g.get(0)
    assert g.invalidate()
    assert not g.invalidate()
|
"""
The runner2 module is designed to run spider in one command.
To archive this all necessary information should be put into files.
These files contains:
* The spider package file. (spider.egg)
* The spider setting file. (spider.json)
* (Optional) Plugin packages. (`plugins/xxx.egg`)
This module can also resolve uninstalled dependencies installation.
"""
import os
import logging
import json
import yaml
import string
import random
import tempfile
import sys
import shutil
from argparse import ArgumentParser
from scrapydd.workspace import SpiderSetting
from .runner import main as runner_main
from .plugin import perform, _pip_installer
logger = logging.getLogger(__name__)
def randomString(stringLength=10):
    """Return a string of *stringLength* random lowercase ASCII letters."""
    alphabet = string.ascii_lowercase
    picks = [random.choice(alphabet) for _ in range(stringLength)]
    return ''.join(picks)
def main():
    """Run a spider in one command.

    Reads the spider settings file (JSON or YAML), installs any extra
    requirements, generates a temporary scrapy settings module from the
    plugin settings (only when plugin settings exist) and invokes the
    scrapy crawler.

    Need put plugin packages(eggs) in the `plugin` folder first.
    :return: None
    """
    parser = ArgumentParser()
    parser.add_argument('-f', '--file', dest='file', required=False,
                        default='spider.json', help='The spider settings json '
                                                    'file')
    args = parser.parse_args()
    file_ext = os.path.splitext(args.file)[1]
    if file_ext.lower() in ('.yaml', '.yml'):
        with open(args.file, 'r') as f:
            dic = yaml.load(f, yaml.Loader)
    elif file_ext.lower() == '.json':
        with open(args.file, 'r') as f:
            dic = json.load(f)
    else:
        raise Exception(f'Not supported file type : {args.file}')
    spider_setting = SpiderSetting.from_dict(dic)
    plugin_settings = spider_setting.plugin_settings
    extra_requirements = spider_setting.extra_requirements
    if extra_requirements:
        for requirement in extra_requirements:
            _pip_installer(requirement)
    # Bind the temp dir before entering try/finally so the finally clause
    # can never reference an unbound name if mkdtemp() itself fails.
    settings_module = 'settings_' + randomString(6)
    settings_package = tempfile.mkdtemp()
    try:
        # "with" guarantees the settings file is closed even when
        # perform() raises (the previous code leaked the handle on error).
        with open(os.path.join(settings_package,
                               settings_module + '.py'), 'w') as settings_stream:
            # Only generate plugin-based settings when plugin settings
            # actually exist; otherwise perform() would be fed None.
            if plugin_settings:
                perform(base_module=spider_setting.base_settings_module,
                        output_file=settings_stream,
                        input_file=plugin_settings)
        sys.path.append(settings_package)
        os.environ['SCRAPY_EXTRA_SETTINGS_MODULE'] = settings_module
        output_file = spider_setting.output_file or 'items.jl'
        argv = ['scrapy', 'crawl', spider_setting.spider_name, '-o', output_file]
        for param_key, param_value in spider_setting.spider_parameters.items():
            argv += [
                '-s',
                '%s=%s' % (param_key, param_value)
            ]
        runner_main(argv)
    except SystemExit:
        pass
    finally:
        if os.path.exists(settings_package):
            shutil.rmtree(settings_package)
def print_usage():
    """Print the runner2 command-line usage summary to stdout."""
    print("usage:")
    print('runner2 <command> [options]')
    print('available commands:')
    print('    crawl')
    print('    list')
    print('')
    print('options:')
    print('-g, --egg egg_file : specify spider egg file. Default is spider.egg in working folder.')
    print('-s, --settings settings_file : specify the spider settings json file. Default is spider.json in ')
    print('                               working folder.')
if __name__ == '__main__':
main()
Run the plugin settings module generation only when the plugin settings are not None.
"""
The runner2 module is designed to run spider in one command.
To archive this all necessary information should be put into files.
These files contains:
* The spider package file. (spider.egg)
* The spider setting file. (spider.json)
* (Optional) Plugin packages. (`plugins/xxx.egg`)
This module can also resolve uninstalled dependencies installation.
"""
import os
import logging
import json
import yaml
import string
import random
import tempfile
import sys
import shutil
from argparse import ArgumentParser
from scrapydd.workspace import SpiderSetting
from .runner import main as runner_main
from .plugin import perform, _pip_installer
logger = logging.getLogger(__name__)
def randomString(stringLength=10):
    """Generate *stringLength* random lowercase letters as one string."""
    pick = random.choice
    pool = string.ascii_lowercase
    return ''.join(pick(pool) for _ in range(stringLength))
def main():
    """Run a spider in one command.

    Reads the spider settings file (JSON or YAML), installs any extra
    requirements, generates a temporary scrapy settings module from the
    plugin settings (when present) and invokes the scrapy crawler.

    Need put plugin packages(eggs) in the `plugin` folder first.
    :return: None
    """
    parser = ArgumentParser()
    parser.add_argument('-f', '--file', dest='file', required=False,
                        default='spider.json', help='The spider settings json '
                                                    'file')
    args = parser.parse_args()
    file_ext = os.path.splitext(args.file)[1]
    if file_ext.lower() in ('.yaml', '.yml'):
        with open(args.file, 'r') as f:
            dic = yaml.load(f, yaml.Loader)
    elif file_ext.lower() == '.json':
        with open(args.file, 'r') as f:
            dic = json.load(f)
    else:
        raise Exception(f'Not supported file type : {args.file}')
    spider_setting = SpiderSetting.from_dict(dic)
    plugin_settings = spider_setting.plugin_settings
    extra_requirements = spider_setting.extra_requirements
    if extra_requirements:
        for requirement in extra_requirements:
            _pip_installer(requirement)
    # Bind the temp dir before entering try/finally so the finally clause
    # can never reference an unbound name if mkdtemp() itself fails.
    settings_module = 'settings_' + randomString(6)
    settings_package = tempfile.mkdtemp()
    try:
        # "with" guarantees the settings file is closed even when
        # perform() raises (the previous code leaked the handle on error).
        with open(os.path.join(settings_package,
                               settings_module + '.py'), 'w') as settings_stream:
            if plugin_settings:
                perform(base_module=spider_setting.base_settings_module,
                        output_file=settings_stream,
                        input_file=plugin_settings)
        sys.path.append(settings_package)
        os.environ['SCRAPY_EXTRA_SETTINGS_MODULE'] = settings_module
        output_file = spider_setting.output_file or 'items.jl'
        argv = ['scrapy', 'crawl', spider_setting.spider_name, '-o', output_file]
        for param_key, param_value in spider_setting.spider_parameters.items():
            argv += [
                '-s',
                '%s=%s' % (param_key, param_value)
            ]
        runner_main(argv)
    except SystemExit:
        pass
    finally:
        if os.path.exists(settings_package):
            shutil.rmtree(settings_package)
def print_usage():
    """Print the runner2 command-line usage summary to stdout."""
    print("usage:")
    print('runner2 <command> [options]')
    print('available commands:')
    print('    crawl')
    print('    list')
    print('')
    print('options:')
    print('-g, --egg egg_file : specify spider egg file. Default is spider.egg in working folder.')
    print('-s, --settings settings_file : specify the spider settings json file. Default is spider.json in ')
    print('                               working folder.')
if __name__ == '__main__':
main()
|
# Copyright 2012 the rootpy developers
# distributed under the terms of the GNU General Public License
from __future__ import absolute_import
import os
import re
import shutil
from glob import glob
import ROOT
from . import log; log = log[__name__]
from ...memory.keepalive import keepalive
from ...utils.silence import silence_sout_serr
from ...utils.path import mkdir_p
from ...context import (
do_nothing, working_directory, preserve_current_directory)
from ...io import root_open
from ... import asrootpy
from . import Channel, Measurement, HistoSys, OverallSys
__all__ = [
'make_channel',
'make_measurement',
'make_models',
'make_model',
'make_workspace',
'measurements_from_xml',
'write_measurement',
'patch_xml',
'split_norm_shape',
]
def make_channel(name, samples, data=None, verbose=False):
    """
    Build a HistFactory Channel named ``channel_<name>`` from *samples*.

    The optional *data* histogram is attached with ``SetData`` and
    *verbose* enables progress logging.
    """
    clog = log['make_channel'] if verbose else None
    if verbose:
        clog.info("creating channel {0}".format(name))
    # prefix with "channel_" to avoid a segfault when the name begins
    # with a digit
    channel = Channel('channel_{0}'.format(name))
    channel.SetStatErrorConfig(0.05, "Poisson")
    if data is not None:
        if verbose:
            clog.info("setting data")
        channel.SetData(data)
    for sample in samples:
        if verbose:
            clog.info("adding sample {0}".format(sample.GetName()))
        channel.AddSample(sample)
    return channel
def make_measurement(name,
                     channels,
                     lumi=1.0, lumi_rel_error=0.,
                     output_prefix='./histfactory',
                     POI=None,
                     const_params=None,
                     verbose=False):
    """
    Create a Measurement from a list of Channels.

    Parameters
    ----------
    name : string
        Measurement name; the object is named ``measurement_<name>``.
    channels : Channel or list of Channels
        The channels to include.
    lumi : float, optional (default=1.0)
        The luminosity value.
    lumi_rel_error : float, optional (default=0.)
        The relative error on the luminosity.
    output_prefix : string, optional (default='./histfactory')
        Output file prefix passed to ``SetOutputFilePrefix``.
    POI : string or list of strings, optional (default=None)
        Parameter(s) of interest.
    const_params : list of strings, optional (default=None)
        Parameters to hold constant.
    verbose : bool, optional (default=False)
        If True then log progress.

    Returns
    -------
    meas : Measurement
    """
    if verbose:
        llog = log['make_measurement']
        llog.info("creating measurement {0}".format(name))
    if not isinstance(channels, (list, tuple)):
        channels = [channels]
    # Create the measurement
    meas = Measurement('measurement_{0}'.format(name), '')
    meas.SetOutputFilePrefix(output_prefix)
    if POI is not None:
        # NOTE(review): `basestring` exists only on Python 2 -- confirm
        # before porting this module to Python 3
        if isinstance(POI, basestring):
            if verbose:
                llog.info("setting POI {0}".format(POI))
            meas.SetPOI(POI)
        else:
            if verbose:
                llog.info("adding POIs {0}".format(', '.join(POI)))
            for p in POI:
                meas.AddPOI(p)
    if verbose:
        llog.info("setting lumi={0:f} +/- {1:f}".format(lumi, lumi_rel_error))
    meas.lumi = lumi
    meas.lumi_rel_error = lumi_rel_error
    for channel in channels:
        if verbose:
            llog.info("adding channel {0}".format(channel.GetName()))
        meas.AddChannel(channel)
    if const_params is not None:
        if verbose:
            llog.info("adding constant parameters {0}".format(
                ', '.join(const_params)))
        for param in const_params:
            meas.AddConstantParam(param)
    return meas
def make_models(measurement, silence=False):
    """
    Build the workspace containing every model of *measurement*.

    When *silence* is True, HistFactory's stdout/stderr output is
    suppressed.
    """
    ctx = do_nothing if not silence else silence_sout_serr
    with ctx():
        ws = ROOT.RooStats.HistFactory.MakeModelAndMeasurementFast(
            measurement)
    return asrootpy(ws)
def make_model(measurement, channel=None, silence=False):
    """
    Create a workspace containing the model for a measurement.

    Parameters
    ----------
    measurement : Measurement
        The measurement to build the model from.
    channel : Channel, optional (default=None)
        If None then include all channels in the model, otherwise build
        the model for this single channel only.
    silence : bool, optional (default=False)
        If True, then silence HistFactory's output on stdout and stderr.

    Returns
    -------
    workspace : asrootpy'd RooWorkspace
    """
    context = silence_sout_serr if silence else do_nothing
    with context():
        hist2workspace = ROOT.RooStats.HistFactory.HistoToWorkspaceFactoryFast(
            measurement)
        if channel is not None:
            workspace = hist2workspace.MakeSingleChannelModel(
                measurement, channel)
        else:
            workspace = hist2workspace.MakeCombinedModel(measurement)
    workspace = asrootpy(workspace)
    # tie the measurement's lifetime to the workspace so it is not
    # destroyed while the workspace is still alive
    keepalive(workspace, measurement)
    return workspace
def make_workspace(name, channels,
                   lumi=1.0, lumi_rel_error=0.,
                   output_prefix='./histfactory',
                   POI=None,
                   const_params=None,
                   silence=False):
    """
    Build a combined workspace named ``workspace_<name>`` from *channels*.

    Returns the ``(workspace, measurement)`` pair.
    """
    channel_list = channels if isinstance(channels, (list, tuple)) else [channels]
    meas = make_measurement(
        name, channel_list,
        lumi=lumi,
        lumi_rel_error=lumi_rel_error,
        output_prefix=output_prefix,
        POI=POI,
        const_params=const_params)
    ws = make_model(meas, silence=silence)
    ws.SetName('workspace_{0}'.format(name))
    return ws, meas
def measurements_from_xml(filename,
                          collect_histograms=True,
                          cd_parent=False,
                          silence=False):
    """
    Read in a list of Measurements from XML; The equivalent of what
    hist2workspace does before calling MakeModelAndMeasurementFast
    (see make_models()).

    Parameters
    ----------
    filename : string
        Path of the top-level XML file.
    collect_histograms : bool, optional (default=True)
        If True then also call ``CollectHistograms`` on each measurement.
    cd_parent : bool, optional (default=False)
        If True then parse while in the parent of the directory
        containing the XML file.
    silence : bool, optional (default=False)
        If True then silence HistFactory's stdout/stderr output.

    Returns
    -------
    measurements : list of asrootpy'd Measurement objects
    """
    if not os.path.isfile(filename):
        raise OSError("the file {0} does not exist".format(filename))
    silence_context = silence_sout_serr if silence else do_nothing
    filename = os.path.abspath(os.path.normpath(filename))
    if cd_parent:
        # presumably the XML references paths relative to the parent
        # directory of the XML directory -- verify against callers
        xml_directory = os.path.dirname(filename)
        parent = os.path.abspath(os.path.join(xml_directory, os.pardir))
        cd_context = working_directory
    else:
        parent = None
        cd_context = do_nothing
    log.info("parsing XML in {0} ...".format(filename))
    with cd_context(parent):
        parser = ROOT.RooStats.HistFactory.ConfigParser()
        with silence_context():
            measurements_vect = parser.GetMeasurementsFromXML(filename)
        # prevent measurements_vect from being garbage collected
        ROOT.SetOwnership(measurements_vect, False)
        measurements = []
        for m in measurements_vect:
            if collect_histograms:
                with silence_context():
                    m.CollectHistograms()
            measurements.append(asrootpy(m))
    return measurements
def write_measurement(measurement,
                      root_file=None,
                      xml_path=None,
                      output_path=None,
                      output_suffix=None,
                      write_workspaces=False,
                      apply_xml_patches=True,
                      silence=False):
    """
    Write a measurement and RooWorkspaces for all contained channels
    into a ROOT file and write the XML files into a directory.

    Parameters
    ----------
    measurement : HistFactory::Measurement
        An asrootpy'd ``HistFactory::Measurement`` object
    root_file : ROOT TFile or string, optional (default=None)
        A ROOT file or string file name. The measurement and workspaces
        will be written to this file. If ``root_file is None`` then a
        new file will be created with the same name as the measurement and
        with the prefix ``ws_``.
    xml_path : string, optional (default=None)
        A directory path to write the XML into. If None, a new directory with
        the same name as the measurement and with the prefix ``xml_`` will be
        created.
    output_path : string, optional (default=None)
        If ``root_file is None``, create the ROOT file under this path.
        If ``xml_path is None``, create the XML directory under this path.
    output_suffix : string, optional (default=None)
        If ``root_file is None`` then a new file is created with the same name
        as the measurement and with the prefix ``ws_``. ``output_suffix`` will
        append a suffix to this file name (before the .root extension).
        If ``xml_path is None``, then a new directory is created with the
        same name as the measurement and with the prefix ``xml_``.
        ``output_suffix`` will append a suffix to this directory name.
    write_workspaces : bool, optional (default=False)
        If True then also write a RooWorkspace for each channel and for all
        channels combined.
    apply_xml_patches : bool, optional (default=True)
        Apply fixes on the output of ``Measurement::PrintXML()`` to avoid known
        HistFactory bugs. Some of the patches assume that the ROOT file
        containing the histograms will exist one directory level up from the
        XML and that hist2workspace, or any tool that later reads the XML will
        run from that same directory containing the ROOT file.
    silence : bool, optional (default=False)
        If True then capture and silence all stdout/stderr output from
        HistFactory.
    """
    context = silence_sout_serr if silence else do_nothing
    output_name = measurement.name
    if output_suffix is not None:
        output_name += '_{0}'.format(output_suffix)
    # make the name filesystem-friendly
    output_name = output_name.replace(' ', '_')
    if xml_path is None:
        xml_path = 'xml_{0}'.format(output_name)
        if output_path is not None:
            xml_path = os.path.join(output_path, xml_path)
    if not os.path.exists(xml_path):
        mkdir_p(xml_path)
    if root_file is None:
        root_file = 'ws_{0}.root'.format(output_name)
        if output_path is not None:
            root_file = os.path.join(output_path, root_file)
    # own_file: True when the file was opened here, so it must also be
    # closed here
    own_file = False
    # NOTE(review): `basestring` exists only on Python 2 -- confirm
    # before porting this module to Python 3
    if isinstance(root_file, basestring):
        root_file = root_open(root_file, 'recreate')
        own_file = True
    # cd() below changes ROOT's current directory; restore it on exit
    with preserve_current_directory():
        root_file.cd()
        log.info("writing histograms and measurement in {0} ...".format(
            root_file.GetName()))
        with context():
            measurement.writeToFile(root_file)
        # get modified measurement
        out_m = root_file.Get(measurement.name)
        log.info("writing XML in {0} ...".format(xml_path))
        with context():
            out_m.PrintXML(xml_path)
        if write_workspaces:
            log.info("writing combined model in {0} ...".format(
                root_file.GetName()))
            workspace = make_model(measurement, silence=silence)
            workspace.Write()
            for channel in measurement.channels:
                log.info("writing model for channel `{0}` in {1} ...".format(
                    channel.name, root_file.GetName()))
                workspace = make_model(
                    measurement, channel=channel, silence=silence)
                workspace.Write()
    if apply_xml_patches:
        # patch the output XML to avoid HistFactory bugs
        patch_xml(glob(os.path.join(xml_path, '*.xml')),
                  root_file=os.path.basename(root_file.GetName()))
    if own_file:
        root_file.Close()
def patch_xml(files, root_file=None, float_precision=3):
    """
    Apply patches to HistFactory XML output from PrintXML.

    Parameters
    ----------
    files : list of strings
        XML file paths to patch in place.
    root_file : string, optional (default=None)
        If given, rewrite every ``InputFile`` attribute to this file name.
    float_precision : int, optional (default=3)
        Number of decimal places to keep on float attribute values.
    """
    if float_precision < 0:
        raise ValueError("precision must be greater than 0")
    def fix_path(match):
        # keep only the last directory component plus the file name so
        # <Input> paths stay valid relative to the parent directory
        path = match.group(1)
        if path:
            head, tail = os.path.split(path)
            new_path = os.path.join(os.path.basename(head), tail)
        else:
            new_path = ''
        return '<Input>{0}</Input>'.format(new_path)
    for xmlfilename in files:
        xmlfilename = os.path.abspath(os.path.normpath(xmlfilename))
        # write into a temp file first, then replace the original
        patched_xmlfilename = '{0}.tmp'.format(xmlfilename)
        log.info("patching {0} ...".format(xmlfilename))
        fin = open(xmlfilename, 'r')
        fout = open(patched_xmlfilename, 'w')
        for line in fin:
            if root_file is not None:
                # point every InputFile attribute at the given ROOT file
                line = re.sub(
                    'InputFile="[^"]*"',
                    'InputFile="{0}"'.format(root_file), line)
            # an all-empty StatError element degrades to a bare Activate
            line = line.replace(
                '<StatError Activate="True" InputFile="" '
                'HistoName="" HistoPath="" />',
                '<StatError Activate="True" />')
            line = re.sub(
                '<Combination OutputFilePrefix="(\S*)" >',
                '<Combination OutputFilePrefix="hist2workspace" >', line)
            # drop empty attributes (attr="") and tidy up whitespace
            line = re.sub('\w+=""', '', line)
            line = re.sub('\s+/>', ' />', line)
            line = re.sub('(\S)\s+</', r'\1</', line)
            # HistFactory bug:
            line = re.sub('InputFileHigh="\S+"', '', line)
            line = re.sub('InputFileLow="\S+"', '', line)
            # HistFactory bug:
            line = line.replace(
                '<ParamSetting Const="True"></ParamSetting>', '')
            # chop off floats to desired precision
            line = re.sub(
                r'"(\d*\.\d{{{0:d},}})"'.format(float_precision + 1),
                lambda x: '"{0}"'.format(
                    str(round(float(x.group(1)), float_precision))),
                line)
            line = re.sub('"\s\s+(\S)', r'" \1', line)
            line = re.sub('<Input>(.*)</Input>', fix_path, line)
            fout.write(line)
        fin.close()
        fout.close()
        shutil.move(patched_xmlfilename, xmlfilename)
        # copy HistFactorySchema.dtd next to the XML if it is missing
        if not os.path.isfile(os.path.join(
                os.path.dirname(xmlfilename),
                'HistFactorySchema.dtd')):
            rootsys = os.getenv('ROOTSYS', None)
            if rootsys is not None:
                dtdfile = os.path.join(rootsys, 'etc/HistFactorySchema.dtd')
                target = os.path.dirname(xmlfilename)
                if os.path.isfile(dtdfile):
                    log.info("copying {0} to {1} ...".format(dtdfile, target))
                    shutil.copy(dtdfile, target)
                else:
                    log.warning("{0} does not exist".format(dtdfile))
            else:
                log.warning(
                    "$ROOTSYS is not set so cannot find HistFactorySchema.dtd")
def split_norm_shape(histosys, nominal_hist):
    """
    Split a HistoSys into normalization (OverallSys) and shape (HistoSys)
    components.

    It is recommended to use OverallSys as much as possible, which tries to
    enforce continuity up to the second derivative during
    interpolation/extrapolation. So, if there is indeed a shape variation,
    then factorize it into shape and normalization components.

    Parameters
    ----------
    histosys : HistoSys
        The systematic variation to split.
    nominal_hist : histogram
        The nominal histogram used as the normalization reference.

    Returns
    -------
    (norm, shape) : (OverallSys, HistoSys)
    """
    up = histosys.GetHistoHigh()
    dn = histosys.GetHistoLow()
    up = up.Clone(name=up.name + '_shape')
    dn = dn.Clone(name=dn.name + '_shape')
    n_nominal = nominal_hist.integral(overflow=True)
    n_up = up.integral(overflow=True)
    n_dn = dn.integral(overflow=True)
    # protect against division by zero: an empty variation histogram
    # cannot be rescaled to the nominal normalization
    if n_up != 0:
        up.Scale(n_nominal / n_up)
    if n_dn != 0:
        dn.Scale(n_nominal / n_dn)
    shape = HistoSys(histosys.GetName(), low=dn, high=up)
    norm = OverallSys(histosys.GetName(),
                      low=n_dn / n_nominal,
                      high=n_up / n_nominal)
    return norm, shape
Protect against division by zero when rescaling an empty variation histogram.
# Copyright 2012 the rootpy developers
# distributed under the terms of the GNU General Public License
from __future__ import absolute_import
import os
import re
import shutil
from glob import glob
import ROOT
from . import log; log = log[__name__]
from ...memory.keepalive import keepalive
from ...utils.silence import silence_sout_serr
from ...utils.path import mkdir_p
from ...context import (
do_nothing, working_directory, preserve_current_directory)
from ...io import root_open
from ... import asrootpy
from . import Channel, Measurement, HistoSys, OverallSys
__all__ = [
'make_channel',
'make_measurement',
'make_models',
'make_model',
'make_workspace',
'measurements_from_xml',
'write_measurement',
'patch_xml',
'split_norm_shape',
]
def make_channel(name, samples, data=None, verbose=False):
    """
    Build a HistFactory Channel named ``channel_<name>`` from *samples*.

    The optional *data* histogram is attached with ``SetData`` and
    *verbose* enables progress logging.
    """
    clog = log['make_channel'] if verbose else None
    if verbose:
        clog.info("creating channel {0}".format(name))
    # prefix with "channel_" to avoid a segfault when the name begins
    # with a digit
    channel = Channel('channel_{0}'.format(name))
    channel.SetStatErrorConfig(0.05, "Poisson")
    if data is not None:
        if verbose:
            clog.info("setting data")
        channel.SetData(data)
    for sample in samples:
        if verbose:
            clog.info("adding sample {0}".format(sample.GetName()))
        channel.AddSample(sample)
    return channel
def make_measurement(name,
                     channels,
                     lumi=1.0, lumi_rel_error=0.,
                     output_prefix='./histfactory',
                     POI=None,
                     const_params=None,
                     verbose=False):
    """
    Create a Measurement from a list of Channels.

    Parameters
    ----------
    name : string
        Measurement name; the object is named ``measurement_<name>``.
    channels : Channel or list of Channels
        The channels to include.
    lumi : float, optional (default=1.0)
        The luminosity value.
    lumi_rel_error : float, optional (default=0.)
        The relative error on the luminosity.
    output_prefix : string, optional (default='./histfactory')
        Output file prefix passed to ``SetOutputFilePrefix``.
    POI : string or list of strings, optional (default=None)
        Parameter(s) of interest.
    const_params : list of strings, optional (default=None)
        Parameters to hold constant.
    verbose : bool, optional (default=False)
        If True then log progress.

    Returns
    -------
    meas : Measurement
    """
    if verbose:
        llog = log['make_measurement']
        llog.info("creating measurement {0}".format(name))
    if not isinstance(channels, (list, tuple)):
        channels = [channels]
    # Create the measurement
    meas = Measurement('measurement_{0}'.format(name), '')
    meas.SetOutputFilePrefix(output_prefix)
    if POI is not None:
        # NOTE(review): `basestring` exists only on Python 2 -- confirm
        # before porting this module to Python 3
        if isinstance(POI, basestring):
            if verbose:
                llog.info("setting POI {0}".format(POI))
            meas.SetPOI(POI)
        else:
            if verbose:
                llog.info("adding POIs {0}".format(', '.join(POI)))
            for p in POI:
                meas.AddPOI(p)
    if verbose:
        llog.info("setting lumi={0:f} +/- {1:f}".format(lumi, lumi_rel_error))
    meas.lumi = lumi
    meas.lumi_rel_error = lumi_rel_error
    for channel in channels:
        if verbose:
            llog.info("adding channel {0}".format(channel.GetName()))
        meas.AddChannel(channel)
    if const_params is not None:
        if verbose:
            llog.info("adding constant parameters {0}".format(
                ', '.join(const_params)))
        for param in const_params:
            meas.AddConstantParam(param)
    return meas
def make_models(measurement, silence=False):
    """
    Build the workspace containing every model of *measurement*.

    When *silence* is True, HistFactory's stdout/stderr output is
    suppressed.
    """
    ctx = do_nothing if not silence else silence_sout_serr
    with ctx():
        ws = ROOT.RooStats.HistFactory.MakeModelAndMeasurementFast(
            measurement)
    return asrootpy(ws)
def make_model(measurement, channel=None, silence=False):
    """
    Create a workspace containing the model for a measurement.

    Parameters
    ----------
    measurement : Measurement
        The measurement to build the model from.
    channel : Channel, optional (default=None)
        If None then include all channels in the model, otherwise build
        the model for this single channel only.
    silence : bool, optional (default=False)
        If True, then silence HistFactory's output on stdout and stderr.

    Returns
    -------
    workspace : asrootpy'd RooWorkspace
    """
    context = silence_sout_serr if silence else do_nothing
    with context():
        hist2workspace = ROOT.RooStats.HistFactory.HistoToWorkspaceFactoryFast(
            measurement)
        if channel is not None:
            workspace = hist2workspace.MakeSingleChannelModel(
                measurement, channel)
        else:
            workspace = hist2workspace.MakeCombinedModel(measurement)
    workspace = asrootpy(workspace)
    # tie the measurement's lifetime to the workspace so it is not
    # destroyed while the workspace is still alive
    keepalive(workspace, measurement)
    return workspace
def make_workspace(name, channels,
                   lumi=1.0, lumi_rel_error=0.,
                   output_prefix='./histfactory',
                   POI=None,
                   const_params=None,
                   silence=False):
    """
    Build a combined workspace named ``workspace_<name>`` from *channels*.

    Returns the ``(workspace, measurement)`` pair.
    """
    channel_list = channels if isinstance(channels, (list, tuple)) else [channels]
    meas = make_measurement(
        name, channel_list,
        lumi=lumi,
        lumi_rel_error=lumi_rel_error,
        output_prefix=output_prefix,
        POI=POI,
        const_params=const_params)
    ws = make_model(meas, silence=silence)
    ws.SetName('workspace_{0}'.format(name))
    return ws, meas
def measurements_from_xml(filename,
                          collect_histograms=True,
                          cd_parent=False,
                          silence=False):
    """
    Read in a list of Measurements from XML; The equivalent of what
    hist2workspace does before calling MakeModelAndMeasurementFast
    (see make_models()).

    Parameters
    ----------
    filename : string
        Path of the top-level XML file.
    collect_histograms : bool, optional (default=True)
        If True then also call ``CollectHistograms`` on each measurement.
    cd_parent : bool, optional (default=False)
        If True then parse while in the parent of the directory
        containing the XML file.
    silence : bool, optional (default=False)
        If True then silence HistFactory's stdout/stderr output.

    Returns
    -------
    measurements : list of asrootpy'd Measurement objects
    """
    if not os.path.isfile(filename):
        raise OSError("the file {0} does not exist".format(filename))
    silence_context = silence_sout_serr if silence else do_nothing
    filename = os.path.abspath(os.path.normpath(filename))
    if cd_parent:
        # presumably the XML references paths relative to the parent
        # directory of the XML directory -- verify against callers
        xml_directory = os.path.dirname(filename)
        parent = os.path.abspath(os.path.join(xml_directory, os.pardir))
        cd_context = working_directory
    else:
        parent = None
        cd_context = do_nothing
    log.info("parsing XML in {0} ...".format(filename))
    with cd_context(parent):
        parser = ROOT.RooStats.HistFactory.ConfigParser()
        with silence_context():
            measurements_vect = parser.GetMeasurementsFromXML(filename)
        # prevent measurements_vect from being garbage collected
        ROOT.SetOwnership(measurements_vect, False)
        measurements = []
        for m in measurements_vect:
            if collect_histograms:
                with silence_context():
                    m.CollectHistograms()
            measurements.append(asrootpy(m))
    return measurements
def write_measurement(measurement,
                      root_file=None,
                      xml_path=None,
                      output_path=None,
                      output_suffix=None,
                      write_workspaces=False,
                      apply_xml_patches=True,
                      silence=False):
    """
    Write a measurement and RooWorkspaces for all contained channels
    into a ROOT file and write the XML files into a directory.

    Parameters
    ----------
    measurement : HistFactory::Measurement
        An asrootpy'd ``HistFactory::Measurement`` object
    root_file : ROOT TFile or string, optional (default=None)
        A ROOT file or string file name. The measurement and workspaces
        will be written to this file. If ``root_file is None`` then a
        new file will be created with the same name as the measurement and
        with the prefix ``ws_``.
    xml_path : string, optional (default=None)
        A directory path to write the XML into. If None, a new directory with
        the same name as the measurement and with the prefix ``xml_`` will be
        created.
    output_path : string, optional (default=None)
        If ``root_file is None``, create the ROOT file under this path.
        If ``xml_path is None``, create the XML directory under this path.
    output_suffix : string, optional (default=None)
        If ``root_file is None`` then a new file is created with the same name
        as the measurement and with the prefix ``ws_``. ``output_suffix`` will
        append a suffix to this file name (before the .root extension).
        If ``xml_path is None``, then a new directory is created with the
        same name as the measurement and with the prefix ``xml_``.
        ``output_suffix`` will append a suffix to this directory name.
    write_workspaces : bool, optional (default=False)
        If True then also write a RooWorkspace for each channel and for all
        channels combined.
    apply_xml_patches : bool, optional (default=True)
        Apply fixes on the output of ``Measurement::PrintXML()`` to avoid known
        HistFactory bugs. Some of the patches assume that the ROOT file
        containing the histograms will exist one directory level up from the
        XML and that hist2workspace, or any tool that later reads the XML will
        run from that same directory containing the ROOT file.
    silence : bool, optional (default=False)
        If True then capture and silence all stdout/stderr output from
        HistFactory.
    """
    context = silence_sout_serr if silence else do_nothing
    output_name = measurement.name
    if output_suffix is not None:
        output_name += '_{0}'.format(output_suffix)
    # make the name filesystem-friendly
    output_name = output_name.replace(' ', '_')
    if xml_path is None:
        xml_path = 'xml_{0}'.format(output_name)
        if output_path is not None:
            xml_path = os.path.join(output_path, xml_path)
    if not os.path.exists(xml_path):
        mkdir_p(xml_path)
    if root_file is None:
        root_file = 'ws_{0}.root'.format(output_name)
        if output_path is not None:
            root_file = os.path.join(output_path, root_file)
    # own_file: True when the file was opened here, so it must also be
    # closed here
    own_file = False
    # NOTE(review): `basestring` exists only on Python 2 -- confirm
    # before porting this module to Python 3
    if isinstance(root_file, basestring):
        root_file = root_open(root_file, 'recreate')
        own_file = True
    # cd() below changes ROOT's current directory; restore it on exit
    with preserve_current_directory():
        root_file.cd()
        log.info("writing histograms and measurement in {0} ...".format(
            root_file.GetName()))
        with context():
            measurement.writeToFile(root_file)
        # get modified measurement
        out_m = root_file.Get(measurement.name)
        log.info("writing XML in {0} ...".format(xml_path))
        with context():
            out_m.PrintXML(xml_path)
        if write_workspaces:
            log.info("writing combined model in {0} ...".format(
                root_file.GetName()))
            workspace = make_model(measurement, silence=silence)
            workspace.Write()
            for channel in measurement.channels:
                log.info("writing model for channel `{0}` in {1} ...".format(
                    channel.name, root_file.GetName()))
                workspace = make_model(
                    measurement, channel=channel, silence=silence)
                workspace.Write()
    if apply_xml_patches:
        # patch the output XML to avoid HistFactory bugs
        patch_xml(glob(os.path.join(xml_path, '*.xml')),
                  root_file=os.path.basename(root_file.GetName()))
    if own_file:
        root_file.Close()
def patch_xml(files, root_file=None, float_precision=3):
    """
    Apply patches to HistFactory XML output from PrintXML.

    Parameters
    ----------
    files : list of strings
        XML file paths to patch in place.
    root_file : string, optional (default=None)
        If given, rewrite every ``InputFile`` attribute to this file name.
    float_precision : int, optional (default=3)
        Number of decimal places to keep on float attribute values.
    """
    if float_precision < 0:
        raise ValueError("precision must be greater than 0")
    def fix_path(match):
        # keep only the last directory component plus the file name so
        # <Input> paths stay valid relative to the parent directory
        path = match.group(1)
        if path:
            head, tail = os.path.split(path)
            new_path = os.path.join(os.path.basename(head), tail)
        else:
            new_path = ''
        return '<Input>{0}</Input>'.format(new_path)
    for xmlfilename in files:
        xmlfilename = os.path.abspath(os.path.normpath(xmlfilename))
        # write into a temp file first, then replace the original
        patched_xmlfilename = '{0}.tmp'.format(xmlfilename)
        log.info("patching {0} ...".format(xmlfilename))
        fin = open(xmlfilename, 'r')
        fout = open(patched_xmlfilename, 'w')
        for line in fin:
            if root_file is not None:
                # point every InputFile attribute at the given ROOT file
                line = re.sub(
                    'InputFile="[^"]*"',
                    'InputFile="{0}"'.format(root_file), line)
            # an all-empty StatError element degrades to a bare Activate
            line = line.replace(
                '<StatError Activate="True" InputFile="" '
                'HistoName="" HistoPath="" />',
                '<StatError Activate="True" />')
            line = re.sub(
                '<Combination OutputFilePrefix="(\S*)" >',
                '<Combination OutputFilePrefix="hist2workspace" >', line)
            # drop empty attributes (attr="") and tidy up whitespace
            line = re.sub('\w+=""', '', line)
            line = re.sub('\s+/>', ' />', line)
            line = re.sub('(\S)\s+</', r'\1</', line)
            # HistFactory bug:
            line = re.sub('InputFileHigh="\S+"', '', line)
            line = re.sub('InputFileLow="\S+"', '', line)
            # HistFactory bug:
            line = line.replace(
                '<ParamSetting Const="True"></ParamSetting>', '')
            # chop off floats to desired precision
            line = re.sub(
                r'"(\d*\.\d{{{0:d},}})"'.format(float_precision + 1),
                lambda x: '"{0}"'.format(
                    str(round(float(x.group(1)), float_precision))),
                line)
            line = re.sub('"\s\s+(\S)', r'" \1', line)
            line = re.sub('<Input>(.*)</Input>', fix_path, line)
            fout.write(line)
        fin.close()
        fout.close()
        shutil.move(patched_xmlfilename, xmlfilename)
        # copy HistFactorySchema.dtd next to the XML if it is missing
        if not os.path.isfile(os.path.join(
                os.path.dirname(xmlfilename),
                'HistFactorySchema.dtd')):
            rootsys = os.getenv('ROOTSYS', None)
            if rootsys is not None:
                dtdfile = os.path.join(rootsys, 'etc/HistFactorySchema.dtd')
                target = os.path.dirname(xmlfilename)
                if os.path.isfile(dtdfile):
                    log.info("copying {0} to {1} ...".format(dtdfile, target))
                    shutil.copy(dtdfile, target)
                else:
                    log.warning("{0} does not exist".format(dtdfile))
            else:
                log.warning(
                    "$ROOTSYS is not set so cannot find HistFactorySchema.dtd")
def split_norm_shape(histosys, nominal_hist):
    """
    Factorize a HistoSys into a normalization piece (OverallSys) and a
    pure-shape piece (HistoSys).

    OverallSys interpolation enforces continuity up to the second
    derivative, so as much of the variation as possible should live there;
    the residual shape-only variation is kept in a HistoSys whose up/down
    templates are rescaled to the nominal integral.
    """
    hi = histosys.GetHistoHigh()
    lo = histosys.GetHistoLow()
    # Work on clones so the original templates are left untouched.
    hi = hi.Clone(name=hi.name + '_shape')
    lo = lo.Clone(name=lo.name + '_shape')
    n_nom = nominal_hist.integral(overflow=True)
    n_hi = hi.integral(overflow=True)
    n_lo = lo.integral(overflow=True)
    # Rescale the variation templates to the nominal yield so they carry
    # shape information only.
    if n_hi != 0:
        hi.Scale(n_nom / n_hi)
    if n_lo != 0:
        lo.Scale(n_nom / n_lo)
    shape = HistoSys(histosys.GetName(), low=lo, high=hi)
    norm = OverallSys(histosys.GetName(),
                      low=n_lo / n_nom,
                      high=n_hi / n_nom)
    return norm, shape
|
import errno
import os
import sys
import time
import traceback
import warnings
from eventlet.green import urllib
from eventlet.green import socket
from eventlet.green import BaseHTTPServer
from eventlet import greenpool
from eventlet import greenio
from eventlet.support import get_errno
DEFAULT_MAX_SIMULTANEOUS_REQUESTS = 1024
DEFAULT_MAX_HTTP_VERSION = 'HTTP/1.1'
MAX_REQUEST_LINE = 8192
MINIMUM_CHUNK_SIZE = 4096
DEFAULT_LOG_FORMAT= ('%(client_ip)s - - [%(date_time)s] "%(request_line)s"'
' %(status_code)s %(body_length)s %(wall_seconds).6f')
__all__ = ['server', 'format_date_time']
# RFC 1123 weekday/month tokens; HTTP dates are always rendered in English.
_weekdayname = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
_monthname = [None,  # pad index 0 so tm_mon (1-based) indexes directly
              "Jan", "Feb", "Mar", "Apr", "May", "Jun",
              "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
def format_date_time(timestamp):
    """Render a unix *timestamp* as an RFC 1123 HTTP date string (GMT)."""
    st = time.gmtime(timestamp)
    return "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
        _weekdayname[st.tm_wday], st.tm_mday, _monthname[st.tm_mon],
        st.tm_year, st.tm_hour, st.tm_min, st.tm_sec)
# Collections of error codes to compare against. Not all attributes are set
# on errno module on all platforms, so some are literals :(
# 10053 is the Windows WSAECONNABORTED code, which has no errno constant.
# "This end is dead": reads on these sockets can never succeed again.
BAD_SOCK = set((errno.EBADF, 10053))
# Peer went away mid-write: broken pipe / connection reset by peer.
BROKEN_SOCK = set((errno.EPIPE, errno.ECONNRESET))
# special flag return value for apps
class _AlreadyHandled(object):
    """Sentinel iterable returned by apps that have fully handled the
    response themselves; iterating it yields nothing."""
    def __iter__(self):
        return self
    # Python 2 iterator protocol (would be __next__ under Python 3).
    def next(self):
        raise StopIteration
# Shared singleton instance WSGI apps hand back to signal "already done".
ALREADY_HANDLED = _AlreadyHandled()
class Input(object):
    """File-like reader for a WSGI request body.

    Reads are capped at the declared Content-Length and, when
    *chunked_input* is set, the chunked transfer encoding is decoded
    transparently.  If *wfile*/*wfile_line* are supplied, the pending
    "100 Continue" interim response is flushed to the client before the
    first body read.
    """
    def __init__(self,
                 rfile,
                 content_length,
                 wfile=None,
                 wfile_line=None,
                 chunked_input=False):
        self.rfile = rfile
        # Header values arrive as strings; normalize to int once.
        if content_length is not None:
            content_length = int(content_length)
        self.content_length = content_length
        self.wfile = wfile
        self.wfile_line = wfile_line
        # Bytes consumed so far (within the current chunk when chunked).
        self.position = 0
        self.chunked_input = chunked_input
        # -1: no chunk header parsed yet; 0: terminating chunk reached.
        self.chunk_length = -1
    def _do_read(self, reader, length=None):
        """Read up to *length* bytes via *reader*, honoring Content-Length."""
        if self.wfile is not None:
            ## 100 Continue
            self.wfile.write(self.wfile_line)
            self.wfile = None
            self.wfile_line = None
        if length is None and self.content_length is not None:
            length = self.content_length - self.position
        # Clamp explicit requests to the remaining declared body.  Guard
        # against content_length being None (fixes a TypeError when a
        # sized read is issued on a body with no Content-Length header).
        if (length and self.content_length is not None
                and length > self.content_length - self.position):
            length = self.content_length - self.position
        if not length:
            return ''
        try:
            read = reader(length)
        except greenio.SSL.ZeroReturnError:
            read = ''
        self.position += len(read)
        return read
    def _chunked_read(self, rfile, length=None):
        """Decode chunked transfer encoding; *length* of None reads it all."""
        if self.wfile is not None:
            ## 100 Continue
            self.wfile.write(self.wfile_line)
            self.wfile = None
            self.wfile_line = None
        response = []
        try:
            if length is None:
                if self.chunk_length > self.position:
                    response.append(rfile.read(self.chunk_length - self.position))
                while self.chunk_length != 0:
                    self.chunk_length = int(rfile.readline(), 16)
                    response.append(rfile.read(self.chunk_length))
                    rfile.readline()
            else:
                while length > 0 and self.chunk_length != 0:
                    if self.chunk_length > self.position:
                        response.append(rfile.read(
                            min(self.chunk_length - self.position, length)))
                        # A zero-byte read means the client hit EOF
                        # mid-chunk; bail out instead of looping forever
                        # on an exhausted socket (infinite-loop fix).
                        last_read = len(response[-1])
                        if last_read == 0:
                            break
                        length -= last_read
                        self.position += last_read
                        if self.chunk_length == self.position:
                            rfile.readline()
                    else:
                        self.chunk_length = int(rfile.readline(), 16)
                        self.position = 0
                        if not self.chunk_length:
                            rfile.readline()
        except greenio.SSL.ZeroReturnError:
            pass
        return ''.join(response)
    def read(self, length=None):
        """Read *length* bytes (or the remainder) of the request body."""
        if self.chunked_input:
            return self._chunked_read(self.rfile, length)
        return self._do_read(self.rfile.read, length)
    def readline(self, size=None):
        # NOTE(review): *size* is accepted for file-API compatibility but is
        # not forwarded to the underlying readline -- confirm intended.
        return self._do_read(self.rfile.readline)
    def readlines(self, hint=None):
        return self._do_read(self.rfile.readlines, hint)
    def __iter__(self):
        return iter(self.read())
    def get_socket(self):
        # Hand back a dup of the raw socket beneath the buffered reader.
        return self.rfile._sock.dup()
class HttpProtocol(BaseHTTPServer.BaseHTTPRequestHandler):
    """Per-connection request handler bridging HTTP parsing to WSGI.

    Parses requests with BaseHTTPRequestHandler, builds the WSGI environ,
    invokes the server's app, and streams the response (chunked for 1.1
    responses without a Content-Length).
    """
    # Class-level defaults; minimum_chunk_size may be overridden via
    # Server.__init__.
    protocol_version = 'HTTP/1.1'
    minimum_chunk_size = MINIMUM_CHUNK_SIZE
    def setup(self):
        """Attach rfile/wfile, tolerating pyOpenSSL SSL.Connection objects."""
        # overriding SocketServer.setup to correctly handle SSL.Connection objects
        conn = self.connection = self.request
        try:
            self.rfile = conn.makefile('rb', self.rbufsize)
            self.wfile = conn.makefile('wb', self.wbufsize)
        except (AttributeError, NotImplementedError):
            if hasattr(conn, 'send') and hasattr(conn, 'recv'):
                # it's an SSL.Connection
                self.rfile = socket._fileobject(conn, "rb", self.rbufsize)
                self.wfile = socket._fileobject(conn, "wb", self.wbufsize)
            else:
                # it's a SSLObject, or a martian
                raise NotImplementedError("wsgi.py doesn't support sockets "\
                                          "of type %s" % type(conn))
    def handle_one_request(self):
        """Read/parse one request, then delegate to handle_one_response.

        Rejects over-long request lines (414) and non-integer
        Content-Length headers (400); keeps the server's
        outstanding_requests counter accurate around the response.
        """
        if self.server.max_http_version:
            self.protocol_version = self.server.max_http_version
        if self.rfile.closed:
            self.close_connection = 1
            return
        try:
            self.raw_requestline = self.rfile.readline(MAX_REQUEST_LINE)
            if len(self.raw_requestline) == MAX_REQUEST_LINE:
                self.wfile.write(
                    "HTTP/1.0 414 Request URI Too Long\r\n"
                    "Connection: close\r\nContent-length: 0\r\n\r\n")
                self.close_connection = 1
                return
        except greenio.SSL.ZeroReturnError:
            self.raw_requestline = ''
        except socket.error, e:
            if get_errno(e) not in BAD_SOCK:
                raise
            self.raw_requestline = ''
        if not self.raw_requestline:
            self.close_connection = 1
            return
        if not self.parse_request():
            return
        content_length = self.headers.getheader('content-length')
        if content_length:
            try:
                int(content_length)
            except ValueError:
                self.wfile.write(
                    "HTTP/1.0 400 Bad Request\r\n"
                    "Connection: close\r\nContent-length: 0\r\n\r\n")
                self.close_connection = 1
                return
        self.environ = self.get_environ()
        self.application = self.server.app
        try:
            self.server.outstanding_requests += 1
            try:
                self.handle_one_response()
            except socket.error, e:
                # Broken pipe, connection reset by peer
                if get_errno(e) not in BROKEN_SOCK:
                    raise
        finally:
            self.server.outstanding_requests -= 1
    def handle_one_response(self):
        """Run the WSGI app and stream its result back to the client."""
        start = time.time()
        headers_set = []
        headers_sent = []
        wfile = self.wfile
        result = None
        # Mutable one-element containers so the nested closures can update
        # state shared with this frame (no `nonlocal` in Python 2).
        use_chunked = [False]
        length = [0]
        status_code = [200]
        def write(data, _writelines=wfile.writelines):
            """Emit headers on first call, then write *data* to the client."""
            towrite = []
            if not headers_set:
                raise AssertionError("write() before start_response()")
            elif not headers_sent:
                status, response_headers = headers_set
                headers_sent.append(1)
                header_list = [header[0].lower() for header in response_headers]
                towrite.append('%s %s\r\n' % (self.protocol_version, status))
                for header in response_headers:
                    towrite.append('%s: %s\r\n' % header)
                # send Date header?
                if 'date' not in header_list:
                    towrite.append('Date: %s\r\n' % (format_date_time(time.time()),))
                client_conn = self.headers.get('Connection', '').lower()
                send_keep_alive = False
                if self.server.keepalive and (client_conn == 'keep-alive' or \
                    (self.request_version == 'HTTP/1.1' and
                     not client_conn == 'close')):
                    # only send keep-alives back to clients that sent them,
                    # it's redundant for 1.1 connections
                    send_keep_alive = (client_conn == 'keep-alive')
                    self.close_connection = 0
                else:
                    self.close_connection = 1
                if 'content-length' not in header_list:
                    if self.request_version == 'HTTP/1.1':
                        use_chunked[0] = True
                        towrite.append('Transfer-Encoding: chunked\r\n')
                    elif 'content-length' not in header_list:
                        # client is 1.0 and therefore must read to EOF
                        self.close_connection = 1
                if self.close_connection:
                    towrite.append('Connection: close\r\n')
                elif send_keep_alive:
                    towrite.append('Connection: keep-alive\r\n')
                towrite.append('\r\n')
                # end of header writing
            if use_chunked[0]:
                ## Write the chunked encoding
                towrite.append("%x\r\n%s\r\n" % (len(data), data))
            else:
                towrite.append(data)
            try:
                _writelines(towrite)
                length[0] = length[0] + sum(map(len, towrite))
            except UnicodeEncodeError:
                print "Encountered unicode while attempting to write wsgi response: ", \
                        [x for x in towrite if isinstance(x, unicode)]
                traceback.print_exc()
                _writelines(
                    ["HTTP/1.0 500 Internal Server Error\r\n",
                     "Connection: close\r\n",
                     "Content-type: text/plain\r\n",
                     "Content-length: 98\r\n",
                     "\r\n",
                     ("Internal Server Error: wsgi application passed "
                      "a unicode object to the server instead of a string.")])
        def start_response(status, response_headers, exc_info=None):
            """PEP 333 start_response; stores status/headers, returns write."""
            status_code[0] = status.split()[0]
            if exc_info:
                try:
                    if headers_sent:
                        # Re-raise original exception if headers sent
                        raise exc_info[0], exc_info[1], exc_info[2]
                finally:
                    # Avoid dangling circular ref
                    exc_info = None
            capitalized_headers = [('-'.join([x.capitalize()
                                              for x in key.split('-')]), value)
                                   for key, value in response_headers]
            headers_set[:] = [status, capitalized_headers]
            return write
        try:
            try:
                result = self.application(self.environ, start_response)
                if isinstance(result, _AlreadyHandled):
                    self.close_connection = 1
                    return
                if not headers_sent and hasattr(result, '__len__') and \
                    'Content-Length' not in [h for h, _v in headers_set[1]]:
                    headers_set[1].append(('Content-Length', str(sum(map(len, result)))))
                towrite = []
                towrite_size = 0
                just_written_size = 0
                for data in result:
                    towrite.append(data)
                    towrite_size += len(data)
                    if towrite_size >= self.minimum_chunk_size:
                        write(''.join(towrite))
                        towrite = []
                        just_written_size = towrite_size
                        towrite_size = 0
                if towrite:
                    just_written_size = towrite_size
                    write(''.join(towrite))
                if not headers_sent or (use_chunked[0] and just_written_size):
                    write('')
            except Exception:
                self.close_connection = 1
                exc = traceback.format_exc()
                print exc
                if not headers_set:
                    start_response("500 Internal Server Error",
                                   [('Content-type', 'text/plain')])
                write(exc)
        finally:
            if hasattr(result, 'close'):
                result.close()
            # NOTE(review): CONTENT_LENGTH is stored as a str in the environ,
            # so this int-vs-str comparison is always True under CPython 2
            # whenever the header is present -- confirm intended.
            if (self.environ['eventlet.input'].position
                < self.environ.get('CONTENT_LENGTH', 0)):
                ## Read and discard body if there was no pending 100-continue
                if not self.environ['eventlet.input'].wfile:
                    while self.environ['eventlet.input'].read(MINIMUM_CHUNK_SIZE):
                        pass
            finish = time.time()
            self.server.log_message(self.server.log_format % dict(
                client_ip=self.get_client_ip(),
                date_time=self.log_date_time_string(),
                request_line=self.requestline,
                status_code=status_code[0],
                body_length=length[0],
                wall_seconds=finish - start))
    def get_client_ip(self):
        """Return the client IP, optionally prefixed by X-Forwarded-For."""
        client_ip = self.client_address[0]
        if self.server.log_x_forwarded_for:
            forward = self.headers.get('X-Forwarded-For', '').replace(' ', '')
            if forward:
                client_ip = "%s,%s" % (forward, client_ip)
        return client_ip
    def get_environ(self):
        """Build the WSGI environ dict for this request, including the
        eventlet.input body reader (with any pending 100-continue)."""
        env = self.server.get_environ()
        env['REQUEST_METHOD'] = self.command
        env['SCRIPT_NAME'] = ''
        if '?' in self.path:
            path, query = self.path.split('?', 1)
        else:
            path, query = self.path, ''
        env['PATH_INFO'] = urllib.unquote(path)
        env['QUERY_STRING'] = query
        if self.headers.typeheader is None:
            env['CONTENT_TYPE'] = self.headers.type
        else:
            env['CONTENT_TYPE'] = self.headers.typeheader
        length = self.headers.getheader('content-length')
        if length:
            env['CONTENT_LENGTH'] = length
        env['SERVER_PROTOCOL'] = 'HTTP/1.0'
        host, port = self.request.getsockname()
        env['SERVER_NAME'] = host
        env['SERVER_PORT'] = str(port)
        env['REMOTE_ADDR'] = self.client_address[0]
        env['GATEWAY_INTERFACE'] = 'CGI/1.1'
        # Fold remaining headers into HTTP_* keys, joining duplicates with
        # commas; keys already present in env win.
        for h in self.headers.headers:
            k, v = h.split(':', 1)
            k = k.replace('-', '_').upper()
            v = v.strip()
            if k in env:
                continue
            envk = 'HTTP_' + k
            if envk in env:
                env[envk] += ',' + v
            else:
                env[envk] = v
        if env.get('HTTP_EXPECT') == '100-continue':
            wfile = self.wfile
            wfile_line = 'HTTP/1.1 100 Continue\r\n\r\n'
        else:
            wfile = None
            wfile_line = None
        chunked = env.get('HTTP_TRANSFER_ENCODING', '').lower() == 'chunked'
        env['wsgi.input'] = env['eventlet.input'] = Input(
            self.rfile, length, wfile=wfile, wfile_line=wfile_line,
            chunked_input=chunked)
        return env
    def finish(self):
        """Flush buffers, then shut down and close the connection safely."""
        BaseHTTPServer.BaseHTTPRequestHandler.finish(self)
        greenio.shutdown_safe(self.connection)
        self.connection.close()
class Server(BaseHTTPServer.HTTPServer):
    """Holder for the WSGI app plus per-server configuration.

    Tracks in-flight requests and hands each accepted connection to the
    configured protocol class (HttpProtocol by default).
    """
    def __init__(self,
                 socket,
                 address,
                 app,
                 log=None,
                 environ=None,
                 max_http_version=None,
                 protocol=HttpProtocol,
                 minimum_chunk_size=None,
                 log_x_forwarded_for=True,
                 keepalive=True,
                 log_format=DEFAULT_LOG_FORMAT):
        self.outstanding_requests = 0
        self.socket = socket
        self.address = address
        if log:
            self.log = log
        else:
            self.log = sys.stderr
        self.app = app
        self.keepalive = keepalive
        self.environ = environ
        self.max_http_version = max_http_version
        self.protocol = protocol
        self.pid = os.getpid()
        # NOTE: this mutates the protocol CLASS attribute, so the setting
        # is shared by every server using the same protocol class.
        if minimum_chunk_size is not None:
            protocol.minimum_chunk_size = minimum_chunk_size
        self.log_x_forwarded_for = log_x_forwarded_for
        self.log_format = log_format
    def get_environ(self):
        """Return the base WSGI environ merged with user-supplied entries."""
        d = {
            'wsgi.errors': sys.stderr,
            'wsgi.version': (1, 0),
            'wsgi.multithread': True,
            'wsgi.multiprocess': False,
            'wsgi.run_once': False,
            'wsgi.url_scheme': 'http',
        }
        if self.environ is not None:
            d.update(self.environ)
        return d
    # Python 2 tuple-parameter unpacking: receives the (socket, address)
    # pair exactly as produced by sock.accept().
    def process_request(self, (socket, address)):
        proto = self.protocol(socket, address, self)
        proto.handle()
    def log_message(self, message):
        """Write one line to the configured log file object."""
        self.log.write(message + '\n')
# If the stdlib ssl module is available, also tolerate SSL-layer EOF/error
# conditions in the accept loop; otherwise only plain socket errors are
# swallowed.
try:
    import ssl
    ACCEPT_EXCEPTIONS = (socket.error, ssl.SSLError)
    ACCEPT_ERRNO = set((errno.EPIPE, errno.EBADF, errno.ECONNRESET,
                        ssl.SSL_ERROR_EOF, ssl.SSL_ERROR_SSL))
except ImportError:
    ACCEPT_EXCEPTIONS = (socket.error,)
    ACCEPT_ERRNO = set((errno.EPIPE, errno.EBADF, errno.ECONNRESET))
def server(sock, site,
           log=None,
           environ=None,
           max_size=None,
           max_http_version=DEFAULT_MAX_HTTP_VERSION,
           protocol=HttpProtocol,
           server_event=None,
           minimum_chunk_size=None,
           log_x_forwarded_for=True,
           custom_pool=None,
           keepalive=True,
           log_format=DEFAULT_LOG_FORMAT):
    """ Start up a wsgi server handling requests from the supplied server
    socket.  This function loops forever. The *sock* object will be closed after server exits,
    but the underlying file descriptor will remain open, so if you have a dup() of *sock*,
    it will remain usable.
    :param sock: Server socket, must be already bound to a port and listening.
    :param site: WSGI application function.
    :param log: File-like object that logs should be written to.  If not specified, sys.stderr is used.
    :param environ: Additional parameters that go into the environ dictionary of every request.
    :param max_size: Maximum number of client connections opened at any time by this server.
    :param max_http_version: Set to "HTTP/1.0" to make the server pretend it only supports HTTP 1.0.  This can help with applications or clients that don't behave properly using HTTP 1.1.
    :param protocol: Protocol class.  Deprecated.
    :param server_event: Used to collect the Server object.  Deprecated.
    :param minimum_chunk_size: Minimum size in bytes for http chunks.  This  can be used to improve performance of applications which yield many small strings, though using it technically violates the WSGI spec.
    :param log_x_forwarded_for: If True (the default), logs the contents of the x-forwarded-for header in addition to the actual client ip address in the 'client_ip' field of the log line.
    :param custom_pool: A custom GreenPool instance which is used to spawn client green threads.  If this is supplied, max_size is ignored.
    :param keepalive: If set to False, disables keepalives on the server; all connections will be closed after serving one request.
    :param log_format: A python format string that is used as the template to generate log lines.  The following values can be formatted into it: client_ip, date_time, request_line, status_code, body_length, wall_seconds.  Look the default for an example of how to use this.
    """
    serv = Server(sock, sock.getsockname(),
                  site, log,
                  environ=environ,
                  max_http_version=max_http_version,
                  protocol=protocol,
                  minimum_chunk_size=minimum_chunk_size,
                  log_x_forwarded_for=log_x_forwarded_for,
                  keepalive=keepalive,
                  log_format=log_format)
    if server_event is not None:
        server_event.send(serv)
    if max_size is None:
        max_size = DEFAULT_MAX_SIMULTANEOUS_REQUESTS
    if custom_pool is not None:
        pool = custom_pool
    else:
        pool = greenpool.GreenPool(max_size)
    try:
        # Log scheme/host/port; elide default ports for tidiness.
        host, port = sock.getsockname()
        port = ':%s' % (port, )
        if hasattr(sock, 'do_handshake'):
            scheme = 'https'
            if port == ':443':
                port = ''
        else:
            scheme = 'http'
            if port == ':80':
                port = ''
        serv.log.write("(%s) wsgi starting up on %s://%s%s/\n" % (
            os.getpid(), scheme, host, port))
        # Main accept loop; exits only on KeyboardInterrupt/SystemExit.
        while True:
            try:
                client_socket = sock.accept()
                try:
                    pool.spawn_n(serv.process_request, client_socket)
                except AttributeError:
                    warnings.warn("wsgi's pool should be an instance of " \
                        "eventlet.greenpool.GreenPool, is %s. Please convert your"\
                        " call site to use GreenPool instead" % type(pool),
                        DeprecationWarning, stacklevel=2)
                    pool.execute_async(serv.process_request, client_socket)
            except ACCEPT_EXCEPTIONS, e:
                if get_errno(e) not in ACCEPT_ERRNO:
                    raise
            except (KeyboardInterrupt, SystemExit):
                serv.log.write("wsgi exiting\n")
                break
    finally:
        try:
            # NOTE: It's not clear whether we want this to leave the
            # socket open or close it.  Use cases like Spawning want
            # the underlying fd to remain open, but if we're going
            # that far we might as well not bother closing sock at
            # all.
            sock.close()
        except socket.error, e:
            if get_errno(e) not in BROKEN_SOCK:
                traceback.print_exc()
Fix for infinite loop in wsgi.py, thanks to redbo's repro.
import errno
import os
import sys
import time
import traceback
import warnings
from eventlet.green import urllib
from eventlet.green import socket
from eventlet.green import BaseHTTPServer
from eventlet import greenpool
from eventlet import greenio
from eventlet.support import get_errno
DEFAULT_MAX_SIMULTANEOUS_REQUESTS = 1024
DEFAULT_MAX_HTTP_VERSION = 'HTTP/1.1'
MAX_REQUEST_LINE = 8192
MINIMUM_CHUNK_SIZE = 4096
DEFAULT_LOG_FORMAT= ('%(client_ip)s - - [%(date_time)s] "%(request_line)s"'
' %(status_code)s %(body_length)s %(wall_seconds).6f')
__all__ = ['server', 'format_date_time']
# RFC 1123 weekday/month tokens; HTTP dates are always rendered in English.
_weekdayname = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
_monthname = [None,  # pad index 0 so tm_mon (1-based) indexes directly
              "Jan", "Feb", "Mar", "Apr", "May", "Jun",
              "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
def format_date_time(timestamp):
    """Render a unix *timestamp* as an RFC 1123 HTTP date string (GMT)."""
    st = time.gmtime(timestamp)
    return "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
        _weekdayname[st.tm_wday], st.tm_mday, _monthname[st.tm_mon],
        st.tm_year, st.tm_hour, st.tm_min, st.tm_sec)
# Collections of error codes to compare against. Not all attributes are set
# on errno module on all platforms, so some are literals :(
# 10053 is the Windows WSAECONNABORTED code, which has no errno constant.
# "This end is dead": reads on these sockets can never succeed again.
BAD_SOCK = set((errno.EBADF, 10053))
# Peer went away mid-write: broken pipe / connection reset by peer.
BROKEN_SOCK = set((errno.EPIPE, errno.ECONNRESET))
# special flag return value for apps
class _AlreadyHandled(object):
    """Sentinel iterable returned by apps that have fully handled the
    response themselves; iterating it yields nothing."""
    def __iter__(self):
        return self
    # Python 2 iterator protocol (would be __next__ under Python 3).
    def next(self):
        raise StopIteration
# Shared singleton instance WSGI apps hand back to signal "already done".
ALREADY_HANDLED = _AlreadyHandled()
class Input(object):
    """File-like reader for a WSGI request body.

    Reads are capped at the declared Content-Length and, when
    *chunked_input* is set, the chunked transfer encoding is decoded
    transparently.  If *wfile*/*wfile_line* are supplied, the pending
    "100 Continue" interim response is flushed to the client before the
    first body read.
    """
    def __init__(self,
                 rfile,
                 content_length,
                 wfile=None,
                 wfile_line=None,
                 chunked_input=False):
        self.rfile = rfile
        # Header values arrive as strings; normalize to int once.
        if content_length is not None:
            content_length = int(content_length)
        self.content_length = content_length
        self.wfile = wfile
        self.wfile_line = wfile_line
        # Bytes consumed so far (within the current chunk when chunked).
        self.position = 0
        self.chunked_input = chunked_input
        # -1: no chunk header parsed yet; 0: terminating chunk reached.
        self.chunk_length = -1
    def _do_read(self, reader, length=None):
        """Read up to *length* bytes via *reader*, honoring Content-Length."""
        if self.wfile is not None:
            ## 100 Continue
            self.wfile.write(self.wfile_line)
            self.wfile = None
            self.wfile_line = None
        if length is None and self.content_length is not None:
            length = self.content_length - self.position
        # Clamp explicit requests to the remaining declared body.
        if length and length > self.content_length - self.position:
            length = self.content_length - self.position
        if not length:
            return ''
        try:
            read = reader(length)
        except greenio.SSL.ZeroReturnError:
            read = ''
        self.position += len(read)
        return read
    def _chunked_read(self, rfile, length=None):
        """Decode chunked transfer encoding; *length* of None reads it all."""
        if self.wfile is not None:
            ## 100 Continue
            self.wfile.write(self.wfile_line)
            self.wfile = None
            self.wfile_line = None
        response = []
        try:
            if length is None:
                if self.chunk_length > self.position:
                    response.append(rfile.read(self.chunk_length - self.position))
                while self.chunk_length != 0:
                    self.chunk_length = int(rfile.readline(), 16)
                    response.append(rfile.read(self.chunk_length))
                    rfile.readline()
            else:
                while length > 0 and self.chunk_length != 0:
                    if self.chunk_length > self.position:
                        response.append(rfile.read(
                            min(self.chunk_length - self.position, length)))
                        # A zero-byte read means the client hit EOF
                        # mid-chunk; break out instead of spinning forever
                        # (this guard is the infinite-loop fix).
                        last_read = len(response[-1])
                        if last_read == 0:
                            break
                        length -= last_read
                        self.position += last_read
                        if self.chunk_length == self.position:
                            rfile.readline()
                    else:
                        self.chunk_length = int(rfile.readline(), 16)
                        self.position = 0
                        if not self.chunk_length:
                            rfile.readline()
        except greenio.SSL.ZeroReturnError:
            pass
        return ''.join(response)
    def read(self, length=None):
        """Read *length* bytes (or the remainder) of the request body."""
        if self.chunked_input:
            return self._chunked_read(self.rfile, length)
        return self._do_read(self.rfile.read, length)
    def readline(self, size=None):
        # NOTE(review): *size* is accepted for file-API compatibility but is
        # not forwarded to the underlying readline -- confirm intended.
        return self._do_read(self.rfile.readline)
    def readlines(self, hint=None):
        return self._do_read(self.rfile.readlines, hint)
    def __iter__(self):
        return iter(self.read())
    def get_socket(self):
        # Hand back a dup of the raw socket beneath the buffered reader.
        return self.rfile._sock.dup()
class HttpProtocol(BaseHTTPServer.BaseHTTPRequestHandler):
    """Per-connection request handler bridging HTTP parsing to WSGI.

    Parses requests with BaseHTTPRequestHandler, builds the WSGI environ,
    invokes the server's app, and streams the response (chunked for 1.1
    responses without a Content-Length).
    """
    # Class-level defaults; minimum_chunk_size may be overridden via
    # Server.__init__.
    protocol_version = 'HTTP/1.1'
    minimum_chunk_size = MINIMUM_CHUNK_SIZE
    def setup(self):
        """Attach rfile/wfile, tolerating pyOpenSSL SSL.Connection objects."""
        # overriding SocketServer.setup to correctly handle SSL.Connection objects
        conn = self.connection = self.request
        try:
            self.rfile = conn.makefile('rb', self.rbufsize)
            self.wfile = conn.makefile('wb', self.wbufsize)
        except (AttributeError, NotImplementedError):
            if hasattr(conn, 'send') and hasattr(conn, 'recv'):
                # it's an SSL.Connection
                self.rfile = socket._fileobject(conn, "rb", self.rbufsize)
                self.wfile = socket._fileobject(conn, "wb", self.wbufsize)
            else:
                # it's a SSLObject, or a martian
                raise NotImplementedError("wsgi.py doesn't support sockets "\
                                          "of type %s" % type(conn))
    def handle_one_request(self):
        """Read/parse one request, then delegate to handle_one_response.

        Rejects over-long request lines (414) and non-integer
        Content-Length headers (400); keeps the server's
        outstanding_requests counter accurate around the response.
        """
        if self.server.max_http_version:
            self.protocol_version = self.server.max_http_version
        if self.rfile.closed:
            self.close_connection = 1
            return
        try:
            self.raw_requestline = self.rfile.readline(MAX_REQUEST_LINE)
            if len(self.raw_requestline) == MAX_REQUEST_LINE:
                self.wfile.write(
                    "HTTP/1.0 414 Request URI Too Long\r\n"
                    "Connection: close\r\nContent-length: 0\r\n\r\n")
                self.close_connection = 1
                return
        except greenio.SSL.ZeroReturnError:
            self.raw_requestline = ''
        except socket.error, e:
            if get_errno(e) not in BAD_SOCK:
                raise
            self.raw_requestline = ''
        if not self.raw_requestline:
            self.close_connection = 1
            return
        if not self.parse_request():
            return
        content_length = self.headers.getheader('content-length')
        if content_length:
            try:
                int(content_length)
            except ValueError:
                self.wfile.write(
                    "HTTP/1.0 400 Bad Request\r\n"
                    "Connection: close\r\nContent-length: 0\r\n\r\n")
                self.close_connection = 1
                return
        self.environ = self.get_environ()
        self.application = self.server.app
        try:
            self.server.outstanding_requests += 1
            try:
                self.handle_one_response()
            except socket.error, e:
                # Broken pipe, connection reset by peer
                if get_errno(e) not in BROKEN_SOCK:
                    raise
        finally:
            self.server.outstanding_requests -= 1
    def handle_one_response(self):
        """Run the WSGI app and stream its result back to the client."""
        start = time.time()
        headers_set = []
        headers_sent = []
        wfile = self.wfile
        result = None
        # Mutable one-element containers so the nested closures can update
        # state shared with this frame (no `nonlocal` in Python 2).
        use_chunked = [False]
        length = [0]
        status_code = [200]
        def write(data, _writelines=wfile.writelines):
            """Emit headers on first call, then write *data* to the client."""
            towrite = []
            if not headers_set:
                raise AssertionError("write() before start_response()")
            elif not headers_sent:
                status, response_headers = headers_set
                headers_sent.append(1)
                header_list = [header[0].lower() for header in response_headers]
                towrite.append('%s %s\r\n' % (self.protocol_version, status))
                for header in response_headers:
                    towrite.append('%s: %s\r\n' % header)
                # send Date header?
                if 'date' not in header_list:
                    towrite.append('Date: %s\r\n' % (format_date_time(time.time()),))
                client_conn = self.headers.get('Connection', '').lower()
                send_keep_alive = False
                if self.server.keepalive and (client_conn == 'keep-alive' or \
                    (self.request_version == 'HTTP/1.1' and
                     not client_conn == 'close')):
                    # only send keep-alives back to clients that sent them,
                    # it's redundant for 1.1 connections
                    send_keep_alive = (client_conn == 'keep-alive')
                    self.close_connection = 0
                else:
                    self.close_connection = 1
                if 'content-length' not in header_list:
                    if self.request_version == 'HTTP/1.1':
                        use_chunked[0] = True
                        towrite.append('Transfer-Encoding: chunked\r\n')
                    elif 'content-length' not in header_list:
                        # client is 1.0 and therefore must read to EOF
                        self.close_connection = 1
                if self.close_connection:
                    towrite.append('Connection: close\r\n')
                elif send_keep_alive:
                    towrite.append('Connection: keep-alive\r\n')
                towrite.append('\r\n')
                # end of header writing
            if use_chunked[0]:
                ## Write the chunked encoding
                towrite.append("%x\r\n%s\r\n" % (len(data), data))
            else:
                towrite.append(data)
            try:
                _writelines(towrite)
                length[0] = length[0] + sum(map(len, towrite))
            except UnicodeEncodeError:
                print "Encountered unicode while attempting to write wsgi response: ", \
                        [x for x in towrite if isinstance(x, unicode)]
                traceback.print_exc()
                _writelines(
                    ["HTTP/1.0 500 Internal Server Error\r\n",
                     "Connection: close\r\n",
                     "Content-type: text/plain\r\n",
                     "Content-length: 98\r\n",
                     "\r\n",
                     ("Internal Server Error: wsgi application passed "
                      "a unicode object to the server instead of a string.")])
        def start_response(status, response_headers, exc_info=None):
            """PEP 333 start_response; stores status/headers, returns write."""
            status_code[0] = status.split()[0]
            if exc_info:
                try:
                    if headers_sent:
                        # Re-raise original exception if headers sent
                        raise exc_info[0], exc_info[1], exc_info[2]
                finally:
                    # Avoid dangling circular ref
                    exc_info = None
            capitalized_headers = [('-'.join([x.capitalize()
                                              for x in key.split('-')]), value)
                                   for key, value in response_headers]
            headers_set[:] = [status, capitalized_headers]
            return write
        try:
            try:
                result = self.application(self.environ, start_response)
                if isinstance(result, _AlreadyHandled):
                    self.close_connection = 1
                    return
                if not headers_sent and hasattr(result, '__len__') and \
                    'Content-Length' not in [h for h, _v in headers_set[1]]:
                    headers_set[1].append(('Content-Length', str(sum(map(len, result)))))
                towrite = []
                towrite_size = 0
                just_written_size = 0
                for data in result:
                    towrite.append(data)
                    towrite_size += len(data)
                    if towrite_size >= self.minimum_chunk_size:
                        write(''.join(towrite))
                        towrite = []
                        just_written_size = towrite_size
                        towrite_size = 0
                if towrite:
                    just_written_size = towrite_size
                    write(''.join(towrite))
                if not headers_sent or (use_chunked[0] and just_written_size):
                    write('')
            except Exception:
                self.close_connection = 1
                exc = traceback.format_exc()
                print exc
                if not headers_set:
                    start_response("500 Internal Server Error",
                                   [('Content-type', 'text/plain')])
                write(exc)
        finally:
            if hasattr(result, 'close'):
                result.close()
            # NOTE(review): CONTENT_LENGTH is stored as a str in the environ,
            # so this int-vs-str comparison is always True under CPython 2
            # whenever the header is present -- confirm intended.
            if (self.environ['eventlet.input'].position
                < self.environ.get('CONTENT_LENGTH', 0)):
                ## Read and discard body if there was no pending 100-continue
                if not self.environ['eventlet.input'].wfile:
                    while self.environ['eventlet.input'].read(MINIMUM_CHUNK_SIZE):
                        pass
            finish = time.time()
            self.server.log_message(self.server.log_format % dict(
                client_ip=self.get_client_ip(),
                date_time=self.log_date_time_string(),
                request_line=self.requestline,
                status_code=status_code[0],
                body_length=length[0],
                wall_seconds=finish - start))
    def get_client_ip(self):
        """Return the client IP, optionally prefixed by X-Forwarded-For."""
        client_ip = self.client_address[0]
        if self.server.log_x_forwarded_for:
            forward = self.headers.get('X-Forwarded-For', '').replace(' ', '')
            if forward:
                client_ip = "%s,%s" % (forward, client_ip)
        return client_ip
    def get_environ(self):
        """Build the WSGI environ dict for this request, including the
        eventlet.input body reader (with any pending 100-continue)."""
        env = self.server.get_environ()
        env['REQUEST_METHOD'] = self.command
        env['SCRIPT_NAME'] = ''
        if '?' in self.path:
            path, query = self.path.split('?', 1)
        else:
            path, query = self.path, ''
        env['PATH_INFO'] = urllib.unquote(path)
        env['QUERY_STRING'] = query
        if self.headers.typeheader is None:
            env['CONTENT_TYPE'] = self.headers.type
        else:
            env['CONTENT_TYPE'] = self.headers.typeheader
        length = self.headers.getheader('content-length')
        if length:
            env['CONTENT_LENGTH'] = length
        env['SERVER_PROTOCOL'] = 'HTTP/1.0'
        host, port = self.request.getsockname()
        env['SERVER_NAME'] = host
        env['SERVER_PORT'] = str(port)
        env['REMOTE_ADDR'] = self.client_address[0]
        env['GATEWAY_INTERFACE'] = 'CGI/1.1'
        # Fold remaining headers into HTTP_* keys, joining duplicates with
        # commas; keys already present in env win.
        for h in self.headers.headers:
            k, v = h.split(':', 1)
            k = k.replace('-', '_').upper()
            v = v.strip()
            if k in env:
                continue
            envk = 'HTTP_' + k
            if envk in env:
                env[envk] += ',' + v
            else:
                env[envk] = v
        if env.get('HTTP_EXPECT') == '100-continue':
            wfile = self.wfile
            wfile_line = 'HTTP/1.1 100 Continue\r\n\r\n'
        else:
            wfile = None
            wfile_line = None
        chunked = env.get('HTTP_TRANSFER_ENCODING', '').lower() == 'chunked'
        env['wsgi.input'] = env['eventlet.input'] = Input(
            self.rfile, length, wfile=wfile, wfile_line=wfile_line,
            chunked_input=chunked)
        return env
    def finish(self):
        """Flush buffers, then shut down and close the connection safely."""
        BaseHTTPServer.BaseHTTPRequestHandler.finish(self)
        greenio.shutdown_safe(self.connection)
        self.connection.close()
class Server(BaseHTTPServer.HTTPServer):
    """Holder for the WSGI app plus per-server configuration.

    Tracks in-flight requests and hands each accepted connection to the
    configured protocol class (HttpProtocol by default).
    """
    def __init__(self,
                 socket,
                 address,
                 app,
                 log=None,
                 environ=None,
                 max_http_version=None,
                 protocol=HttpProtocol,
                 minimum_chunk_size=None,
                 log_x_forwarded_for=True,
                 keepalive=True,
                 log_format=DEFAULT_LOG_FORMAT):
        self.outstanding_requests = 0
        self.socket = socket
        self.address = address
        if log:
            self.log = log
        else:
            self.log = sys.stderr
        self.app = app
        self.keepalive = keepalive
        self.environ = environ
        self.max_http_version = max_http_version
        self.protocol = protocol
        self.pid = os.getpid()
        # NOTE: this mutates the protocol CLASS attribute, so the setting
        # is shared by every server using the same protocol class.
        if minimum_chunk_size is not None:
            protocol.minimum_chunk_size = minimum_chunk_size
        self.log_x_forwarded_for = log_x_forwarded_for
        self.log_format = log_format
    def get_environ(self):
        """Return the base WSGI environ merged with user-supplied entries."""
        d = {
            'wsgi.errors': sys.stderr,
            'wsgi.version': (1, 0),
            'wsgi.multithread': True,
            'wsgi.multiprocess': False,
            'wsgi.run_once': False,
            'wsgi.url_scheme': 'http',
        }
        if self.environ is not None:
            d.update(self.environ)
        return d
    # Python 2 tuple-parameter unpacking: receives the (socket, address)
    # pair exactly as produced by sock.accept().
    def process_request(self, (socket, address)):
        proto = self.protocol(socket, address, self)
        proto.handle()
    def log_message(self, message):
        """Write one line to the configured log file object."""
        self.log.write(message + '\n')
# If the stdlib ssl module is available, also tolerate SSL-layer EOF/error
# conditions in the accept loop; otherwise only plain socket errors are
# swallowed.
try:
    import ssl
    ACCEPT_EXCEPTIONS = (socket.error, ssl.SSLError)
    ACCEPT_ERRNO = set((errno.EPIPE, errno.EBADF, errno.ECONNRESET,
                        ssl.SSL_ERROR_EOF, ssl.SSL_ERROR_SSL))
except ImportError:
    ACCEPT_EXCEPTIONS = (socket.error,)
    ACCEPT_ERRNO = set((errno.EPIPE, errno.EBADF, errno.ECONNRESET))
def server(sock, site,
           log=None,
           environ=None,
           max_size=None,
           max_http_version=DEFAULT_MAX_HTTP_VERSION,
           protocol=HttpProtocol,
           server_event=None,
           minimum_chunk_size=None,
           log_x_forwarded_for=True,
           custom_pool=None,
           keepalive=True,
           log_format=DEFAULT_LOG_FORMAT):
    """ Start up a wsgi server handling requests from the supplied server
    socket. This function loops forever. The *sock* object will be closed after server exits,
    but the underlying file descriptor will remain open, so if you have a dup() of *sock*,
    it will remain usable.

    :param sock: Server socket, must be already bound to a port and listening.
    :param site: WSGI application function.
    :param log: File-like object that logs should be written to. If not specified, sys.stderr is used.
    :param environ: Additional parameters that go into the environ dictionary of every request.
    :param max_size: Maximum number of client connections opened at any time by this server.
    :param max_http_version: Set to "HTTP/1.0" to make the server pretend it only supports HTTP 1.0. This can help with applications or clients that don't behave properly using HTTP 1.1.
    :param protocol: Protocol class. Deprecated.
    :param server_event: Used to collect the Server object. Deprecated.
    :param minimum_chunk_size: Minimum size in bytes for http chunks. This can be used to improve performance of applications which yield many small strings, though using it technically violates the WSGI spec.
    :param log_x_forwarded_for: If True (the default), logs the contents of the x-forwarded-for header in addition to the actual client ip address in the 'client_ip' field of the log line.
    :param custom_pool: A custom GreenPool instance which is used to spawn client green threads. If this is supplied, max_size is ignored.
    :param keepalive: If set to False, disables keepalives on the server; all connections will be closed after serving one request.
    :param log_format: A python format string that is used as the template to generate log lines. The following values can be formatted into it: client_ip, date_time, request_line, status_code, body_length, wall_seconds. Look the default for an example of how to use this.
    """
    serv = Server(sock, sock.getsockname(),
                  site, log,
                  environ=environ,
                  max_http_version=max_http_version,
                  protocol=protocol,
                  minimum_chunk_size=minimum_chunk_size,
                  log_x_forwarded_for=log_x_forwarded_for,
                  keepalive=keepalive,
                  log_format=log_format)
    if server_event is not None:
        # Deprecated hand-off of the Server object to the caller.
        server_event.send(serv)
    if max_size is None:
        max_size = DEFAULT_MAX_SIMULTANEOUS_REQUESTS
    if custom_pool is not None:
        # Caller-supplied pool wins; max_size is ignored in this case.
        pool = custom_pool
    else:
        pool = greenpool.GreenPool(max_size)
    try:
        host, port = sock.getsockname()
        port = ':%s' % (port, )
        # Heuristic: only SSL-wrapped sockets have do_handshake().
        if hasattr(sock, 'do_handshake'):
            scheme = 'https'
            if port == ':443':
                port = ''
        else:
            scheme = 'http'
            if port == ':80':
                port = ''
        serv.log.write("(%s) wsgi starting up on %s://%s%s/\n" % (
            os.getpid(), scheme, host, port))
        while True:
            try:
                client_socket = sock.accept()
                try:
                    pool.spawn_n(serv.process_request, client_socket)
                except AttributeError:
                    # Legacy pool types lack spawn_n; warn and fall back to
                    # the old execute_async API.
                    warnings.warn("wsgi's pool should be an instance of " \
                        "eventlet.greenpool.GreenPool, is %s. Please convert your"\
                        " call site to use GreenPool instead" % type(pool),
                        DeprecationWarning, stacklevel=2)
                    pool.execute_async(serv.process_request, client_socket)
            except ACCEPT_EXCEPTIONS, e:
                # Tolerate clients that disconnected during accept();
                # anything else is a real error and propagates.
                if get_errno(e) not in ACCEPT_ERRNO:
                    raise
            except (KeyboardInterrupt, SystemExit):
                serv.log.write("wsgi exiting\n")
                break
    finally:
        try:
            # NOTE: It's not clear whether we want this to leave the
            # socket open or close it. Use cases like Spawning want
            # the underlying fd to remain open, but if we're going
            # that far we might as well not bother closing sock at
            # all.
            sock.close()
        except socket.error, e:
            if get_errno(e) not in BROKEN_SOCK:
                traceback.print_exc()
# |
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from bigdl.util.common import JavaValue
from bigdl.util.common import callBigDlFunc
from bigdl.util.common import JTensor
from bigdl.nn.layer import Layer
import numpy as np
# Python 3 removed the `long` and `unicode` builtins; alias them so the
# rest of the module can keep using the Python 2 names.  Compare the
# version_info tuple rather than the version *string* ('10.0' < '3').
if sys.version_info >= (3,):
    long = int
    unicode = str
class Criterion(JavaValue):
    """
    Criterion is helpful to train a neural network.
    Given an input and a target, they compute a gradient according to a given loss function.
    """
    def __init__(self, jvalue, bigdl_type, *args):
        # Reuse an existing Py4J java object when one is supplied; otherwise
        # ask the JVM to construct the criterion named after this class.
        self.value = jvalue if jvalue else callBigDlFunc(
            bigdl_type, JavaValue.jvm_class_constructor(self), *args)
        self.bigdl_type = bigdl_type

    def __str__(self):
        return self.value.toString()

    def forward(self, input, target):
        """
        NB: It's for debug only, please use optimizer.optimize() in production.
        Takes an input object, and computes the corresponding loss of the criterion,
        compared with `target`

        :param input: ndarray or list of ndarray
        :param target: ndarray or list of ndarray
        :return: value of loss
        """
        jinput, input_is_table = Layer.check_input(input)
        jtarget, target_is_table = Layer.check_input(target)
        output = callBigDlFunc(self.bigdl_type,
                               "criterionForward",
                               self.value,
                               jinput,
                               input_is_table,
                               jtarget,
                               target_is_table)
        return output

    def backward(self, input, target):
        """
        NB: It's for debug only, please use optimizer.optimize() in production.
        Performs a back-propagation step through the criterion, with respect to the given input.

        :param input: ndarray or list of ndarray
        :param target: ndarray or list of ndarray
        :return: ndarray
        """
        jinput, input_is_table = Layer.check_input(input)
        jtarget, target_is_table = Layer.check_input(target)
        output = callBigDlFunc(self.bigdl_type,
                               "criterionBackward",
                               self.value,
                               jinput,
                               input_is_table,
                               jtarget,
                               target_is_table)
        return Layer.convert_output(output)

    @classmethod
    def of(cls, jcriterion, bigdl_type="float"):
        """
        Create a python Criterion by a java criterion object

        :param jcriterion: A java criterion object which created by Py4j
        :return: a criterion.
        """
        # BUGFIX: arguments were previously passed as (bigdl_type, jcriterion),
        # swapped relative to __init__(self, jvalue, bigdl_type, *args).  That
        # only worked because both attributes were overwritten right after.
        criterion = Criterion(jcriterion, bigdl_type)
        criterion.value = jcriterion
        criterion.bigdl_type = bigdl_type
        return criterion
class ClassNLLCriterion(Criterion):
    '''
    The negative log likelihood criterion. It is useful to train a classification problem with n
    classes. If provided, the optional argument weights should be a 1D Tensor assigning weight to
    each of the classes. This is particularly useful when you have an unbalanced training set.
    The input given through a forward() is expected to contain log-probabilities/probabilities of
    each class: input has to be a 1D Tensor of size n. Obtaining log-probabilities/probabilities
    in a neural network is easily achieved by adding a LogSoftMax/SoftMax layer in the last layer
    of your neural network. You may use CrossEntropyCriterion instead, if you prefer not to add an
    extra layer to your network. This criterion expects a class index (1 to the number of class) as
    target when calling forward(input, target) and backward(input, target).

    In the log-probabilities case,
    The loss can be described as:
        loss(x, class) = -x[class]
    or in the case of the weights argument it is specified as follows:
        loss(x, class) = -weights[class] * x[class]
    Due to the behaviour of the backend code, it is necessary to set sizeAverage to false when
    calculating losses in non-batch mode.

    Note that if the target is `-1`, the training process will skip this sample.
    In other will, the forward process will return zero output and the backward process
    will also return zero `gradInput`.

    By default, the losses are averaged over observations for each minibatch. However, if the field
    sizeAverage is set to false, the losses are instead summed for each minibatch.

    In particular, when weights=None, size_average=True and logProbAsInput=False, this is same as
    `sparse_categorical_crossentropy` loss in keras.

    :param weights: weights of each class
    :param size_average: whether to average or not
    :param logProbAsInput: indicating whether to accept log-probabilities or probabilities as input.

    >>> np.random.seed(123)
    >>> weights = np.random.uniform(0, 1, (2,)).astype("float32")
    >>> classNLLCriterion = ClassNLLCriterion(weights, True, True)
    creating: createClassNLLCriterion
    >>> classNLLCriterion = ClassNLLCriterion()
    creating: createClassNLLCriterion
    '''

    def __init__(self, weights=None, size_average=True, logProbAsInput=True,
                 bigdl_type="float"):
        # Ship the optional class-weight ndarray to the JVM as a JTensor
        # (None passes through unchanged).
        jweights = JTensor.from_ndarray(weights)
        super(ClassNLLCriterion, self).__init__(None, bigdl_type, jweights,
                                                size_average, logProbAsInput)
class MSECriterion(Criterion):
    '''
    Creates a criterion that measures the mean squared error between n elements
    in the input x and output y:
```
    loss(x, y) = 1/n \sum |x_i - y_i|^2
```

    If x and y are d-dimensional Tensors with a total of n elements,
    the sum operation still operates over all the elements, and divides by n.
    The two Tensors must have the same number of elements (but their sizes might be different).
    The division by n can be avoided if one sets the internal variable sizeAverage to false.
    By default, the losses are averaged over observations for each minibatch. However,
    if the field sizeAverage is set to false, the losses are instead summed.


    >>> mSECriterion = MSECriterion()
    creating: createMSECriterion
    '''

    def __init__(self, bigdl_type="float"):
        # No extra constructor arguments beyond the numeric type.
        super(MSECriterion, self).__init__(None, bigdl_type)
class AbsCriterion(Criterion):
    '''
    measures the mean absolute value of the element-wise difference between input


    >>> absCriterion = AbsCriterion(True)
    creating: createAbsCriterion
    '''

    def __init__(self, size_average=True, bigdl_type="float"):
        # Forward the averaging flag straight to the JVM-side constructor.
        super(AbsCriterion, self).__init__(None, bigdl_type, size_average)
class ClassSimplexCriterion(Criterion):
    '''
    ClassSimplexCriterion implements a criterion for classification.
    It learns an embedding per class, where each class' embedding is a
    point on an (N-1)-dimensional simplex, where N is the number of classes.


    :param nClasses: the number of classes.


    >>> classSimplexCriterion = ClassSimplexCriterion(2)
    creating: createClassSimplexCriterion
    '''

    def __init__(self, n_classes, bigdl_type="float"):
        # The class count is the only JVM-side constructor argument.
        super(ClassSimplexCriterion, self).__init__(None, bigdl_type, n_classes)
class CosineDistanceCriterion(Criterion):
    """
    Creates a criterion that measures the loss given an input and target,
    Loss = 1 - cos(x, y)


    >>> cosineDistanceCriterion = CosineDistanceCriterion(True)
    creating: createCosineDistanceCriterion
    >>> cosineDistanceCriterion.forward(np.array([1.0, 2.0, 3.0, 4.0, 5.0]),
    ...   np.array([5.0, 4.0, 3.0, 2.0, 1.0]))
    0.07272728
    """

    def __init__(self, size_average=True, bigdl_type="float"):
        # Forward the averaging flag straight to the JVM-side constructor.
        super(CosineDistanceCriterion, self).__init__(None, bigdl_type,
                                                      size_average)
class CosineEmbeddingCriterion(Criterion):
    """
    Creates a criterion that measures the loss given an input x = {x1, x2},
    a table of two Tensors, and a Tensor label y with values 1 or -1.


    :param margin: a number from -1 to 1, 0 to 0.5 is suggested


    >>> cosineEmbeddingCriterion = CosineEmbeddingCriterion(1e-5, True)
    creating: createCosineEmbeddingCriterion
    >>> cosineEmbeddingCriterion.forward([np.array([1.0, 2.0, 3.0, 4.0, 5.0]),
    ...                                   np.array([5.0, 4.0, 3.0, 2.0, 1.0])],
    ...                                  [np.ones(5)])
    0.0
    """

    def __init__(self, margin=0.0, size_average=True, bigdl_type="float"):
        # Margin first, then the averaging flag, matching the Scala signature.
        super(CosineEmbeddingCriterion, self).__init__(None, bigdl_type,
                                                       margin, size_average)
class DistKLDivCriterion(Criterion):
    '''
    The Kullback-Leibler divergence criterion


    :param sizeAverage:


    >>> distKLDivCriterion = DistKLDivCriterion(True)
    creating: createDistKLDivCriterion
    '''

    def __init__(self, size_average=True, bigdl_type="float"):
        # Forward the averaging flag straight to the JVM-side constructor.
        super(DistKLDivCriterion, self).__init__(None, bigdl_type, size_average)
class CategoricalCrossEntropy(Criterion):
    """
    This criterion is same with cross entropy criterion, except it takes a one-hot format target
    tensor
    >>> cce = CategoricalCrossEntropy()
    creating: createCategoricalCrossEntropy
    """

    def __init__(self, bigdl_type="float"):
        # No extra constructor arguments beyond the numeric type.
        super(CategoricalCrossEntropy, self).__init__(None, bigdl_type)
class HingeEmbeddingCriterion(Criterion):
    '''
    Creates a criterion that measures the loss given an
    input x which is a 1-dimensional vector and a label y (1 or -1).
    This is usually used for measuring whether two inputs are similar
    or dissimilar,
    e.g. using the L1 pairwise distance, and is typically used for
    learning nonlinear embeddings or semi-supervised learning.

    If x and y are n-dimensional Tensors, the sum operation still operates
    over all the elements, and divides by n (this can be avoided if one sets
    the internal variable sizeAverage to false). The margin has a default
    value of 1, or can be set in the constructor.


    >>> hingeEmbeddingCriterion = HingeEmbeddingCriterion(1e-5, True)
    creating: createHingeEmbeddingCriterion
    '''

    def __init__(self, margin=1.0, size_average=True, bigdl_type="float"):
        # Margin first, then the averaging flag, matching the Scala signature.
        super(HingeEmbeddingCriterion, self).__init__(None, bigdl_type,
                                                      margin, size_average)
class L1HingeEmbeddingCriterion(Criterion):
    '''
    Creates a criterion that measures the loss given an input x = {x1, x2},
    a table of two Tensors, and a label y (1 or -1):


    :param margin:


    >>> l1HingeEmbeddingCriterion = L1HingeEmbeddingCriterion(1e-5)
    creating: createL1HingeEmbeddingCriterion
    >>> l1HingeEmbeddingCriterion = L1HingeEmbeddingCriterion()
    creating: createL1HingeEmbeddingCriterion
    >>> input1 = np.array([2.1, -2.2])
    >>> input2 = np.array([-0.55, 0.298])
    >>> input = [input1, input2]
    >>> target = np.array([1.0])
    >>> result = l1HingeEmbeddingCriterion.forward(input, target)
    >>> (result == 5.148)
    True
    '''

    def __init__(self, margin=1.0, bigdl_type="float"):
        # The margin is the only JVM-side constructor argument.
        super(L1HingeEmbeddingCriterion, self).__init__(None, bigdl_type, margin)
class MarginCriterion(Criterion):
    '''
    Creates a criterion that optimizes a two-class classification hinge loss (margin-based loss)
    between input x (a Tensor of dimension 1) and output y.

    When margin = 1, size_average = True and squared = False, this is the same as hinge loss in keras;
    When margin = 1, size_average = False and squared = True, this is the same as squared_hinge loss in keras.

    :param margin: if unspecified, is by default 1.
    :param size_average: size average in a mini-batch
    :param squared: whether to calculate the squared hinge loss


    >>> marginCriterion = MarginCriterion(1e-5, True, False)
    creating: createMarginCriterion
    '''

    def __init__(self, margin=1.0, size_average=True, squared=False,
                 bigdl_type="float"):
        # Argument order mirrors the Scala constructor.
        super(MarginCriterion, self).__init__(None, bigdl_type,
                                              margin, size_average, squared)
class MarginRankingCriterion(Criterion):
    '''
    Creates a criterion that measures the loss given an input x = {x1, x2},
    a table of two Tensors of size 1 (they contain only scalars), and a label y (1 or -1).
    In batch mode, x is a table of two Tensors of size batchsize, and y is a Tensor of size
    batchsize containing 1 or -1 for each corresponding pair of elements in the input Tensor.
    If y == 1 then it assumed the first input should be ranked higher (have a larger value) than
    the second input, and vice-versa for y == -1.


    :param margin:


    >>> marginRankingCriterion = MarginRankingCriterion(1e-5, True)
    creating: createMarginRankingCriterion
    '''

    def __init__(self, margin=1.0, size_average=True, bigdl_type="float"):
        # Margin first, then the averaging flag, matching the Scala signature.
        super(MarginRankingCriterion, self).__init__(None, bigdl_type,
                                                     margin, size_average)
class MultiCriterion(Criterion):
    '''
    a weighted sum of other criterions each applied to the same input and target


    >>> multiCriterion = MultiCriterion()
    creating: createMultiCriterion
    >>> mSECriterion = MSECriterion()
    creating: createMSECriterion
    >>> multiCriterion = multiCriterion.add(mSECriterion)
    >>> multiCriterion = multiCriterion.add(mSECriterion)
    '''

    def __init__(self, bigdl_type="float"):
        # Starts empty; criterions are attached afterwards via add().
        super(MultiCriterion, self).__init__(None, bigdl_type)

    def add(self, criterion, weight=1.0):
        # Delegate to the JVM-side add and return self for chaining.
        self.value.add(criterion.value, weight)
        return self
class MultiLabelMarginCriterion(Criterion):
    '''
    Creates a criterion that optimizes a multi-class multi-classification hinge loss (
    margin-based loss) between input x and output y (which is a Tensor of target class indices)

    :param size_average: size average in a mini-batch


    >>> multiLabelMarginCriterion = MultiLabelMarginCriterion(True)
    creating: createMultiLabelMarginCriterion
    '''

    def __init__(self, size_average=True, bigdl_type="float"):
        # Forward the averaging flag straight to the JVM-side constructor.
        super(MultiLabelMarginCriterion, self).__init__(None, bigdl_type,
                                                        size_average)
class ParallelCriterion(Criterion):
    '''
    ParallelCriterion is a weighted sum of other criterions each applied to a different input
    and target. Set repeatTarget = true to share the target for criterions.

    Use add(criterion[, weight]) method to add criterion. Where weight is a scalar(default 1).

    :param repeat_target: Whether to share the target for all criterions.


    >>> parallelCriterion = ParallelCriterion(True)
    creating: createParallelCriterion
    >>> mSECriterion = MSECriterion()
    creating: createMSECriterion
    >>> parallelCriterion = parallelCriterion.add(mSECriterion)
    >>> parallelCriterion = parallelCriterion.add(mSECriterion)
    '''

    def __init__(self, repeat_target=False, bigdl_type="float"):
        # Only the target-sharing flag is passed to the JVM constructor.
        super(ParallelCriterion, self).__init__(None, bigdl_type, repeat_target)

    def add(self, criterion, weight=1.0):
        # Delegate to the JVM-side add and return self for chaining.
        self.value.add(criterion.value, weight)
        return self
class KLDCriterion(Criterion):
    '''
    Computes the KL-divergence of the Gaussian distribution.
    >>> KLDCriterion = KLDCriterion()
    creating: createKLDCriterion
    '''

    def __init__(self, bigdl_type="float"):
        # No extra constructor arguments beyond the numeric type.
        super(KLDCriterion, self).__init__(None, bigdl_type)
class GaussianCriterion(Criterion):
    '''
    Computes the log-likelihood of a sample x given a Gaussian distribution p.
    >>> GaussianCriterion = GaussianCriterion()
    creating: createGaussianCriterion
    '''

    def __init__(self, bigdl_type="float"):
        # No extra constructor arguments beyond the numeric type.
        super(GaussianCriterion, self).__init__(None, bigdl_type)
class SmoothL1Criterion(Criterion):
    '''
    Creates a criterion that can be thought of as a smooth version of the AbsCriterion.
    It uses a squared term if the absolute element-wise error falls below 1.
    It is less sensitive to outliers than the MSECriterion and in some
    cases prevents exploding gradients (e.g. see "Fast R-CNN" paper by Ross Girshick).
```
                          | 0.5 * (x_i - y_i)^2^, if |x_i - y_i| < 1
    loss(x, y) = 1/n \sum |
                          | |x_i - y_i| - 0.5, otherwise
```
    If x and y are d-dimensional Tensors with a total of n elements,
    the sum operation still operates over all the elements, and divides by n.
    The division by n can be avoided if one sets the internal variable sizeAverage to false


    :param size_average: whether to average the loss


    >>> smoothL1Criterion = SmoothL1Criterion(True)
    creating: createSmoothL1Criterion
    '''

    def __init__(self, size_average=True, bigdl_type="float"):
        # Forward the averaging flag straight to the JVM-side constructor.
        super(SmoothL1Criterion, self).__init__(None, bigdl_type, size_average)
class SmoothL1CriterionWithWeights(Criterion):
    '''
    a smooth version of the AbsCriterion
    It uses a squared term if the absolute element-wise error falls below 1.
    It is less sensitive to outliers than the MSECriterion and in some cases
    prevents exploding gradients (e.g. see "Fast R-CNN" paper by Ross Girshick).

```
   d = (x - y) * w_in
   loss(x, y, w_in, w_out)
              | 0.5 * (sigma * d_i)^2 * w_out          if |d_i| < 1 / sigma / sigma
   = 1/n \sum |
              | (|d_i| - 0.5 / sigma / sigma) * w_out   otherwise
```

    >>> smoothL1CriterionWithWeights = SmoothL1CriterionWithWeights(1e-5, 1)
    creating: createSmoothL1CriterionWithWeights
    '''

    def __init__(self, sigma, num=0, bigdl_type="float"):
        # sigma controls the quadratic/linear switch point; num is the
        # element count used by the backend.
        super(SmoothL1CriterionWithWeights, self).__init__(None, bigdl_type,
                                                           sigma, num)
class SoftmaxWithCriterion(Criterion):
    '''
    Computes the multinomial logistic loss for a one-of-many classification task,
    passing real-valued predictions through a softmax to get a probability distribution over classes.
    It should be preferred over separate SoftmaxLayer + MultinomialLogisticLossLayer
    as its gradient computation is more numerically stable.

    :param ignoreLabel: (optional) Specify a label value thatshould be ignored when computing the loss.
    :param normalizeMode: How to normalize the output loss.


    >>> softmaxWithCriterion = SoftmaxWithCriterion()
    creating: createSoftmaxWithCriterion
    >>> softmaxWithCriterion = SoftmaxWithCriterion(1, "FULL")
    creating: createSoftmaxWithCriterion
    '''

    def __init__(self, ignore_label=None, normalize_mode="VALID",
                 bigdl_type="float"):
        # ignore_label may be None, meaning no label is skipped.
        super(SoftmaxWithCriterion, self).__init__(None, bigdl_type,
                                                   ignore_label, normalize_mode)
class TimeDistributedCriterion(Criterion):
    '''
    This class is intended to support inputs with 3 or more dimensions.
    Apply Any Provided Criterion to every temporal slice of an input.

    :param criterion: embedded criterion
    :param size_average: whether to divide the sequence length

    >>> td = TimeDistributedCriterion(ClassNLLCriterion())
    creating: createClassNLLCriterion
    creating: createTimeDistributedCriterion
    '''

    def __init__(self, criterion, size_average=False, bigdl_type="float"):
        # The wrapped criterion object is forwarded as-is to the JVM side.
        super(TimeDistributedCriterion, self).__init__(None, bigdl_type,
                                                       criterion, size_average)
class CrossEntropyCriterion(Criterion):
    """
    This criterion combines LogSoftMax and ClassNLLCriterion in one single class.

    :param weights: A tensor assigning weight to each of the classes

    >>> np.random.seed(123)
    >>> weights = np.random.uniform(0, 1, (2,)).astype("float32")
    >>> cec = CrossEntropyCriterion(weights)
    creating: createCrossEntropyCriterion
    >>> cec = CrossEntropyCriterion()
    creating: createCrossEntropyCriterion
    """

    def __init__(self, weights=None, size_average=True, bigdl_type="float"):
        # Ship the optional class-weight ndarray to the JVM as a JTensor
        # (None passes through unchanged).
        jweights = JTensor.from_ndarray(weights)
        super(CrossEntropyCriterion, self).__init__(None, bigdl_type,
                                                    jweights, size_average)
class BCECriterion(Criterion):
    '''
    Creates a criterion that measures the Binary Cross Entropy
    between the target and the output

    :param weights: weights for each class
    :param sizeAverage: whether to average the loss or not


    >>> np.random.seed(123)
    >>> weights = np.random.uniform(0, 1, (2,)).astype("float32")
    >>> bCECriterion = BCECriterion(weights)
    creating: createBCECriterion
    >>> bCECriterion = BCECriterion()
    creating: createBCECriterion
    '''

    def __init__(self, weights=None, size_average=True, bigdl_type="float"):
        # Ship the optional class-weight ndarray to the JVM as a JTensor
        # (None passes through unchanged).
        jweights = JTensor.from_ndarray(weights)
        super(BCECriterion, self).__init__(None, bigdl_type,
                                           jweights, size_average)
class MultiLabelSoftMarginCriterion(Criterion):
    '''
    A MultiLabel multiclass criterion based on sigmoid:
    the loss is:
```
     l(x,y) = - sum_i y[i] * log(p[i]) + (1 - y[i]) * log (1 - p[i])
```
    where p[i] = exp(x[i]) / (1 + exp(x[i]))
    and with weights:
```
     l(x,y) = - sum_i weights[i] (y[i] * log(p[i]) + (1 - y[i]) * log (1 - p[i]))
```

    >>> np.random.seed(123)
    >>> weights = np.random.uniform(0, 1, (2,)).astype("float32")
    >>> multiLabelSoftMarginCriterion = MultiLabelSoftMarginCriterion(weights)
    creating: createMultiLabelSoftMarginCriterion
    >>> multiLabelSoftMarginCriterion = MultiLabelSoftMarginCriterion()
    creating: createMultiLabelSoftMarginCriterion
    '''

    def __init__(self, weights=None, size_average=True, bigdl_type="float"):
        # Ship the optional class-weight ndarray to the JVM as a JTensor
        # (None passes through unchanged).
        jweights = JTensor.from_ndarray(weights)
        super(MultiLabelSoftMarginCriterion, self).__init__(None, bigdl_type,
                                                            jweights,
                                                            size_average)
class MultiMarginCriterion(Criterion):
    '''
    Creates a criterion that optimizes a multi-class classification hinge loss (margin-based loss)
    between input x and output y (which is a target class index).


    :param p:
    :param weights:
    :param margin:
    :param size_average:


    >>> np.random.seed(123)
    >>> weights = np.random.uniform(0, 1, (2,)).astype("float32")
    >>> multiMarginCriterion = MultiMarginCriterion(1,weights)
    creating: createMultiMarginCriterion
    >>> multiMarginCriterion = MultiMarginCriterion()
    creating: createMultiMarginCriterion
    '''

    def __init__(self, p=1, weights=None, margin=1.0, size_average=True,
                 bigdl_type="float"):
        # Optional class weights become a JTensor; argument order mirrors
        # the Scala constructor.
        jweights = JTensor.from_ndarray(weights)
        super(MultiMarginCriterion, self).__init__(None, bigdl_type,
                                                   p, jweights, margin,
                                                   size_average)
class SoftMarginCriterion(Criterion):
    """
    Creates a criterion that optimizes a two-class classification logistic loss
    between input x (a Tensor of dimension 1) and output y (which is a tensor
    containing either 1s or -1s).

```
           loss(x, y) = sum_i (log(1 + exp(-y[i]*x[i]))) / x:nElement()
```

    :param sizeaverage: The normalization by the number of elements in the inputcan be disabled by setting


    >>> softMarginCriterion = SoftMarginCriterion(False)
    creating: createSoftMarginCriterion
    >>> softMarginCriterion = SoftMarginCriterion()
    creating: createSoftMarginCriterion
    """

    def __init__(self, size_average=True, bigdl_type="float"):
        # Forward the averaging flag straight to the JVM-side constructor.
        super(SoftMarginCriterion, self).__init__(None, bigdl_type,
                                                  size_average)
class DiceCoefficientCriterion(Criterion):
    '''
    The Dice-Coefficient criterion
    input: Tensor,target: Tensor

```
    return:      2 * (input intersection target)
            1 - ----------------------------------
                    input union target
```

    >>> diceCoefficientCriterion = DiceCoefficientCriterion(size_average = True, epsilon = 1.0)
    creating: createDiceCoefficientCriterion
    >>> diceCoefficientCriterion = DiceCoefficientCriterion()
    creating: createDiceCoefficientCriterion
    '''

    def __init__(self, size_average=True, epsilon=1.0, bigdl_type="float"):
        # epsilon guards against division by zero in the Dice ratio.
        super(DiceCoefficientCriterion, self).__init__(None, bigdl_type,
                                                       size_average, epsilon)
class L1Cost(Criterion):
    '''
    compute L1 norm for input, and sign of input

    >>> l1Cost = L1Cost()
    creating: createL1Cost
    '''

    def __init__(self, bigdl_type="float"):
        # No extra constructor arguments beyond the numeric type.
        super(L1Cost, self).__init__(None, bigdl_type)
class CosineProximityCriterion(Criterion):
    '''
    compute the negative of the mean cosine proximity between predictions and targets.
```
   x'(i) = x(i) / sqrt(max(sum(x(i)^2), 1e-12))
   y'(i) = y(i) / sqrt(max(sum(x(i)^2), 1e-12))
   cosine_proximity(x, y) = sum_i(-1 * x'(i) * y'(i))
```

    >>> cosineProximityCriterion = CosineProximityCriterion()
    creating: createCosineProximityCriterion
    '''

    def __init__(self, bigdl_type="float"):
        # No extra constructor arguments beyond the numeric type.
        super(CosineProximityCriterion, self).__init__(None, bigdl_type)
class MeanAbsolutePercentageCriterion(Criterion):
    '''
    This method is same as `mean_absolute_percentage_error` loss in keras.
    It caculates diff = K.abs((y - x) / K.clip(K.abs(y), K.epsilon(), Double.MaxValue))
    and return 100 * K.mean(diff) as outpout. Here, the x and y can have or not have a batch.
    >>> error = MeanAbsolutePercentageCriterion()
    creating: createMeanAbsolutePercentageCriterion
    '''

    def __init__(self, bigdl_type="float"):
        # No extra constructor arguments beyond the numeric type.
        super(MeanAbsolutePercentageCriterion, self).__init__(None, bigdl_type)
class MeanSquaredLogarithmicCriterion(Criterion):
    '''
    This method is same as `mean_squared_logarithmic_error` loss in keras.
    It calculates: first_log = K.log(K.clip(y, K.epsilon(), Double.MaxValue) + 1.)
    second_log = K.log(K.clip(x, K.epsilon(), Double.MaxValue) + 1.)
    and output K.mean(K.square(first_log - second_log)). Here, the x and y can have or not have a batch.
    >>> error = MeanSquaredLogarithmicCriterion()
    creating: createMeanSquaredLogarithmicCriterion
    '''

    def __init__(self, bigdl_type="float"):
        # No extra constructor arguments beyond the numeric type.
        super(MeanSquaredLogarithmicCriterion, self).__init__(None, bigdl_type)
class KullbackLeiblerDivergenceCriterion(Criterion):
    '''
    compute Kullback Leibler DivergenceCriterion error for intput and target
    This method is same as `kullback_leibler_divergence` loss in keras. Loss calculated as:
    y_true = K.clip(input, K.epsilon(), 1)
    y_pred = K.clip(target, K.epsilon(), 1)
    and output K.sum(y_true * K.log(y_true / y_pred), axis=-1)

    >>> error = KullbackLeiblerDivergenceCriterion()
    creating: createKullbackLeiblerDivergenceCriterion
    '''

    def __init__(self, bigdl_type="float"):
        # No extra constructor arguments beyond the numeric type.
        super(KullbackLeiblerDivergenceCriterion, self).__init__(None,
                                                                 bigdl_type)
class PoissonCriterion(Criterion):
    '''
    compute Poisson error for input and target, loss calculated as:
    mean(input - target * K.log(input + K.epsilon()), axis=-1)
    >>> error = PoissonCriterion()
    creating: createPoissonCriterion
    '''

    def __init__(self, bigdl_type="float"):
        # No extra constructor arguments beyond the numeric type.
        super(PoissonCriterion, self).__init__(None, bigdl_type)
def _test():
    """Run this module's doctests against a local SparkContext.

    Exits the process with a non-zero status if any doctest fails.
    """
    import doctest
    from pyspark import SparkContext
    from bigdl.nn import criterion
    from bigdl.util.common import init_engine
    from bigdl.util.common import create_spark_conf
    globs = criterion.__dict__.copy()
    sc = SparkContext(master="local[4]", appName="test criterion",
                      conf=create_spark_conf())
    globs['sc'] = sc
    init_engine()
    (failure_count, test_count) = doctest.testmod(globs=globs,
                                                  optionflags=doctest.ELLIPSIS)
    if failure_count:
        # Use sys.exit instead of the bare `exit` builtin: `exit` is only
        # injected by the site module and is absent under `python -S` or
        # in frozen/embedded interpreters.
        sys.exit(-1)


if __name__ == "__main__":
    _test()
# Fix KLDCriterion forward (#2078)
# * fix KLDCriterion
# * add python doc
# * add unit tests
# * add api doc
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from bigdl.util.common import JavaValue
from bigdl.util.common import callBigDlFunc
from bigdl.util.common import JTensor
from bigdl.nn.layer import Layer
import numpy as np
# Python 3 removed the `long` and `unicode` builtins; alias them so the
# rest of the module can keep using the Python 2 names.  Compare the
# version_info tuple rather than the version *string* ('10.0' < '3').
if sys.version_info >= (3,):
    long = int
    unicode = str
class Criterion(JavaValue):
    """
    Criterion is helpful to train a neural network.
    Given an input and a target, they compute a gradient according to a given loss function.
    """
    def __init__(self, jvalue, bigdl_type, *args):
        # Reuse an existing Py4J java object when one is supplied; otherwise
        # ask the JVM to construct the criterion named after this class.
        self.value = jvalue if jvalue else callBigDlFunc(
            bigdl_type, JavaValue.jvm_class_constructor(self), *args)
        self.bigdl_type = bigdl_type

    def __str__(self):
        return self.value.toString()

    def forward(self, input, target):
        """
        NB: It's for debug only, please use optimizer.optimize() in production.
        Takes an input object, and computes the corresponding loss of the criterion,
        compared with `target`

        :param input: ndarray or list of ndarray
        :param target: ndarray or list of ndarray
        :return: value of loss
        """
        jinput, input_is_table = Layer.check_input(input)
        jtarget, target_is_table = Layer.check_input(target)
        output = callBigDlFunc(self.bigdl_type,
                               "criterionForward",
                               self.value,
                               jinput,
                               input_is_table,
                               jtarget,
                               target_is_table)
        return output

    def backward(self, input, target):
        """
        NB: It's for debug only, please use optimizer.optimize() in production.
        Performs a back-propagation step through the criterion, with respect to the given input.

        :param input: ndarray or list of ndarray
        :param target: ndarray or list of ndarray
        :return: ndarray
        """
        jinput, input_is_table = Layer.check_input(input)
        jtarget, target_is_table = Layer.check_input(target)
        output = callBigDlFunc(self.bigdl_type,
                               "criterionBackward",
                               self.value,
                               jinput,
                               input_is_table,
                               jtarget,
                               target_is_table)
        return Layer.convert_output(output)

    @classmethod
    def of(cls, jcriterion, bigdl_type="float"):
        """
        Create a python Criterion by a java criterion object

        :param jcriterion: A java criterion object which created by Py4j
        :return: a criterion.
        """
        # BUGFIX: arguments were previously passed as (bigdl_type, jcriterion),
        # swapped relative to __init__(self, jvalue, bigdl_type, *args).  That
        # only worked because both attributes were overwritten right after.
        criterion = Criterion(jcriterion, bigdl_type)
        criterion.value = jcriterion
        criterion.bigdl_type = bigdl_type
        return criterion
class ClassNLLCriterion(Criterion):
    '''
    The negative log likelihood criterion. It is useful to train a classification problem with n
    classes. If provided, the optional argument weights should be a 1D Tensor assigning weight to
    each of the classes. This is particularly useful when you have an unbalanced training set.

    The input given through a forward() is expected to contain log-probabilities/probabilities of
    each class: input has to be a 1D Tensor of size n. Obtaining log-probabilities/probabilities
    in a neural network is easily achieved by adding a LogSoftMax/SoftMax layer in the last layer
    of your neural network. You may use CrossEntropyCriterion instead, if you prefer not to add an
    extra layer to your network. This criterion expects a class index (1 to the number of class) as
    target when calling forward(input, target) and backward(input, target).

    In the log-probabilities case, the loss can be described as:
        loss(x, class) = -x[class]
    or, with the weights argument, as:
        loss(x, class) = -weights[class] * x[class]

    Due to the behaviour of the backend code, it is necessary to set sizeAverage to false when
    calculating losses in non-batch mode.

    Note that if the target is `-1`, the training process will skip this sample.
    In other words, the forward process will return zero output and the backward process
    will also return zero `gradInput`.

    By default, the losses are averaged over observations for each minibatch. However, if the field
    sizeAverage is set to false, the losses are instead summed for each minibatch.

    In particular, when weights=None, size_average=True and logProbAsInput=False, this is same as
    `sparse_categorical_crossentropy` loss in keras.

    :param weights: weights of each class
    :param size_average: whether to average or not
    :param logProbAsInput: indicating whether to accept log-probabilities or probabilities as input.

    >>> np.random.seed(123)
    >>> weights = np.random.uniform(0, 1, (2,)).astype("float32")
    >>> classNLLCriterion = ClassNLLCriterion(weights, True, True)
    creating: createClassNLLCriterion
    >>> classNLLCriterion = ClassNLLCriterion()
    creating: createClassNLLCriterion
    '''

    def __init__(self, weights=None, size_average=True, logProbAsInput=True,
                 bigdl_type="float"):
        # A weights value of None passes through JTensor.from_ndarray as None.
        jweights = JTensor.from_ndarray(weights)
        super(ClassNLLCriterion, self).__init__(None, bigdl_type, jweights,
                                                size_average, logProbAsInput)
class MSECriterion(Criterion):
    '''
    Creates a criterion that measures the mean squared error between n elements
    in the input x and output y:
    ```
    loss(x, y) = 1/n \sum |x_i - y_i|^2
    ```

    If x and y are d-dimensional Tensors with a total of n elements,
    the sum operation still operates over all the elements, and divides by n.
    The two Tensors must have the same number of elements (but their sizes might be different).
    The division by n can be avoided if one sets the internal variable sizeAverage to false.
    By default, the losses are averaged over observations for each minibatch. However,
    if the field sizeAverage is set to false, the losses are instead summed.

    >>> mSECriterion = MSECriterion()
    creating: createMSECriterion
    '''

    def __init__(self, bigdl_type="float"):
        # The JVM-side MSECriterion constructor takes no extra arguments.
        Criterion.__init__(self, None, bigdl_type)
class AbsCriterion(Criterion):
    '''
    Measures the mean absolute value of the element-wise difference between input.

    >>> absCriterion = AbsCriterion(True)
    creating: createAbsCriterion
    '''

    def __init__(self, size_average=True, bigdl_type="float"):
        super(AbsCriterion, self).__init__(None, bigdl_type, size_average)
class ClassSimplexCriterion(Criterion):
    '''
    ClassSimplexCriterion implements a criterion for classification.
    It learns an embedding per class, where each class' embedding is a
    point on an (N-1)-dimensional simplex, where N is the number of classes.

    :param nClasses: the number of classes.

    >>> classSimplexCriterion = ClassSimplexCriterion(2)
    creating: createClassSimplexCriterion
    '''

    def __init__(self, n_classes, bigdl_type="float"):
        super(ClassSimplexCriterion, self).__init__(None, bigdl_type, n_classes)
class CosineDistanceCriterion(Criterion):
    """
    Creates a criterion that measures the loss given an input and target,
    Loss = 1 - cos(x, y)

    >>> cosineDistanceCriterion = CosineDistanceCriterion(True)
    creating: createCosineDistanceCriterion
    >>> cosineDistanceCriterion.forward(np.array([1.0, 2.0, 3.0, 4.0, 5.0]),
    ...   np.array([5.0, 4.0, 3.0, 2.0, 1.0]))
    0.07272728
    """

    def __init__(self, size_average=True, bigdl_type="float"):
        super(CosineDistanceCriterion, self).__init__(None, bigdl_type,
                                                      size_average)
class CosineEmbeddingCriterion(Criterion):
    """
    Creates a criterion that measures the loss given an input x = {x1, x2},
    a table of two Tensors, and a Tensor label y with values 1 or -1.

    :param margin: a number from -1 to 1, 0 to 0.5 is suggested

    >>> cosineEmbeddingCriterion = CosineEmbeddingCriterion(1e-5, True)
    creating: createCosineEmbeddingCriterion
    >>> cosineEmbeddingCriterion.forward([np.array([1.0, 2.0, 3.0, 4.0, 5.0]),
    ...   np.array([5.0, 4.0, 3.0, 2.0, 1.0])],
    ...   [np.ones(5)])
    0.0
    """

    def __init__(self, margin=0.0, size_average=True, bigdl_type="float"):
        super(CosineEmbeddingCriterion, self).__init__(None, bigdl_type,
                                                       margin, size_average)
class DistKLDivCriterion(Criterion):
    '''
    The Kullback-Leibler divergence criterion.

    :param sizeAverage:

    >>> distKLDivCriterion = DistKLDivCriterion(True)
    creating: createDistKLDivCriterion
    '''

    def __init__(self, size_average=True, bigdl_type="float"):
        super(DistKLDivCriterion, self).__init__(None, bigdl_type, size_average)
class CategoricalCrossEntropy(Criterion):
    """
    This criterion is same with cross entropy criterion, except it takes a
    one-hot format target tensor.

    >>> cce = CategoricalCrossEntropy()
    creating: createCategoricalCrossEntropy
    """

    def __init__(self, bigdl_type="float"):
        # No extra configuration is forwarded to the JVM side.
        Criterion.__init__(self, None, bigdl_type)
class HingeEmbeddingCriterion(Criterion):
    '''
    Creates a criterion that measures the loss given an
    input x which is a 1-dimensional vector and a label y (1 or -1).
    This is usually used for measuring whether two inputs are similar
    or dissimilar,
    e.g. using the L1 pairwise distance, and is typically used for
    learning nonlinear embeddings or semi-supervised learning.

    If x and y are n-dimensional Tensors, the sum operation still operates
    over all the elements, and divides by n (this can be avoided if one sets
    the internal variable sizeAverage to false). The margin has a default
    value of 1, or can be set in the constructor.

    >>> hingeEmbeddingCriterion = HingeEmbeddingCriterion(1e-5, True)
    creating: createHingeEmbeddingCriterion
    '''

    def __init__(self, margin=1.0, size_average=True, bigdl_type="float"):
        super(HingeEmbeddingCriterion, self).__init__(None, bigdl_type,
                                                      margin, size_average)
class L1HingeEmbeddingCriterion(Criterion):
    '''
    Creates a criterion that measures the loss given an input x = {x1, x2},
    a table of two Tensors, and a label y (1 or -1):

    :param margin:

    >>> l1HingeEmbeddingCriterion = L1HingeEmbeddingCriterion(1e-5)
    creating: createL1HingeEmbeddingCriterion
    >>> l1HingeEmbeddingCriterion = L1HingeEmbeddingCriterion()
    creating: createL1HingeEmbeddingCriterion
    >>> input1 = np.array([2.1, -2.2])
    >>> input2 = np.array([-0.55, 0.298])
    >>> input = [input1, input2]
    >>> target = np.array([1.0])
    >>> result = l1HingeEmbeddingCriterion.forward(input, target)
    >>> (result == 5.148)
    True
    '''

    def __init__(self, margin=1.0, bigdl_type="float"):
        super(L1HingeEmbeddingCriterion, self).__init__(None, bigdl_type, margin)
class MarginCriterion(Criterion):
    '''
    Creates a criterion that optimizes a two-class classification hinge loss (margin-based loss)
    between input x (a Tensor of dimension 1) and output y.

    When margin = 1, size_average = True and squared = False, this is the same as hinge loss in keras;
    When margin = 1, size_average = False and squared = True, this is the same as squared_hinge loss in keras.

    :param margin: if unspecified, is by default 1.
    :param size_average: size average in a mini-batch
    :param squared: whether to calculate the squared hinge loss

    >>> marginCriterion = MarginCriterion(1e-5, True, False)
    creating: createMarginCriterion
    '''

    def __init__(self, margin=1.0, size_average=True, squared=False,
                 bigdl_type="float"):
        super(MarginCriterion, self).__init__(None, bigdl_type,
                                              margin, size_average, squared)
class MarginRankingCriterion(Criterion):
    '''
    Creates a criterion that measures the loss given an input x = {x1, x2},
    a table of two Tensors of size 1 (they contain only scalars), and a label y (1 or -1).
    In batch mode, x is a table of two Tensors of size batchsize, and y is a Tensor of size
    batchsize containing 1 or -1 for each corresponding pair of elements in the input Tensor.
    If y == 1 then it assumed the first input should be ranked higher (have a larger value) than
    the second input, and vice-versa for y == -1.

    :param margin:

    >>> marginRankingCriterion = MarginRankingCriterion(1e-5, True)
    creating: createMarginRankingCriterion
    '''

    def __init__(self, margin=1.0, size_average=True, bigdl_type="float"):
        super(MarginRankingCriterion, self).__init__(None, bigdl_type,
                                                     margin, size_average)
class MultiCriterion(Criterion):
    '''
    A weighted sum of other criterions each applied to the same input and target.

    >>> multiCriterion = MultiCriterion()
    creating: createMultiCriterion
    >>> mSECriterion = MSECriterion()
    creating: createMSECriterion
    >>> multiCriterion = multiCriterion.add(mSECriterion)
    >>> multiCriterion = multiCriterion.add(mSECriterion)
    '''

    def __init__(self, bigdl_type="float"):
        super(MultiCriterion, self).__init__(None, bigdl_type)

    def add(self, criterion, weight=1.0):
        # Delegate to the JVM-side add, then return self so calls can chain.
        self.value.add(criterion.value, weight)
        return self
class MultiLabelMarginCriterion(Criterion):
    '''
    Creates a criterion that optimizes a multi-class multi-classification hinge loss (
    margin-based loss) between input x and output y (which is a Tensor of target class indices)

    :param size_average: size average in a mini-batch

    >>> multiLabelMarginCriterion = MultiLabelMarginCriterion(True)
    creating: createMultiLabelMarginCriterion
    '''

    def __init__(self, size_average=True, bigdl_type="float"):
        super(MultiLabelMarginCriterion, self).__init__(None, bigdl_type,
                                                        size_average)
class ParallelCriterion(Criterion):
    '''
    ParallelCriterion is a weighted sum of other criterions each applied to a different input
    and target. Set repeatTarget = true to share the target for criterions.

    Use add(criterion[, weight]) method to add criterion. Where weight is a scalar(default 1).

    :param repeat_target: Whether to share the target for all criterions.

    >>> parallelCriterion = ParallelCriterion(True)
    creating: createParallelCriterion
    >>> mSECriterion = MSECriterion()
    creating: createMSECriterion
    >>> parallelCriterion = parallelCriterion.add(mSECriterion)
    >>> parallelCriterion = parallelCriterion.add(mSECriterion)
    '''

    def __init__(self, repeat_target=False, bigdl_type="float"):
        super(ParallelCriterion, self).__init__(None, bigdl_type, repeat_target)

    def add(self, criterion, weight=1.0):
        # Delegate to the JVM-side add, then return self so calls can chain.
        self.value.add(criterion.value, weight)
        return self
class KLDCriterion(Criterion):
    '''
    Computes the KL-divergence of the input normal distribution to a standard normal distribution.
    The input has to be a table. The first element of input is the mean of the distribution,
    the second element of input is the log_variance of the distribution. The input distribution is
    assumed to be diagonal.

    >>> KLDCriterion = KLDCriterion()
    creating: createKLDCriterion
    '''

    def __init__(self, bigdl_type="float"):
        # No extra configuration is forwarded to the JVM side.
        Criterion.__init__(self, None, bigdl_type)
class GaussianCriterion(Criterion):
    '''
    Computes the log-likelihood of a sample x given a Gaussian distribution p.

    >>> GaussianCriterion = GaussianCriterion()
    creating: createGaussianCriterion
    '''

    def __init__(self, bigdl_type="float"):
        # No extra configuration is forwarded to the JVM side.
        Criterion.__init__(self, None, bigdl_type)
class SmoothL1Criterion(Criterion):
    '''
    Creates a criterion that can be thought of as a smooth version of the AbsCriterion.
    It uses a squared term if the absolute element-wise error falls below 1.
    It is less sensitive to outliers than the MSECriterion and in some
    cases prevents exploding gradients (e.g. see "Fast R-CNN" paper by Ross Girshick).
    ```
                          | 0.5 * (x_i - y_i)^2^, if |x_i - y_i| < 1
    loss(x, y) = 1/n \sum |
                          | |x_i - y_i| - 0.5,   otherwise
    ```
    If x and y are d-dimensional Tensors with a total of n elements,
    the sum operation still operates over all the elements, and divides by n.
    The division by n can be avoided if one sets the internal variable sizeAverage to false

    :param size_average: whether to average the loss

    >>> smoothL1Criterion = SmoothL1Criterion(True)
    creating: createSmoothL1Criterion
    '''

    def __init__(self, size_average=True, bigdl_type="float"):
        super(SmoothL1Criterion, self).__init__(None, bigdl_type, size_average)
class SmoothL1CriterionWithWeights(Criterion):
    '''
    A smooth version of the AbsCriterion.
    It uses a squared term if the absolute element-wise error falls below 1.
    It is less sensitive to outliers than the MSECriterion and in some cases
    prevents exploding gradients (e.g. see "Fast R-CNN" paper by Ross Girshick).
    ```
    d = (x - y) * w_in
    loss(x, y, w_in, w_out)
               | 0.5 * (sigma * d_i)^2 * w_out          if |d_i| < 1 / sigma / sigma
     = 1/n \sum |
               | (|d_i| - 0.5 / sigma / sigma) * w_out   otherwise
    ```
    >>> smoothL1CriterionWithWeights = SmoothL1CriterionWithWeights(1e-5, 1)
    creating: createSmoothL1CriterionWithWeights
    '''

    def __init__(self, sigma, num=0, bigdl_type="float"):
        super(SmoothL1CriterionWithWeights, self).__init__(None, bigdl_type,
                                                           sigma, num)
class SoftmaxWithCriterion(Criterion):
    '''
    Computes the multinomial logistic loss for a one-of-many classification task,
    passing real-valued predictions through a softmax to get a probability distribution over classes.
    It should be preferred over separate SoftmaxLayer + MultinomialLogisticLossLayer
    as its gradient computation is more numerically stable.

    :param ignoreLabel: (optional) Specify a label value that should be ignored when computing the loss.
    :param normalizeMode: How to normalize the output loss.

    >>> softmaxWithCriterion = SoftmaxWithCriterion()
    creating: createSoftmaxWithCriterion
    >>> softmaxWithCriterion = SoftmaxWithCriterion(1, "FULL")
    creating: createSoftmaxWithCriterion
    '''

    def __init__(self, ignore_label=None, normalize_mode="VALID",
                 bigdl_type="float"):
        super(SoftmaxWithCriterion, self).__init__(None, bigdl_type,
                                                   ignore_label, normalize_mode)
class TimeDistributedCriterion(Criterion):
    '''
    This class is intended to support inputs with 3 or more dimensions.
    Apply Any Provided Criterion to every temporal slice of an input.

    :param criterion: embedded criterion
    :param size_average: whether to divide the sequence length

    >>> td = TimeDistributedCriterion(ClassNLLCriterion())
    creating: createClassNLLCriterion
    creating: createTimeDistributedCriterion
    '''

    def __init__(self, criterion, size_average=False, bigdl_type="float"):
        # The wrapped criterion is handed to the JVM side as-is.
        super(TimeDistributedCriterion, self).__init__(None, bigdl_type,
                                                       criterion, size_average)
class CrossEntropyCriterion(Criterion):
    """
    This criterion combines LogSoftMax and ClassNLLCriterion in one single class.

    :param weights: A tensor assigning weight to each of the classes

    >>> np.random.seed(123)
    >>> weights = np.random.uniform(0, 1, (2,)).astype("float32")
    >>> cec = CrossEntropyCriterion(weights)
    creating: createCrossEntropyCriterion
    >>> cec = CrossEntropyCriterion()
    creating: createCrossEntropyCriterion
    """

    def __init__(self, weights=None, size_average=True, bigdl_type="float"):
        # A weights value of None passes through JTensor.from_ndarray as None.
        jweights = JTensor.from_ndarray(weights)
        super(CrossEntropyCriterion, self).__init__(None, bigdl_type,
                                                    jweights, size_average)
class BCECriterion(Criterion):
    '''
    Creates a criterion that measures the Binary Cross Entropy
    between the target and the output.

    :param weights: weights for each class
    :param sizeAverage: whether to average the loss or not

    >>> np.random.seed(123)
    >>> weights = np.random.uniform(0, 1, (2,)).astype("float32")
    >>> bCECriterion = BCECriterion(weights)
    creating: createBCECriterion
    >>> bCECriterion = BCECriterion()
    creating: createBCECriterion
    '''

    def __init__(self, weights=None, size_average=True, bigdl_type="float"):
        # A weights value of None passes through JTensor.from_ndarray as None.
        jweights = JTensor.from_ndarray(weights)
        super(BCECriterion, self).__init__(None, bigdl_type,
                                           jweights, size_average)
class MultiLabelSoftMarginCriterion(Criterion):
    '''
    A MultiLabel multiclass criterion based on sigmoid:
    the loss is:
    ```
    l(x,y) = - sum_i y[i] * log(p[i]) + (1 - y[i]) * log (1 - p[i])
    ```
    where p[i] = exp(x[i]) / (1 + exp(x[i]))
    and with weights:
    ```
    l(x,y) = - sum_i weights[i] (y[i] * log(p[i]) + (1 - y[i]) * log (1 - p[i]))
    ```

    >>> np.random.seed(123)
    >>> weights = np.random.uniform(0, 1, (2,)).astype("float32")
    >>> multiLabelSoftMarginCriterion = MultiLabelSoftMarginCriterion(weights)
    creating: createMultiLabelSoftMarginCriterion
    >>> multiLabelSoftMarginCriterion = MultiLabelSoftMarginCriterion()
    creating: createMultiLabelSoftMarginCriterion
    '''

    def __init__(self, weights=None, size_average=True, bigdl_type="float"):
        # A weights value of None passes through JTensor.from_ndarray as None.
        jweights = JTensor.from_ndarray(weights)
        super(MultiLabelSoftMarginCriterion, self).__init__(None, bigdl_type,
                                                            jweights,
                                                            size_average)
class MultiMarginCriterion(Criterion):
    '''
    Creates a criterion that optimizes a multi-class classification hinge loss (margin-based loss)
    between input x and output y (which is a target class index).

    :param p:
    :param weights:
    :param margin:
    :param size_average:

    >>> np.random.seed(123)
    >>> weights = np.random.uniform(0, 1, (2,)).astype("float32")
    >>> multiMarginCriterion = MultiMarginCriterion(1,weights)
    creating: createMultiMarginCriterion
    >>> multiMarginCriterion = MultiMarginCriterion()
    creating: createMultiMarginCriterion
    '''

    def __init__(self, p=1, weights=None, margin=1.0, size_average=True,
                 bigdl_type="float"):
        # A weights value of None passes through JTensor.from_ndarray as None.
        jweights = JTensor.from_ndarray(weights)
        super(MultiMarginCriterion, self).__init__(None, bigdl_type,
                                                   p, jweights,
                                                   margin, size_average)
class SoftMarginCriterion(Criterion):
    """
    Creates a criterion that optimizes a two-class classification logistic loss
    between input x (a Tensor of dimension 1) and output y (which is a tensor
    containing either 1s or -1s).
    ```
    loss(x, y) = sum_i (log(1 + exp(-y[i]*x[i]))) / x:nElement()
    ```

    :param sizeaverage: The normalization by the number of elements in the input can be disabled by setting

    >>> softMarginCriterion = SoftMarginCriterion(False)
    creating: createSoftMarginCriterion
    >>> softMarginCriterion = SoftMarginCriterion()
    creating: createSoftMarginCriterion
    """

    def __init__(self, size_average=True, bigdl_type="float"):
        super(SoftMarginCriterion, self).__init__(None, bigdl_type,
                                                  size_average)
class DiceCoefficientCriterion(Criterion):
    '''
    The Dice-Coefficient criterion.
    input: Tensor, target: Tensor
    ```
    return:      2 * (input intersection target)
            1 - ----------------------------------
                    input union target
    ```

    >>> diceCoefficientCriterion = DiceCoefficientCriterion(size_average = True, epsilon = 1.0)
    creating: createDiceCoefficientCriterion
    >>> diceCoefficientCriterion = DiceCoefficientCriterion()
    creating: createDiceCoefficientCriterion
    '''

    def __init__(self, size_average=True, epsilon=1.0, bigdl_type="float"):
        super(DiceCoefficientCriterion, self).__init__(None, bigdl_type,
                                                       size_average, epsilon)
class L1Cost(Criterion):
    '''
    Compute L1 norm for input, and sign of input.

    >>> l1Cost = L1Cost()
    creating: createL1Cost
    '''

    def __init__(self, bigdl_type="float"):
        # No extra configuration is forwarded to the JVM side.
        Criterion.__init__(self, None, bigdl_type)
class CosineProximityCriterion(Criterion):
    '''
    Compute the negative of the mean cosine proximity between predictions and targets.
    ```
    x'(i) = x(i) / sqrt(max(sum(x(i)^2), 1e-12))
    y'(i) = y(i) / sqrt(max(sum(x(i)^2), 1e-12))
    cosine_proximity(x, y) = sum_i(-1 * x'(i) * y'(i))
    ```

    >>> cosineProximityCriterion = CosineProximityCriterion()
    creating: createCosineProximityCriterion
    '''

    def __init__(self, bigdl_type="float"):
        # No extra configuration is forwarded to the JVM side.
        Criterion.__init__(self, None, bigdl_type)
class MeanAbsolutePercentageCriterion(Criterion):
    '''
    This method is same as `mean_absolute_percentage_error` loss in keras.
    It caculates diff = K.abs((y - x) / K.clip(K.abs(y), K.epsilon(), Double.MaxValue))
    and return 100 * K.mean(diff) as outpout. Here, the x and y can have or not have a batch.

    >>> error = MeanAbsolutePercentageCriterion()
    creating: createMeanAbsolutePercentageCriterion
    '''

    def __init__(self, bigdl_type="float"):
        # No extra configuration is forwarded to the JVM side.
        Criterion.__init__(self, None, bigdl_type)
class MeanSquaredLogarithmicCriterion(Criterion):
    '''
    This method is same as `mean_squared_logarithmic_error` loss in keras.
    It calculates: first_log = K.log(K.clip(y, K.epsilon(), Double.MaxValue) + 1.)
    second_log = K.log(K.clip(x, K.epsilon(), Double.MaxValue) + 1.)
    and output K.mean(K.square(first_log - second_log)). Here, the x and y can have or not have a batch.

    >>> error = MeanSquaredLogarithmicCriterion()
    creating: createMeanSquaredLogarithmicCriterion
    '''

    def __init__(self, bigdl_type="float"):
        # No extra configuration is forwarded to the JVM side.
        Criterion.__init__(self, None, bigdl_type)
class KullbackLeiblerDivergenceCriterion(Criterion):
    '''
    Compute Kullback Leibler DivergenceCriterion error for input and target.
    This method is same as `kullback_leibler_divergence` loss in keras. Loss calculated as:
    y_true = K.clip(input, K.epsilon(), 1)
    y_pred = K.clip(target, K.epsilon(), 1)
    and output K.sum(y_true * K.log(y_true / y_pred), axis=-1)

    >>> error = KullbackLeiblerDivergenceCriterion()
    creating: createKullbackLeiblerDivergenceCriterion
    '''

    def __init__(self, bigdl_type="float"):
        # No extra configuration is forwarded to the JVM side.
        Criterion.__init__(self, None, bigdl_type)
class PoissonCriterion(Criterion):
    '''
    Compute Poisson error for input and target, loss calculated as:
    mean(input - target * K.log(input + K.epsilon()), axis=-1)

    >>> error = PoissonCriterion()
    creating: createPoissonCriterion
    '''

    def __init__(self, bigdl_type="float"):
        # No extra configuration is forwarded to the JVM side.
        Criterion.__init__(self, None, bigdl_type)
def _test():
    """Run this module's doctests against a local SparkContext.

    Exits the process with a non-zero status when any doctest fails, so CI
    picks the failure up.
    """
    import doctest
    import sys
    from pyspark import SparkContext
    from bigdl.nn import criterion
    from bigdl.util.common import init_engine
    from bigdl.util.common import create_spark_conf
    globs = criterion.__dict__.copy()
    sc = SparkContext(master="local[4]", appName="test criterion",
                      conf=create_spark_conf())
    globs['sc'] = sc
    init_engine()
    # ELLIPSIS lets doctest expected output elide unstable parts with "...".
    (failure_count, test_count) = doctest.testmod(globs=globs,
                                                  optionflags=doctest.ELLIPSIS)
    if failure_count:
        # Use sys.exit instead of the site-injected exit() builtin, which is
        # meant for interactive sessions and may be absent (e.g. under -S).
        sys.exit(-1)
# Running this module directly executes its doctest suite.
if __name__ == "__main__":
    _test()
|
# coding: utf-8
from __future__ import unicode_literals, division, absolute_import, print_function
import os
import flake8
# flake8 3.x moved the style-guide API to flake8.api.legacy, while flake8 2.x
# exposes it from flake8.engine and does not define __version_info__ at all.
# Probe for the attribute first: without the hasattr() guard this line raises
# AttributeError on flake8 2.x instead of importing the 2.x API.
if not hasattr(flake8, '__version_info__') or flake8.__version_info__ < (3,):
    from flake8.engine import get_style_guide
else:
    from flake8.api.legacy import get_style_guide
# Resolve the flake8 configuration (the project's tox.ini) relative to this
# file so the lint can be invoked from any working directory.
cur_dir = os.path.dirname(__file__)
config_file = os.path.join(cur_dir, '..', 'tox.ini')
def run():
    """
    Runs flake8 lint over every Python source in the oscrypto package.

    :return:
        A bool - if flake8 did not find any errors
    """

    print('Running flake8')

    style_guide = get_style_guide(config_file=config_file)

    # Collect every .py file below the package directory.
    paths = [
        os.path.join(root, filename)
        for root, _, filenames in os.walk('oscrypto')
        for filename in filenames
        if filename.endswith('.py')
    ]
    report = style_guide.check_files(paths)
    success = report.total_errors == 0
    if success:
        print('OK')
    return success
Fix run.py lint script to work with flake8 2.x by guarding the missing __version_info__ attribute
# coding: utf-8
from __future__ import unicode_literals, division, absolute_import, print_function
import os
import flake8
# flake8 3.x moved the style-guide API to flake8.api.legacy; flake8 2.x
# exposes it from flake8.engine and lacks __version_info__ entirely, hence
# the hasattr() probe before the version comparison.
if not hasattr(flake8, '__version_info__') or flake8.__version_info__ < (3,):
    from flake8.engine import get_style_guide
else:
    from flake8.api.legacy import get_style_guide
# Resolve the flake8 configuration (the project's tox.ini) relative to this
# file so the lint can be invoked from any working directory.
cur_dir = os.path.dirname(__file__)
config_file = os.path.join(cur_dir, '..', 'tox.ini')
def run():
    """
    Runs flake8 lint over the oscrypto package sources.

    :return:
        A bool - if flake8 did not find any errors
    """

    print('Running flake8')

    style = get_style_guide(config_file=config_file)

    # Gather every .py file below the package directory.
    paths = []
    for root, _, filenames in os.walk('oscrypto'):
        paths.extend(
            os.path.join(root, filename)
            for filename in filenames
            if filename.endswith('.py')
        )
    report = style.check_files(paths)
    if report.total_errors == 0:
        print('OK')
        return True
    return False
|
#!/usr/bin/env python
import copy
import time
import rospy
from frame_editor.objects import *
from frame_editor.commands import *
from frame_editor.interface import Interface
from frame_editor.constructors_geometry import *
from frame_editor.constructors_std import *
from frame_editor.srv import *
class FrameEditor_Services(Interface):
    """Exposes the frame editor's operations as ROS services.

    Each callback validates its request, reports problems through the
    response's error_code field (0 = ok, 1 = missing name, 2 = frame not
    found, 3 = missing source/parent name, 9 = unhandled exception) and
    delegates the actual work to undoable editor commands.
    """

    def __init__(self, frame_editor):
        # The editor owns the frame dictionary and executes the commands.
        self.editor = frame_editor
        # NOTE(review): service names are registered in the global namespace;
        # a "~" prefix would scope them to this node -- confirm intent.
        rospy.Service("align_frame", AlignFrame, self.callback_align_frame)
        rospy.Service("edit_frame", EditFrame, self.callback_edit_frame)
        rospy.Service("get_frame", GetFrame, self.callback_get_frame)
        rospy.Service("remove_frame", RemoveFrame, self.callback_remove_frame)
        rospy.Service("set_frame", SetFrame, self.callback_set_frame)
        rospy.Service("set_parent", SetParentFrame, self.callback_set_parent_frame)
        rospy.Service("copy_frame", CopyFrame, self.callback_copy_frame)

    def callback_align_frame(self, request):
        """Align an existing frame with a source frame along selected axes."""
        print "> Request to align frame", request.name, "with frame", request.source_name, "mode", request.mode

        response = AlignFrameResponse()
        response.error_code = 0

        if request.name == "":
            print " Error: No name given"
            response.error_code = 1
        elif request.source_name == "":
            print " Error: No source name given"
            response.error_code = 3
        elif request.name not in self.editor.frames:
            print " Error: Frame not found:", request.name
            response.error_code = 2
        else:
            frame = self.editor.frames[request.name]

            # Decode the bitmask: bits 0-2 select the translation axes
            # x/y/z, bits 3-5 the rotation axes a/b/c.
            m = request.mode
            mode = []
            if m & 1: mode.append("x")
            if m & 2: mode.append("y")
            if m & 4: mode.append("z")
            if m & 8: mode.append("a")
            if m & 16: mode.append("b")
            if m & 32: mode.append("c")

            self.editor.command(Command_AlignElement(self.editor, frame, request.source_name, mode))

        return response

    def callback_edit_frame(self, request):
        """Select a frame for interactive editing; an empty name clears the selection."""
        print "> Request to edit frame", request.name

        response = EditFrameResponse()
        response.error_code = 0

        if request.name == "":
            ## Reset
            self.editor.command(Command_SelectElement(self.editor, None))
        elif request.name not in self.editor.frames:
            print " Error: Frame not found:", request.name
            response.error_code = 2
        else:
            ## Set
            self.editor.command(Command_SelectElement(self.editor, self.editor.frames[request.name]))

        return response

    def callback_get_frame(self, request):
        """Return the name, parent and pose of a stored frame."""
        print "> Request to get frame", request.name

        response = GetFrameResponse()
        response.error_code = 0

        if request.name == "":
            print " Error: No name given"
            response.error_code = 1
        elif request.name not in self.editor.frames:
            print " Error: Frame not found:", request.name
            response.error_code = 2
        else:
            f = self.editor.frames[request.name]
            f.print_all()
            response.name = f.name
            response.parent = f.parent
            response.pose = ToPose(f.position, f.orientation)

        return response

    def callback_remove_frame(self, request):
        """Delete an existing frame via an undoable editor command."""
        print "> Request to remove frame", request.name

        response = RemoveFrameResponse()
        response.error_code = 0

        if request.name == "":
            print " Error: No name given"
            response.error_code = 1
        elif request.name not in self.editor.frames:
            print " Error: Frame not found:", request.name
            response.error_code = 2
        else:
            self.editor.command(Command_RemoveElement(self.editor, self.editor.frames[request.name]))

        return response

    def callback_set_frame(self, request):
        """Create (or overwrite) a frame from the requested pose and parent."""
        print "> Request to set (or add) frame", request.name, request.parent

        response = SetFrameResponse()
        response.error_code = 0

        if request.name == "":
            print " Error: No name given"
            response.error_code = 1
        else:
            # Default to the world frame when no parent is specified.
            if request.parent == "":
                request.parent = "world"

            f = Frame(request.name,
                      FromPoint(request.pose.position),
                      FromQuaternion(request.pose.orientation),
                      request.parent)
            self.editor.command(Command_AddElement(self.editor, f))

        return response

    def callback_set_parent_frame(self, request):
        """Re-parent a frame, optionally keeping its absolute pose."""
        print "> Request to set parent_frame", request.name, request.parent

        response = SetParentFrameResponse()
        response.error_code = 0

        if request.name == "":
            print " Error: No frame_name given"
            response.error_code = 1
        elif request.parent == "":
            print " Error: No parent_name given"
            response.error_code = 2
        else:
            # NOTE(review): request.name is not checked against the frame
            # dictionary here; an unknown name raises KeyError -- confirm
            # whether that is intended.
            f = self.editor.frames[request.name]
            self.editor.command(Command_SetParent(self.editor, f, request.parent, request.keep_absolute))

        return response

    def callback_copy_frame(self, request):
        """Copy a source frame to a (new or existing) frame name.

        Creates the target when it does not exist yet; otherwise rebases or
        aligns the existing frame depending on whether the parent changes.
        """
        print "> Request to copy frame '" + request.source_name + "' with new name '" + request.name + "' and new parent name '" + request.parent + "'"

        response = CopyFrameResponse()
        response.error_code = 0

        if request.name == "":
            print " Error: No name given"
            response.error_code = 1
        elif request.source_name == "":
            print " Error: No source name given"
            response.error_code = 3
        else:
            t = time.time()
            try:
                # If not existing yet: create frame
                if request.name not in self.editor.frames:
                    print ">> add"
                    # No parent specified: use source's parent
                    if request.parent == "":
                        if request.source_name in self.editor.frames:
                            request.parent = self.editor.frames[request.source_name].parent
                        else:
                            print " Error: No parent name given"
                            response.error_code = 3
                            return response
                    # Wait until tf can resolve the transforms involved,
                    # before and after executing the copy command.
                    Frame.wait_for_transform(request.source_name, request.parent, rospy.Duration(1.0))
                    self.editor.command(Command_CopyElement(self.editor, request.name, request.source_name, request.parent))
                    Frame.wait_for_transform(request.parent, request.name, rospy.Duration(1.0))
                else:
                    frame = self.editor.frames[request.name]
                    Frame.wait_for_transform(request.source_name, request.parent, rospy.Duration(1.0))
                    if (request.parent != "") and (frame.parent != request.parent):
                        print ">> rebase"
                        self.editor.command(Command_RebaseElement(self.editor, frame, request.source_name, request.parent))
                    else:
                        print ">> align"
                        self.editor.command(Command_AlignElement(self.editor, frame, request.source_name, ['x', 'y', 'z', 'a', 'b', 'c']))
                    Frame.wait_for_transform(frame.parent, frame.name, rospy.Duration(1.0))
            except Exception, e:
                print "Error: unhandled exception", e
                response.error_code = 9
            # NOTE(review): this elapsed-time print looks like leftover
            # debugging output -- consider removing.
            print time.time() - t

        return response
# eof
Encapsulate the ROS services in the node's private namespace (the "~" service-name prefix)
#!/usr/bin/env python
import copy
import time
import rospy
from frame_editor.objects import *
from frame_editor.commands import *
from frame_editor.interface import Interface
from frame_editor.constructors_geometry import *
from frame_editor.constructors_std import *
from frame_editor.srv import *
class FrameEditor_Services(Interface):
def __init__(self, frame_editor):
    """Register all frame-editor services on the given editor instance."""
    # The editor owns the frame dictionary and executes the commands.
    self.editor = frame_editor

    # The "~" prefix registers each service in the node's private namespace,
    # so multiple frame_editor nodes can coexist without name clashes.
    rospy.Service("~align_frame", AlignFrame, self.callback_align_frame)
    rospy.Service("~edit_frame", EditFrame, self.callback_edit_frame)
    rospy.Service("~get_frame", GetFrame, self.callback_get_frame)
    rospy.Service("~remove_frame", RemoveFrame, self.callback_remove_frame)
    rospy.Service("~set_frame", SetFrame, self.callback_set_frame)
    rospy.Service("~set_parent", SetParentFrame, self.callback_set_parent_frame)
    rospy.Service("~copy_frame", CopyFrame, self.callback_copy_frame)
def callback_align_frame(self, request):
    """Align an existing frame with a source frame along selected axes.

    error_code: 0 = ok, 1 = missing name, 2 = frame not found,
    3 = missing source name.
    """
    print "> Request to align frame", request.name, "with frame", request.source_name, "mode", request.mode

    response = AlignFrameResponse()
    response.error_code = 0

    if request.name == "":
        print " Error: No name given"
        response.error_code = 1
    elif request.source_name == "":
        print " Error: No source name given"
        response.error_code = 3
    elif request.name not in self.editor.frames:
        print " Error: Frame not found:", request.name
        response.error_code = 2
    else:
        frame = self.editor.frames[request.name]

        # Decode the bitmask: bits 0-2 select the translation axes x/y/z,
        # bits 3-5 the rotation axes a/b/c.
        m = request.mode
        mode = []
        if m & 1: mode.append("x")
        if m & 2: mode.append("y")
        if m & 4: mode.append("z")
        if m & 8: mode.append("a")
        if m & 16: mode.append("b")
        if m & 32: mode.append("c")

        self.editor.command(Command_AlignElement(self.editor, frame, request.source_name, mode))

    return response
def callback_edit_frame(self, request):
    """Select a frame for interactive editing; an empty name clears the selection.

    error_code: 0 = ok, 2 = frame not found.
    """
    print "> Request to edit frame", request.name

    response = EditFrameResponse()
    response.error_code = 0

    if request.name == "":
        ## Reset
        self.editor.command(Command_SelectElement(self.editor, None))
    elif request.name not in self.editor.frames:
        print " Error: Frame not found:", request.name
        response.error_code = 2
    else:
        ## Set
        self.editor.command(Command_SelectElement(self.editor, self.editor.frames[request.name]))

    return response
def callback_get_frame(self, request):
print "> Request to get frame", request.name
response = GetFrameResponse()
response.error_code = 0
if request.name == "":
print " Error: No name given"
response.error_code = 1
elif request.name not in self.editor.frames:
print " Error: Frame not found:", request.name
response.error_code = 2
else:
f = self.editor.frames[request.name]
f.print_all()
response.name = f.name
response.parent = f.parent
response.pose = ToPose(f.position, f.orientation)
return response
def callback_remove_frame(self, request):
print "> Request to remove frame", request.name
response = RemoveFrameResponse()
response.error_code = 0
if request.name == "":
print " Error: No name given"
response.error_code = 1
elif request.name not in self.editor.frames:
print " Error: Frame not found:", request.name
response.error_code = 2
else:
self.editor.command(Command_RemoveElement(self.editor, self.editor.frames[request.name]))
return response
def callback_set_frame(self, request):
print "> Request to set (or add) frame", request.name, request.parent
response = SetFrameResponse()
response.error_code = 0
if request.name == "":
print " Error: No name given"
response.error_code = 1
else:
if request.parent == "":
request.parent = "world"
f = Frame(request.name,
FromPoint(request.pose.position),
FromQuaternion(request.pose.orientation),
request.parent)
self.editor.command(Command_AddElement(self.editor, f))
return response
def callback_set_parent_frame(self, request):
print "> Request to set parent_frame", request.name, request.parent
response = SetParentFrameResponse()
response.error_code = 0
if request.name == "":
print " Error: No frame_name given"
response.error_code = 1
elif request.parent == "":
print " Error: No parent_name given"
response.error_code = 2
else:
f = self.editor.frames[request.name]
self.editor.command(Command_SetParent(self.editor, f, request.parent, request.keep_absolute))
return response
def callback_copy_frame(self, request):
print "> Request to copy frame '" + request.source_name + "' with new name '" + request.name + "' and new parent name '" + request.parent + "'"
response = CopyFrameResponse()
response.error_code = 0
if request.name == "":
print " Error: No name given"
response.error_code = 1
elif request.source_name == "":
print " Error: No source name given"
response.error_code = 3
else:
t = time.time()
try:
# If not existing yet: create frame
if request.name not in self.editor.frames:
print ">> add"
# No parent specified: use source's parent
if request.parent == "":
if request.source_name in self.editor.frames:
request.parent = self.editor.frames[request.source_name].parent
else:
print " Error: No parent name given"
response.error_code = 3
return response
Frame.wait_for_transform(request.source_name, request.parent, rospy.Duration(1.0))
self.editor.command(Command_CopyElement(self.editor, request.name, request.source_name, request.parent))
Frame.wait_for_transform(request.parent, request.name, rospy.Duration(1.0))
else:
frame = self.editor.frames[request.name]
Frame.wait_for_transform(request.source_name, request.parent, rospy.Duration(1.0))
if (request.parent != "") and (frame.parent != request.parent):
print ">> rebase"
self.editor.command(Command_RebaseElement(self.editor, frame, request.source_name, request.parent))
else:
print ">> align"
self.editor.command(Command_AlignElement(self.editor, frame, request.source_name, ['x', 'y', 'z', 'a', 'b', 'c']))
Frame.wait_for_transform(frame.parent, frame.name, rospy.Duration(1.0))
except Exception, e:
print "Error: unhandled exception", e
response.error_code = 9
print time.time() - t
return response
# eof
|
from functools import partial
import pytest
from plenum.common.messages.internal_messages import NeedViewChange
from plenum.server.consensus.batch_id import BatchID
from plenum.test.consensus.view_change.helper import some_pool
from plenum.test.helper import MockNetwork
from plenum.test.simulation.sim_random import SimRandom, DefaultSimRandom
def check_view_change_completes_under_normal_conditions(random: SimRandom):
    """Trigger a view change on a random pool and verify it converges."""
    pool, committed = some_pool(random)

    # Kick off the view change on every node at an independent random time.
    for node in pool.nodes:
        delay = random.integer(0, 10000)
        trigger = partial(node._view_changer.process_need_view_change, NeedViewChange())
        pool.timer.schedule(delay, trigger)

    # Wait until every node has left view 0 and finished the view change.
    def view_change_done():
        return all(not n._data.waiting_for_new_view and n._data.view_no > 0
                   for n in pool.nodes)
    pool.timer.wait_for(view_change_done)

    # All nodes must agree on the resulting consensus state.
    for node_a, node_b in zip(pool.nodes, pool.nodes[1:]):
        data_a, data_b = node_a._data, node_b._data
        assert data_a.view_no == data_b.view_no
        assert data_a.primary_name == data_b.primary_name
        assert data_a.stable_checkpoint == data_b.stable_checkpoint
        assert data_a.preprepared == data_b.preprepared

    # Every committed request above the stable checkpoint keeps its ppSeqNo.
    stable = pool.nodes[0]._data.stable_checkpoint
    expected = [batch for batch in committed if batch.pp_seq_no > stable]
    for node in pool.nodes:
        assert expected == node._data.preprepared[:len(expected)]
def calc_committed(view_changes):
    """Derive the committed batches implied by a set of view-change votes.

    A batch counts as committed only while, for every vote, it appears
    consistently in the PrePrepare log and is present in the Prepare log.
    Stops at the first ppSeqNo that fails this, returning the prefix.
    """
    committed = []
    for pp_seq_no in range(1, 50):
        batch_id = None
        for vc in view_changes:
            # First PrePrepare entry for this ppSeqNo, if any; all votes
            # that carry one must agree on it.
            found = next((pp for pp in vc.preprepared if pp[2] == pp_seq_no), None)
            if found is not None:
                if batch_id is None:
                    batch_id = found
                assert batch_id == found
            # The batch must also have been prepared by this node.
            if batch_id not in vc.prepared:
                return committed
        committed.append(BatchID(*batch_id))
    return committed
@pytest.mark.parametrize("seed", range(200))
def test_view_change_completes_under_normal_conditions(seed):
    """Run the randomised view-change scenario for 200 fixed seeds."""
    check_view_change_completes_under_normal_conditions(DefaultSimRandom(seed))
def test_new_view_combinations(random):
    """NEW_VIEW built from any strong quorum of votes preserves committed batches."""
    pool, _ = some_pool(random)
    quorums = pool.nodes[0]._data.quorums

    # Collect one VIEW_CHANGE vote per node by swapping in a mock network.
    all_votes = []
    for node in pool.nodes:
        mock_net = MockNetwork()
        node._view_changer._network = mock_net
        node._view_changer._bus.send(NeedViewChange())
        all_votes.append(mock_net.sent_messages[0][0])

    # Try several random strong quorums of votes.
    builder = pool.nodes[0]._view_changer._new_view_builder
    for _ in range(10):
        votes = random.sample(all_votes, quorums.strong.value)
        cp = builder.calc_checkpoint(votes)
        assert cp is not None
        batches = builder.calc_batches(cp, votes)
        committed = [c for c in calc_committed(votes) if c.pp_seq_no > cp.seqNoEnd]
        assert batches is not None
        assert committed == batches[:len(committed)]
INDY-2223: modify test_view_change_completes_under_normal_conditions
Signed-off-by: toktar <693efcc3eefc70dd401303f85580b26042b61007@dsr-corporation.com>
from functools import partial
import pytest
from plenum.common.config_util import getConfig
from plenum.common.messages.internal_messages import NeedViewChange
from plenum.common.messages.node_messages import ViewChange, NewView, ViewChangeAck
from plenum.server.consensus.batch_id import BatchID
from plenum.server.replica_helper import getNodeName
from plenum.test.consensus.view_change.helper import some_pool
from plenum.test.helper import MockNetwork
from plenum.test.simulation.sim_random import SimRandom, DefaultSimRandom
def check_view_change_completes_under_normal_conditions(random: SimRandom, pool_committed=None):
    """Trigger a view change and verify the pool converges.

    pool_committed: optional (pool, committed) pair; when omitted a fresh
    random pool is created from `random`.
    """
    if pool_committed is None:
        pool, committed = some_pool(random)
    else:
        pool, committed = pool_committed

    # Kick off the view change on every node at an independent random time.
    for node in pool.nodes:
        delay = random.integer(0, 10000)
        trigger = partial(node._view_changer.process_need_view_change, NeedViewChange())
        pool.timer.schedule(delay, trigger)

    # Wait until every node has left view 0 and finished the view change.
    def view_change_done():
        return all(not n._data.waiting_for_new_view and n._data.view_no > 0
                   for n in pool.nodes)
    pool.timer.wait_for(view_change_done)

    # All nodes must agree on the resulting consensus state.
    for node_a, node_b in zip(pool.nodes, pool.nodes[1:]):
        data_a, data_b = node_a._data, node_b._data
        assert data_a.view_no == data_b.view_no
        assert data_a.primary_name == data_b.primary_name
        assert data_a.stable_checkpoint == data_b.stable_checkpoint
        assert data_a.preprepared == data_b.preprepared

    # Every committed request above the stable checkpoint keeps its ppSeqNo.
    stable = pool.nodes[0]._data.stable_checkpoint
    expected = [batch for batch in committed if batch.pp_seq_no > stable]
    for node in pool.nodes:
        assert expected == node._data.preprepared[:len(expected)]
def calc_committed(view_changes):
    """Derive the committed batches implied by a set of view-change votes.

    A batch counts as committed only while, for every vote, it appears
    consistently in the PrePrepare log and is present in the Prepare log.
    Stops at the first ppSeqNo that fails this, returning the prefix.
    """
    committed = []
    for pp_seq_no in range(1, 50):
        batch_id = None
        for vc in view_changes:
            # First PrePrepare entry for this ppSeqNo, if any; all votes
            # that carry one must agree on it.
            found = next((pp for pp in vc.preprepared if pp[2] == pp_seq_no), None)
            if found is not None:
                if batch_id is None:
                    batch_id = found
                assert batch_id == found
            # The batch must also have been prepared by this node.
            if batch_id not in vc.prepared:
                return committed
        committed.append(BatchID(*batch_id))
    return committed
@pytest.fixture(params=[(0, 0.6), (1, 2)])
def set_latency(pool_committed, request, tconf):
    """Configure network latency relative to NEW_VIEW_TIMEOUT.

    Parametrised with (low, high) multipliers of the view-change timeout:
    one range safely below it and one at/above it.
    """
    # Renamed from `min, max`: never shadow the builtins.
    low, high = (int(factor * tconf.NEW_VIEW_TIMEOUT) for factor in request.param)
    pool_committed[0].network.set_latency(low, high)
@pytest.fixture(params=range(200))
def new_random(request):
    """One deterministic SimRandom per seed in [0, 200)."""
    return DefaultSimRandom(request.param)
@pytest.fixture()
def pool_committed(new_random):
    """A random pool together with its committed batches."""
    pool, committed = some_pool(new_random)
    # Debug hook kept for reference: drop view-change traffic to the last node.
    # pool.network.set_filter([getNodeName(pool.nodes[-1].name)],
    #                         [ViewChange, NewView, ViewChangeAck])
    return pool, committed
def test_view_change_completes_under_normal_conditions(new_random, pool_committed, set_latency):
    """View change must finish on the shared pool under the configured latency."""
    check_view_change_completes_under_normal_conditions(new_random, pool_committed=pool_committed)
def test_new_view_combinations(random):
    """NEW_VIEW built from any strong quorum of votes preserves committed batches."""
    pool, _ = some_pool(random)
    quorums = pool.nodes[0]._data.quorums

    # Collect one VIEW_CHANGE vote per node by swapping in a mock network.
    all_votes = []
    for node in pool.nodes:
        mock_net = MockNetwork()
        node._view_changer._network = mock_net
        node._view_changer._bus.send(NeedViewChange())
        all_votes.append(mock_net.sent_messages[0][0])

    # Try several random strong quorums of votes.
    builder = pool.nodes[0]._view_changer._new_view_builder
    for _ in range(10):
        votes = random.sample(all_votes, quorums.strong.value)
        cp = builder.calc_checkpoint(votes)
        assert cp is not None
        batches = builder.calc_batches(cp, votes)
        committed = [c for c in calc_committed(votes) if c.pp_seq_no > cp.seqNoEnd]
        assert batches is not None
        assert committed == batches[:len(committed)]
|
#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""Submits jenkins steeplechase WebRTC test results to treeherder"""
from ConfigParser import ConfigParser
import glob
import hashlib
import json
import os
from sys import argv
import socket
import uuid
import argparse
import re
from thclient import TreeherderJobCollection
from thclient import TreeherderRequest
from thclient import TreeherderResultSetCollection
import sclogparse
def create_revision_hash():
    """Return a random 40-character hex digest used as a synthetic
    treeherder revision / result-set hash."""
    # Encode explicitly: uuid4() text is ASCII, so this is a no-op under
    # Python 2 and required under Python 3 (sha1 needs bytes there).
    return hashlib.sha1(str(uuid.uuid4()).encode('utf-8')).hexdigest()
def get_config():
    """Assemble the run configuration from CLI arguments and jenkinsherder.ini."""
    parser = argparse.ArgumentParser(description='Jenkins Steeplechase Treeherder Results Parser')
    parser.add_argument('--package', required=True)
    parser.add_argument('--package2', default=None)
    parser.add_argument('--submit-time', required=True, type=int, dest='submit_time')
    parser.add_argument('--start-time', required=True, type=int, dest='start_time')
    parser.add_argument('--end-time', required=True, type=int, dest='end_time')
    parser.add_argument('--steeplechase-log', required=True, dest='steeplechase_log')
    args = parser.parse_args()

    pfi = platform_info(args.package)
    pfi['package'] = args.package
    # Single-package runs reuse the primary package for the second client.
    pfi['package2'] = args.package2 if args.package2 else args.package

    # The ini file lives next to this script.
    ini_path = os.path.join(os.path.dirname(os.path.realpath(argv[0])), 'jenkinsherder.ini')
    cp = ConfigParser()
    cp.read(ini_path)

    return {
        'treeherder': {
            'credentials': dict(cp.items('Credentials')),
            'repo': dict(cp.items('Repo')),
        },
        'system': dict(cp.items('System')),
        'times': {
            'submit_time': args.submit_time,
            'start_time': args.start_time,
            'end_time': args.end_time,
        },
        'platform_info': pfi,
        'files': {'steeplechase_log': args.steeplechase_log},
    }
def platform_info(package):
    """Derive build metadata from a firefox package path.

    Parses ``firefox-latest-<release>.en-US.<platform>.<ext>`` from the file
    name and reads the sibling ``.txt`` build file for the build id and
    source revision.

    Returns a dict with platform, os_name, architecture, release, buildid,
    repo and rev keys.  Raises ValueError for an unrecognized platform.
    """
    base_name, file_name = os.path.split(package)
    exp = re.compile(r"^firefox-latest-([^\.]+)\.en-US\.([^\.]+)\.(.*)$")
    match = exp.match(file_name)
    release = match.group(1)
    whole_platform = match.group(2)

    # Split e.g. "linux-x86_64" into platform and architecture.
    # BUG FIX: the original matched with `exp` (the package-name regex)
    # instead of `arch_exp`, so the architecture branch was never taken.
    # The first group is non-greedy so "linux-x86_64" splits at the first
    # dash into ("linux", "x86_64").
    arch_exp = re.compile(r"^([^\.]+?)-(.*)$")
    arch_match = arch_exp.match(whole_platform)
    if arch_match:
        platform = arch_match.group(1)
        architecture = arch_match.group(2)
    else:
        platform = whole_platform
        architecture = ''

    os_names = {'linux': 'linux', 'mac': 'mac', 'win32': 'win', 'win64': 'win'}
    if platform not in os_names:
        # The original left os_name unbound here (NameError); fail loudly.
        raise ValueError('Unrecognized platform: %s' % platform)
    os_name = os_names[platform]

    build_file_path = os.path.join(
        base_name, 'firefox-latest-%s.en-US.%s.txt' % (release, whole_platform))
    # First line: build id; second line: https://<repo>/rev/<rev>.
    with open(build_file_path, 'r') as build_file:
        buildid = build_file.readline().rstrip("\r\n")
        repo_line = build_file.readline().rstrip("\r\n")

    repo_match = re.match(r"^https://(.*/)rev/(.*)$", repo_line)
    repo = repo_match.group(1)
    rev = repo_match.group(2)

    return {'platform': platform, 'os_name': os_name,
            'architecture': architecture, 'release': release,
            'buildid': buildid, 'repo': repo, 'rev': rev}
def get_app_information(config):
    """Return (revision, repository) of the build under test."""
    pfi = config['platform_info']
    return pfi['rev'], pfi['repo']
def get_files(config):
    """Return the package paths for the two test clients."""
    pfi = config['platform_info']
    return pfi['package'], pfi['package2']
def get_buildid(config):
    """Return the build id read from the build's .txt file."""
    return config['platform_info']['buildid']
def get_result_summary(results):
    """Convert parsed steeplechase results into treeherder 'Job Info' details."""
    details = []

    def add_line(title, value):
        details.append({'title': title,
                        'value': str(value),
                        'content_type': 'text'})

    add_line('Total Failed', results['total failed'])
    add_line('Total Passed', results['total passed'])
    # One group of counters per steeplechase client.
    for client in results['clients']:
        prefix = client['name']
        add_line(prefix + ' Total Blocks', client['blocks'])
        add_line(prefix + ' Failed Blocks', len(client['failed blocks']))
        add_line(prefix + ' Session Failures', len(client['session failures']))
        add_line(prefix + ' Setup Failures', len(client['setup failures']))
        add_line(prefix + ' Cleanup Failures', len(client['cleanup failures']))
    return {'job_details': details}
def get_result_string(results):
    """Map parsed results onto a treeherder result string.

    'busted' when totals are missing entirely, 'testfailed' when any client
    had failures (or 20+ failed blocks), otherwise 'success'.
    """
    if results['total failed'] is None or results['total passed'] is None:
        return 'busted'
    for client in results['clients']:
        client_ok = (not client['setup failures']
                     and not client['cleanup failures']
                     and not client['session failures']
                     and len(client['failed blocks']) < 20)
        if not client_ok:
            return 'testfailed'
    return 'success'
def main():
    """Parse steeplechase results, build a treeherder result set + job,
    and submit both to the configured treeherder instance."""
    config = get_config()
    app_revision, app_repository = get_app_information(config)
    files = get_files(config)
    # The package file's ctime stands in for the push timestamp.
    push_time = int(os.stat(files[0]).st_ctime)
    results = sclogparse.parse(config['files']['steeplechase_log'])
    # Jobs are grouped under a synthetic result set keyed by a random hash.
    result_set_hash = create_revision_hash()

    # Build the result set (one synthetic revision per run).
    trsc = TreeherderResultSetCollection()
    trs = trsc.get_resultset()
    trs.add_revision_hash(result_set_hash)
    author = 'Firefox %s' % (config['platform_info']['release'].title())
    trs.add_author(author)
    trs.add_push_timestamp(push_time)
    tr = trs.get_revision()
    tr.add_revision(app_revision)
    tr.add_author(author)
    tr.add_comment(get_buildid(config))
    tr.add_files([os.path.basename(f) for f in files])
    tr.add_repository(app_repository)
    trs.add_revision(tr)
    trsc.add(trs)

    # Build the job entry describing this WebRTC sanity run.
    tjc = TreeherderJobCollection()
    tj = tjc.get_job()
    tj.add_revision_hash(result_set_hash)
    tj.add_project(config['treeherder']['repo']['project'])
    tj.add_job_guid(str(uuid.uuid4()))
    tj.add_group_name('WebRTC QA Tests')
    tj.add_group_symbol('WebRTC')
    tj.add_job_name('Sanity')
    tj.add_job_symbol('end')
    tj.add_build_info(config['platform_info']['os_name'], config['platform_info']['platform'], config['platform_info']['architecture'])
    tj.add_machine_info(config['platform_info']['os_name'], config['platform_info']['platform'], config['platform_info']['architecture'])
    tj.add_description('WebRTC Jenkins')
    tj.add_option_collection({'opt': True})  # must not be {}!
    tj.add_reason('testing')
    tj.add_who('Mozilla Platform QA')
    tj.add_submit_timestamp(config['times']['submit_time'])
    tj.add_start_timestamp(config['times']['start_time'])
    tj.add_end_timestamp(config['times']['end_time'])
    tj.add_state('completed')
    tj.add_machine(socket.gethostname())
    result_string = get_result_string(results)
    tj.add_result(result_string)
    # A busted run has no parseable results worth attaching.
    if result_string != 'busted':
        summary = get_result_summary(results)
        tj.add_artifact('Job Info', 'json', summary)
        tj.add_artifact('Results', 'json', results)
    tjc.add(tj)

    # Echo both payloads (pretty-printed) into the jenkins console log.
    print 'trsc = ' + json.dumps(json.loads(trsc.to_json()), sort_keys=True,
                                 indent=4, separators=(',', ': '))
    print 'tjc = ' + json.dumps(json.loads(tjc.to_json()), sort_keys=True,
                                indent=4, separators=(',', ': '))

    # Submit the result set first, then the job referencing it.
    req = TreeherderRequest(
        protocol='http',
        host=config['treeherder']['repo']['host'],
        project=config['treeherder']['repo']['project'],
        oauth_key=config['treeherder']['credentials']['key'],
        oauth_secret=config['treeherder']['credentials']['secret']
    )
    req.post(trsc)
    req.post(tjc)


# Script entry point.
if __name__ == '__main__':
    main()
Various bug fixes. Had to add arguments for machine names and architectures.
#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""Submits jenkins steeplechase WebRTC test results to treeherder"""
from ConfigParser import ConfigParser
import glob
import hashlib
import json
import os
from sys import argv
import socket
import uuid
import argparse
import re
from thclient import TreeherderJobCollection
from thclient import TreeherderRequest
from thclient import TreeherderResultSetCollection
import sclogparse
def create_revision_hash():
    """Return a random 40-character hex digest used as a synthetic
    treeherder revision / result-set hash."""
    # Encode explicitly: uuid4() text is ASCII, so this is a no-op under
    # Python 2 and required under Python 3 (sha1 needs bytes there).
    return hashlib.sha1(str(uuid.uuid4()).encode('utf-8')).hexdigest()
def get_config():
    """Assemble the run configuration from CLI arguments and jenkinsherder.ini."""
    parser = argparse.ArgumentParser(description='Jenkins Steeplechase Treeherder Results Parser')
    parser.add_argument('--package', required=True)
    parser.add_argument('--package2', default=None)
    parser.add_argument('--submit-time', required=True, type=int, dest='submit_time')
    parser.add_argument('--start-time', required=True, type=int, dest='start_time')
    parser.add_argument('--end-time', required=True, type=int, dest='end_time')
    parser.add_argument('--steeplechase-log', required=True, dest='steeplechase_log')
    parser.add_argument('--machine1', required=True, dest='machine1')
    parser.add_argument('--machine2', required=True, dest='machine2')
    parser.add_argument('--arch1', required=True, dest='arch1')
    parser.add_argument('--arch2', required=True, dest='arch2')
    args = parser.parse_args()

    pfi = platform_info(args.package, args.arch1, args.machine1)
    # Single-package runs reuse the primary package for the second client.
    package2 = args.package2 if args.package2 else args.package
    pfi2 = platform_info(package2, args.arch2, args.machine2)

    # The ini file lives next to this script.
    ini_path = os.path.join(os.path.dirname(os.path.realpath(argv[0])), 'jenkinsherder.ini')
    cp = ConfigParser()
    cp.read(ini_path)

    return {
        'treeherder': {
            'credentials': dict(cp.items('Credentials')),
            'repo': dict(cp.items('Repo')),
        },
        'system': dict(cp.items('System')),
        'times': {
            'submit_time': args.submit_time,
            'start_time': args.start_time,
            'end_time': args.end_time,
        },
        'platform_info': pfi,
        'platform_info2': pfi2,
        'files': {'steeplechase_log': args.steeplechase_log},
    }
def platform_info(package, arch, machine):
    """Derive build metadata from a firefox package path.

    Parses ``firefox-latest-<release>.en-US.<platform>.<ext>`` from the file
    name and reads the sibling ``.txt`` build file for the build id and
    source revision.  ``arch`` and ``machine`` are passed through verbatim.

    Returns a dict with package, platform, os_name, architecture, release,
    buildid, repo, rev and machine keys.  Raises ValueError for an
    unrecognized platform.
    """
    base_name, file_name = os.path.split(package)
    match = re.match(r"^firefox-latest-([^\.]+)\.en-US\.([^\.]+)\.(.*)$", file_name)
    release = match.group(1)
    whole_platform = match.group(2)

    # Strip any "-<suffix>" from e.g. "linux-x86_64" to get the bare platform.
    # BUG FIX: the original matched with the package-name regex instead of
    # the arch regex, so this split never happened and dash-suffixed
    # platforms fell through to the os_name lookup unsplit.  The first
    # group is non-greedy so the split happens at the first dash.
    arch_match = re.match(r"^([^\.]+?)-(.*)$", whole_platform)
    platform = arch_match.group(1) if arch_match else whole_platform

    os_names = {'linux': 'linux', 'mac': 'mac', 'win32': 'win', 'win64': 'win'}
    if platform not in os_names:
        # The original left os_name unbound here (NameError); fail loudly.
        raise ValueError('Unrecognized platform: %s' % platform)
    os_name = os_names[platform]

    build_file_path = os.path.join(
        base_name, 'firefox-latest-%s.en-US.%s.txt' % (release, whole_platform))
    # First line: build id; second line: https://<repo>/rev/<rev>.
    with open(build_file_path, 'r') as build_file:
        buildid = build_file.readline().rstrip("\r\n")
        repo_line = build_file.readline().rstrip("\r\n")

    repo_match = re.match(r"^https://(.*/)rev/(.*)$", repo_line)
    repo = repo_match.group(1)
    rev = repo_match.group(2)

    return {'package': package, 'platform': platform, 'os_name': os_name,
            'architecture': arch, 'release': release, 'buildid': buildid,
            'repo': repo, 'rev': rev, 'machine': machine}
def get_app_information(config):
    """Return (revision, repository) of the primary build under test."""
    pfi = config['platform_info']
    return pfi['rev'], pfi['repo']
def get_files(config):
    """Return the package paths for client 1 and client 2."""
    return (config['platform_info']['package'],
            config['platform_info2']['package'])
def get_buildid(config):
    """Return the build id read from the primary build's .txt file."""
    return config['platform_info']['buildid']
def get_result_summary(results):
    """Convert parsed steeplechase results into treeherder 'Job Info' details."""
    details = []

    def add_line(title, value):
        details.append({'title': title,
                        'value': str(value),
                        'content_type': 'text'})

    add_line('Total Failed', results['total failed'])
    add_line('Total Passed', results['total passed'])
    # One group of counters per steeplechase client.
    for client in results['clients']:
        prefix = client['name']
        add_line(prefix + ' Total Blocks', client['blocks'])
        add_line(prefix + ' Failed Blocks', len(client['failed blocks']))
        add_line(prefix + ' Session Failures', len(client['session failures']))
        add_line(prefix + ' Setup Failures', len(client['setup failures']))
        add_line(prefix + ' Cleanup Failures', len(client['cleanup failures']))
    return {'job_details': details}
def get_result_string(results):
    """Map parsed results onto a treeherder result string.

    'busted' when totals are missing entirely, 'testfailed' when any client
    had failures (or 20+ failed blocks), otherwise 'success'.
    """
    if results['total failed'] is None or results['total passed'] is None:
        return 'busted'
    for client in results['clients']:
        client_ok = (not client['setup failures']
                     and not client['cleanup failures']
                     and not client['session failures']
                     and len(client['failed blocks']) < 20)
        if not client_ok:
            return 'testfailed'
    return 'success'
def main():
    """Parse steeplechase results, build a treeherder result set + job,
    and submit both to the configured treeherder instance."""
    config = get_config()
    app_revision, app_repository = get_app_information(config)
    files = get_files(config)
    # The package file's ctime stands in for the push timestamp.
    push_time = int(os.stat(files[0]).st_ctime)
    results = sclogparse.parse(config['files']['steeplechase_log'])
    # Jobs are grouped under a synthetic result set keyed by a random hash.
    result_set_hash = create_revision_hash()

    # Build the result set (one synthetic revision per run).
    trsc = TreeherderResultSetCollection()
    trs = trsc.get_resultset()
    trs.add_revision_hash(result_set_hash)
    author = 'Firefox %s' % (config['platform_info']['release'].title())
    trs.add_author(author)
    trs.add_push_timestamp(push_time)
    tr = trs.get_revision()
    tr.add_revision(app_revision)
    tr.add_author(author)
    tr.add_comment(get_buildid(config))
    tr.add_files([os.path.basename(f) for f in files])
    tr.add_repository(app_repository)
    trs.add_revision(tr)
    trsc.add(trs)

    # Build the job entry describing this WebRTC sanity run.
    tjc = TreeherderJobCollection()
    tj = tjc.get_job()
    tj.add_revision_hash(result_set_hash)
    tj.add_project(config['treeherder']['repo']['project'])
    tj.add_job_guid(str(uuid.uuid4()))
    tj.add_group_name('WebRTC QA Tests')
    tj.add_group_symbol('WebRTC')
    tj.add_job_name('Sanity')
    tj.add_job_symbol('end')
    # Build/machine info describe the primary (client 1) platform.
    tj.add_build_info(config['platform_info']['os_name'], config['platform_info']['platform'], config['platform_info']['architecture'])
    tj.add_machine_info(config['platform_info']['os_name'], config['platform_info']['platform'], config['platform_info']['architecture'])
    tj.add_description('WebRTC Jenkins')
    tj.add_option_collection({'opt': True})  # must not be {}!
    tj.add_reason('testing')
    tj.add_who('Mozilla Platform QA')
    tj.add_submit_timestamp(config['times']['submit_time'])
    tj.add_start_timestamp(config['times']['start_time'])
    tj.add_end_timestamp(config['times']['end_time'])
    tj.add_state('completed')
    # Machine name now comes from the --machine1 argument, not the local host.
    tj.add_machine(config['platform_info']['machine'])
    result_string = get_result_string(results)
    tj.add_result(result_string)
    # A busted run has no parseable results worth attaching.
    if result_string != 'busted':
        summary = get_result_summary(results)
        tj.add_artifact('Job Info', 'json', summary)
        tj.add_artifact('Results', 'json', results)
    tjc.add(tj)

    # Echo both payloads (pretty-printed) into the jenkins console log.
    print 'trsc = ' + json.dumps(json.loads(trsc.to_json()), sort_keys=True,
                                 indent=4, separators=(',', ': '))
    print 'tjc = ' + json.dumps(json.loads(tjc.to_json()), sort_keys=True,
                                indent=4, separators=(',', ': '))

    # Submit the result set first, then the job referencing it.
    req = TreeherderRequest(
        protocol='http',
        host=config['treeherder']['repo']['host'],
        project=config['treeherder']['repo']['project'],
        oauth_key=config['treeherder']['credentials']['key'],
        oauth_secret=config['treeherder']['credentials']['secret']
    )
    req.post(trsc)
    req.post(tjc)


# Script entry point.
if __name__ == '__main__':
    main()
|
"""
This module provides means to detect the App Engine environment.
"""
import os
def is_appengine():
    """True when the App Engine runtime environment marker is present."""
    return os.environ.get("APPENGINE_RUNTIME") is not None
def is_appengine_sandbox():
    """Reports if the app is running in the first generation sandbox.

    The second generation runtimes are technically still in a sandbox, but it
    is much less restrictive, so generally you shouldn't need to check for it.

    see https://cloud.google.com/appengine/docs/standard/runtimes
    """
    if not is_appengine():
        return False
    # Only the python27 runtime is the restrictive first-gen sandbox.
    return os.environ["APPENGINE_RUNTIME"] == "python27"
def is_local_appengine():
    """True when running under the local development server."""
    server = os.environ.get("SERVER_SOFTWARE", "")
    return is_appengine() and server.startswith("Development/")
def is_prod_appengine():
    """True when running on production App Engine."""
    server = os.environ.get("SERVER_SOFTWARE", "")
    return is_appengine() and server.startswith("Google App Engine/")
def is_prod_appengine_mvms():
    """Deprecated: Managed VMs are gone, so this always reports False."""
    return False
Revert behavior to is_appengine=False in testbed (#1760)
Whether testbed tests "are appengine" is debatable, but historically
this function has returned False in testbed tests. This behavior was
inadvertently (and unnecessarily) changed in PR #1704. This commit
undoes that regression for testbed tests.
"""
This module provides means to detect the App Engine environment.
"""
import os
def is_appengine():
    """True when running on App Engine proper (dev server or production).

    Intentionally False under testbed/unit tests, which set neither
    server-software marker.
    """
    return is_prod_appengine() or is_local_appengine()
def is_appengine_sandbox():
    """Reports if the app is running in the first generation sandbox.

    The second generation runtimes are technically still in a sandbox, but it
    is much less restrictive, so generally you shouldn't need to check for it.

    see https://cloud.google.com/appengine/docs/standard/runtimes
    """
    if not is_appengine():
        return False
    # Only the python27 runtime is the restrictive first-gen sandbox.
    return os.environ["APPENGINE_RUNTIME"] == "python27"
def is_local_appengine():
    """True under dev_appserver: runtime marker plus a Development server string."""
    if "APPENGINE_RUNTIME" not in os.environ:
        return False
    return os.environ.get("SERVER_SOFTWARE", "").startswith("Development/")
def is_prod_appengine():
    """True on production App Engine: runtime marker plus GAE server string."""
    if "APPENGINE_RUNTIME" not in os.environ:
        return False
    return os.environ.get("SERVER_SOFTWARE", "").startswith("Google App Engine/")
def is_prod_appengine_mvms():
    """Deprecated: Managed VMs are gone, so this always reports False."""
    return False
|
# -*- coding: utf-8 -*-
"""Universal Language Codes
This library aims to provide an [en/de]coding utility for language codes.
To get a "universal" language code you create a LanguageCode object, giving it
the code and the standard it should use to look up that code.
>>> lc = LanguageCode('en', 'iso-639-1')
Internally the code is stored in a custom standard designed specifically for
this purpose. It doesn't have any use in the real world, so to get a useful
representation out you "encode" the code:
>>> lc.encode('iso-639-2')
'eng'
This is similar to Python's handling of Unicode and byte strings:
>>> s = 'Hello, world'.decode('ascii')
>>> s
u'Hello, world'
>>> s.encode('utf-8')
'Hello, world'
"""
import copy
from .bcp47.converter import (
StrictBCP47ToUnilangConverter, LossyBCP47ToUnilangConverter,
UNILANGS_TO_BCP47
)
def _reverse_dict(d):
return dict([(v, k) for k, v in d.items()])
# INTERNAL_NAMES stores the English and native names for the various languages.
#
# Code -> (English name, Native name)
#
# { 'ar': (u'Arabic', u'العربية'),
#   'el': (u'Greek', u'Ελληνικά'),
#   ... }
#
# Populated by _generate_initial_data() below.
INTERNAL_NAMES = {}

# TO_INTERNAL is a dict of dicts.
#
# The first level is a mapping of standard names to their dicts.
#
# The second level (one per standard) is a mapping of that standard's language
# codes to the internal language codes.
#
# { 'iso-639-1': {
#     'ak': 'aka',
#     ...
#   },
#   ... }
TO_INTERNAL = {}

# FROM_INTERNAL is a dict of dicts.
#
# The first level is a mapping of standard names to their dicts.
#
# The second level (one per standard) is a mapping of internal language codes
# to that standard's language codes — i.e. the inverse of TO_INTERNAL:
#
# { 'iso-639-1': {
#     'aka': 'ak',
#     ...
#   },
#   ... }
FROM_INTERNAL = {}
def gettext_noop(s):
    """Return `s` unchanged.

    Marks strings for translation tooling (hence the gettext name) without
    requiring gettext at import time.  Defined with `def` rather than a
    lambda assignment (PEP 8 E731).
    """
    return s
def add_standard(standard, mapping, base=None, exclude=None):
    """Add a new standard to the list of supported standards.

    `mapping` is a dictionary from your custom standard's codes to the
    internal "universal" codes used by this library.

    `base` (optional) names an existing standard whose mappings are copied
    first and then updated with `mapping`.  Handy for a standard that is
    mostly like an existing one:

        >>> add_standard('my-standard', {'american': 'en'}, base='iso-639-1')

    which behaves like ISO-639-1 plus an extra 'american' code for English:

        >>> lc = LanguageCode('american', 'my-standard')
        >>> lc.encode('iso-639-2')
        'eng'

    `exclude` (optional) lists base codes to drop from the copy:

        >>> add_standard('my-standard', {'american': 'en'},
                         base='iso-639-1', exclude=('no', 'en'))
    """
    if base:
        # Shallow copy so the base standard itself is never mutated.
        combined = dict(TO_INTERNAL[base])
        combined.update(mapping)
        if exclude:
            for code in exclude:
                del combined[code]
    else:
        combined = mapping
    TO_INTERNAL[standard] = combined
    FROM_INTERNAL[standard] = _reverse_dict(combined)
def add_standard_custom(standard, to_internal, from_internal):
    """Register a standard with explicit forward and reverse mappings.

    `to_internal` maps the custom standard's codes to the internal
    "universal" codes; `from_internal` maps internal codes back to the
    custom standard's codes.  Use this when the reverse mapping is not
    simply the inverse of the forward one.
    """
    TO_INTERNAL[standard] = to_internal
    FROM_INTERNAL[standard] = from_internal
def _generate_initial_data():
    """Populate INTERNAL_NAMES for every internal language code.

    Each entry maps an internal code to a (English name, native name)
    pair.  English names are wrapped in gettext_noop so they can later be
    translated via gettext; native names are plain unicode strings.
    """
    INTERNAL_NAMES.update({
        'aa': (gettext_noop(u'Afar'), u'Afar'),
        'ab': (gettext_noop(u'Abkhazian'), u'Abkhazian'),
        'ae': (gettext_noop(u'Avestan'), u'Avestan'),
        'af': (gettext_noop(u'Afrikaans'), u'Afrikaans'),
        'aka': (gettext_noop(u'Akan'), u'Akana'),
        'amh': (gettext_noop(u'Amharic'), u'Amharic'),
        'an': (gettext_noop(u'Aragonese'), u'Aragonés'),
        'ar': (gettext_noop(u'Arabic'), u'العربية'),
        'arc': (gettext_noop(u'Aramaic'), u'ܐܪܡܝܐ'),
        'arq': (gettext_noop(u'Algerian Arabic'), u'دزيري/جزائري'),
        'as': (gettext_noop(u'Assamese'), u'Assamese'),
        'ase': (gettext_noop(u'American Sign Language'), u'American Sign Language'),
        'ast': (gettext_noop(u'Asturian'), u'Asturianu'),
        'av': (gettext_noop(u'Avaric'), u'Авар'),
        'ay': (gettext_noop(u'Aymara'), u'Aymar'),
        'az': (gettext_noop(u'Azerbaijani'), u'Azərbaycan'),
        'ba': (gettext_noop(u'Bashkir'), u'Башҡорт'),
        'bam': (gettext_noop(u'Bambara'), u'Bamanankan'),
        'be': (gettext_noop(u'Belarusian'), u'Беларуская'),
        'ber': (gettext_noop(u'Berber'), u'Berber'),
        'bg': (gettext_noop(u'Bulgarian'), u'Български'),
        'bh': (gettext_noop(u'Bihari'), u'भोजपुर'),
        'bi': (gettext_noop(u'Bislama'), u'Bislama'),
        'bn': (gettext_noop(u'Bengali'), u'Bengali'),
        'bnt': (gettext_noop(u'Ibibio'), u'Ibibio'),
        'bo': (gettext_noop(u'Tibetan'), u'Bod skad'),
        'br': (gettext_noop(u'Breton'), u'Brezhoneg'),
        'bs': (gettext_noop(u'Bosnian'), u'Bosanski'),
        'bug': (gettext_noop(u'Buginese'), u'Basa Ugi'),
        'ca': (gettext_noop(u'Catalan'), u'Català'),
        'cak': (gettext_noop(u'Cakchiquel, Central'), u'Cakchiquel, Central'),
        'ce': (gettext_noop(u'Chechen'), u'Chechen'),
        'ceb': (gettext_noop(u'Cebuano'), u'Cebuano'),
        'ch': (gettext_noop(u'Chamorro'), u'Chamoru'),
        'cho': (gettext_noop(u'Choctaw'), u'Choctaw'),
        'cku': (gettext_noop(u'Koasati'), u'Koasati'),
        'co': (gettext_noop(u'Corsican'), u'Corsu'),
        'cr': (gettext_noop(u'Cree'), u'Nehiyaw'),
        'cs': (gettext_noop(u'Czech'), u'Čeština'),
        'ctu': (gettext_noop(u'Chol, Tumbalá'), u'Chol, Tumbalá'),
        'ctd': (gettext_noop(u'Chin, Tedim'), u'Chin, Tedim'),
        'cu': (gettext_noop(u'Church Slavic'), u'Church Slavic'),
        'cv': (gettext_noop(u'Chuvash'), u'Chuvash'),
        'cy': (gettext_noop(u'Welsh'), u'Cymraeg'),
        'da': (gettext_noop(u'Danish'), u'Dansk'),
        'de': (gettext_noop(u'German'), u'Deutsch'),
        'dv': (gettext_noop(u'Divehi'), u'Divehi'),
        'dz': (gettext_noop(u'Dzongkha'), u'Dzongkha'),
        'ee': (gettext_noop(u'Ewe'), u'Ewe'),
        'efi': (gettext_noop(u'Efik'), u'Efik'),
        'el': (gettext_noop(u'Greek'), u'Ελληνικά'),
        'en': (gettext_noop(u'English'), u'English'),
        'en-gb': (gettext_noop(u'English, British'), u'English, British'),
        'eo': (gettext_noop(u'Esperanto'), u'Esperanto'),
        'es': (gettext_noop(u'Spanish'), u'Español'),
        'es-ar': (gettext_noop(u'Spanish, Argentinian'), u'Spanish, Argentinian'),
        'es-mx': (gettext_noop(u'Spanish, Mexican'), u'Spanish, Mexican'),
        # NOTE(review): native name below has a trailing ", " — looks like a typo; verify.
        'es-ni': (gettext_noop(u'Spanish, Nicaraguan'), u'Spanish, Nicaraguan, '),
        'et': (gettext_noop(u'Estonian'), u'Eesti'),
        'eu': (gettext_noop(u'Basque'), u'Euskara'),
        'fa': (gettext_noop(u'Persian'), u'فارسی'),
        'ff': (gettext_noop(u'Fulah'), u'Fulah'),
        'fi': (gettext_noop(u'Finnish'), u'Suomi'),
        'fil': (gettext_noop(u'Filipino'), u'Filipino'),
        'fj': (gettext_noop(u'Fijian'), u'Na Vosa Vakaviti'),
        'fo': (gettext_noop(u'Faroese'), u'Føroyskt'),
        'fr': (gettext_noop(u'French'), u'Français'),
        'fr-ca': (gettext_noop(u'French, Canadian'), u'French, Canadian'),
        'ful': (gettext_noop(u'Fula'), u'Fula'),
        'fy-nl': (gettext_noop(u'Frisian'), u'Frysk'),
        'ga': (gettext_noop(u'Irish'), u'Gaeilge'),
        'gd': (gettext_noop(u'Scottish Gaelic'), u'Gàidhlig'),
        'gl': (gettext_noop(u'Galician'), u'Galego'),
        'gn': (gettext_noop(u'Guaran'), u'Avañe\'ẽ'),
        'gu': (gettext_noop(u'Gujarati'), u'ગુજરાતી'),
        'gv': (gettext_noop(u'Manx'), u'Gaelg'),
        'hb': (gettext_noop(u'HamariBoli (Roman Hindi-Urdu)'), u'HamariBoli'),
        'hai': (gettext_noop(u'Haida'), u'Haida'),
        'hau': (gettext_noop(u'Hausa'), u'هَوُسَ'),
        'haw': (gettext_noop(u'Hawaiian'), u'Hawaiian'),
        'haz': (gettext_noop(u'Hazaragi'), u'هزارگی'),
        'hch': (gettext_noop(u'Huichol'), u'Huichol'),
        'he': (gettext_noop(u'Hebrew'), u'עברית'),
        'hi': (gettext_noop(u'Hindi'), u'हिन्दी'),
        'ho': (gettext_noop(u'Hiri Motu'), u'Hiri Motu'),
        'hr': (gettext_noop(u'Croatian'), u'Hrvatski'),
        'ht': (gettext_noop(u'Creole, Haitian'), u'Creole, Haitian'),
        'hu': (gettext_noop(u'Hungarian'), u'Magyar'),
        'hup': (gettext_noop(u'Hupa'), u'Hupa'),
        'hus': (gettext_noop(u'Huastec, Veracruz'), u'Huastec, Veracruz'),
        'hy': (gettext_noop(u'Armenian'), u'Հայերեն'),
        'hz': (gettext_noop(u'Herero'), u'Herero'),
        'ia': (gettext_noop(u'Interlingua'), u'Interlingua'),
        'ibo': (gettext_noop(u'Igbo'), u'Igbo'),
        'id': (gettext_noop(u'Indonesian'), u'Bahasa Indonesia'),
        'ie': (gettext_noop(u'Interlingue'), u'Interlingue'),
        'ii': (gettext_noop(u'Sichuan Yi'), u'Sichuan Yi'),
        'ik': (gettext_noop(u'Inupia'), u'Iñupiak'),
        'ilo': (gettext_noop(u'Ilocano'), u'Ilocano'),
        'inh': (gettext_noop(u'Ingush'), u'Ingush'),
        'io': (gettext_noop(u'Ido'), u'Ido'),
        'iro': (gettext_noop(u'Iroquoian languages'), u'Iroquoian languages'),
        'is': (gettext_noop(u'Icelandic'), u'Íslenska'),
        'it': (gettext_noop(u'Italian'), u'Italiano'),
        'iu': (gettext_noop(u'Inuktitut'), u'Inuktitut'),
        'ja': (gettext_noop(u'Japanese'), u'日本語'),
        'jv': (gettext_noop(u'Javanese'), u'Basa Jawa'),
        'ka': (gettext_noop(u'Georgian'), u'ქართული'),
        'kar': (gettext_noop(u'Karen'), u'Karen'),
        'kau': (gettext_noop(u'Kanuri'), u'Kanuri'),
        'kik': (gettext_noop(u'Gikuyu'), u'Gikuyu'),
        'kin': (gettext_noop(u'Rwandi'), u'Kinyarwanda'),
        'kj': (gettext_noop(u'Kuanyama, Kwanyama'), u'Kuanyama, Kwanyama'),
        'kk': (gettext_noop(u'Kazakh'), u'қазақша'),
        'kl': (gettext_noop(u'Greenlandic'), u'Kalaallisut'),
        'km': (gettext_noop(u'Khmer'), u'Khmer'),
        'kn': (gettext_noop(u'Kannada'), u'ಕನ್ನಡ'),
        'ko': (gettext_noop(u'Korean'), u'한국어'),
        'kon': (gettext_noop(u'Kongo'), u'Kongo'),
        'ks': (gettext_noop(u'Kashmiri'), u'कश्मीरी - (كشميري'),
        'ksh': (gettext_noop(u'Colognian'), u'Kölsch'),
        'ku': (gettext_noop(u'Kurdish'), u'Kurdî / كوردی'),
        'kv': (gettext_noop(u'Komi'), u'Komi'),
        'kw': (gettext_noop(u'Cornish'), u'Kernewek/Karnuack'),
        'ky': (gettext_noop(u'Kyrgyz'), u'Kırgızca'),
        'la': (gettext_noop(u'Latin'), u'Latina'),
        'ltg': (gettext_noop(u'Latgalian'), u'Latgalian'),
        'lld': (gettext_noop(u'Ladin'), u'Ladino'),
        'lb': (gettext_noop(u'Luxembourgish'), u'Lëtzebuergesch'),
        'lg': (gettext_noop(u'Ganda'), u'Ganda'),
        'li': (gettext_noop(u'Limburgish'), u'Limburgs'),
        'lin': (gettext_noop(u'Lingala'), u'Lingala'),
        'lkt': (gettext_noop(u'Lakota'), u'Lakota'),
        'lo': (gettext_noop(u'Lao'), u'Lao'),
        'lt': (gettext_noop(u'Lithuanian'), u'Lietuvių'),
        'lu': (gettext_noop(u'Luba-Katagana'), u'Luba-Katagana'),
        'lua': (gettext_noop(u'Luba-Kasai'), u'Luba-Kasai'),
        'luo': (gettext_noop(u'Luo'), u'Luo'),
        'luy': (gettext_noop(u'Luhya'), u'Luhya'),
        'lv': (gettext_noop(u'Latvian'), u'Latviešu'),
        # 'meta-*' entries are not languages; they tag metadata tracks.
        'meta-audio': (gettext_noop(u'Metadata: Audio Description'), u'Metadata: Audio Description'),
        'meta-geo': (gettext_noop(u'Metadata: Geo'), u'Metadata: Geo'),
        'meta-tw': (gettext_noop(u'Metadata: Twitter'), u'Metadata: Twitter'),
        'meta-wiki': (gettext_noop(u'Metadata: Wikipedia'), u'Metadata: Wikipedia'),
        'mad': (gettext_noop(u'Madurese'), u'Madurese'),
        'mh': (gettext_noop(u'Marshallese'), u'Ebon'),
        'mi': (gettext_noop(u'Maori'), u'Māori'),
        'mk': (gettext_noop(u'Macedonian'), u'Македонски'),
        'ml': (gettext_noop(u'Malayalam'), u'Malayalam'),
        'mlg': (gettext_noop(u'Malagasy'), u'Malagasy'),
        'mn': (gettext_noop(u'Mongolian'), u'Монгол'),
        'mnk': (gettext_noop(u'Mandinka'), u'Mandinka'),
        'mo': (gettext_noop(u'Moldavian, Moldovan'), u'Moldoveana'),
        'moh': (gettext_noop(u'Mohawk'), u'Mohawk'),
        'mni': (gettext_noop(u'Manipuri'), u'মৈইতৈইলোন'),
        'mos': (gettext_noop(u'Mossi'), u'Mossi'),
        'mr': (gettext_noop(u'Marathi'), u'मराठी'),
        'ms': (gettext_noop(u'Malay'), u'Bahasa Melayu'),
        'mt': (gettext_noop(u'Maltese'), u'bil-Malti'),
        'mus': (gettext_noop(u'Muscogee'), u'Muscogee'),
        'my': (gettext_noop(u'Burmese'), u'Myanmasa'),
        'nan': (gettext_noop(u'Hokkien'), u'Hokkien'),
        'na': (gettext_noop(u'Naurunan'), u'dorerin Naoero'),
        'nb': (gettext_noop(u'Norwegian Bokmal'), u'Norsk Bokmål'),
        'nci': (gettext_noop(u'Nahuatl, Classical'), u'Nahuatl, Classical'),
        'ncj': (gettext_noop(u'Nahuatl, Northern Puebla'), u'Nahuatl, Northern Puebla'),
        'nd': (gettext_noop(u'North Ndebele'), u'North Ndebele'),
        'ne': (gettext_noop(u'Nepali'), u'नेपाली'),
        'ng': (gettext_noop(u'Ndonga'), u'Ndonga'),
        'nl': (gettext_noop(u'Dutch'), u'Nederlands'),
        'nn': (gettext_noop(u'Norwegian Nynorsk'), u'Nynorsk'),
        'no': (gettext_noop(u'Norwegian'), u'Norwegian'),
        'nr': (gettext_noop(u'Southern Ndebele'), u'Southern Ndebele'),
        'nso': (gettext_noop(u'Northern Sotho'), u'Northern Sotho'),
        'nv': (gettext_noop(u'Navajo'), u'Navajo'),
        'nya': (gettext_noop(u'Chewa'), u'Chewa'),
        'oc': (gettext_noop(u'Occitan'), u'Occitan'),
        'oji': (gettext_noop(u'Ojibwe'), u'Ojibwe'),
        'or': (gettext_noop(u'Oriya'), u'Oriya'),
        'orm': (gettext_noop(u'Oromo'), u'Oromoo'),
        'os': (gettext_noop(u'Ossetian, Ossetic'), u'Ossetian, Ossetic'),
        'pam': (gettext_noop(u'Kapampangan'), u'Pampango'),
        'pap': (gettext_noop(u'Papiamento'), u'Papiamentu'),
        'pan': (gettext_noop(u'Eastern Punjabi'), u'ਪੰਜਾਬੀ'),
        'pi': (gettext_noop(u'Pali'), u'पािऴ'),
        'pl': (gettext_noop(u'Polish'), u'Polski'),
        'pnb': (gettext_noop(u'Western Punjabi'), u'پنجابی'),
        'prs': (gettext_noop(u'Dari'), u'دری'),
        'ps': (gettext_noop(u'Pashto'), u'پښتو'),
        'pt': (gettext_noop(u'Portuguese'), u'Português'),
        'pt-br': (gettext_noop(u'Portuguese, Brazilian'), u'Portuguese, Brazilian'),
        'que': (gettext_noop(u'Quechua'), u'Runa Simi'),
        'qvi': (gettext_noop(u'Quichua, Imbabura Highland'), u'Quichua, Imbabura Highland'),
        'raj': (gettext_noop(u'Rajasthani'), u'राजस्थानी'),
        'rm': (gettext_noop(u'Romansh'), u'Rumantsch'),
        'ro': (gettext_noop(u'Romanian'), u'Română'),
        'ru': (gettext_noop(u'Russian'), u'Русский'),
        'run': (gettext_noop(u'Rundi'), u'Kirundi'),
        'rup': (gettext_noop(u'Macedo'), u'Macedo'),
        'ry': (gettext_noop(u'Rusyn'), u'Rusyn'),
        'sa': (gettext_noop(u'Sanskrit'), u'संस्कृतम्'),
        'sc': (gettext_noop(u'Sardinian'), u'Sardu'),
        'sco': (gettext_noop(u'Scots'), u'Scots'),
        'sd': (gettext_noop(u'Sindhi'), u'سنڌي'),
        'se': (gettext_noop(u'Northern Sami'), u'Northern Sami'),
        'sg': (gettext_noop(u'Sango'), u'Sängö'),
        'sgn': (gettext_noop(u'Sign Languages'), u'Sign Languages'),
        'sh': (gettext_noop(u'Serbo-Croatian'), u'Srpskohrvatski'),
        'si': (gettext_noop(u'Sinhala'), u'Sinhalese'),
        'sk': (gettext_noop(u'Slovak'), u'Slovenčina'),
        'skx': (gettext_noop(u'Seko Padang'), u'Sua Tu Padang'),
        'sl': (gettext_noop(u'Slovenian'), u'Slovenščina'),
        'sm': (gettext_noop(u'Samoan'), u'Gagana Samoa'),
        'sna': (gettext_noop(u'Shona'), u'chiShona'),
        'som': (gettext_noop(u'Somali'), u'Soomaaliga'),
        'sot': (gettext_noop(u'Sotho'), u'seSotho'),
        'sq': (gettext_noop(u'Albanian'), u'Shqip'),
        'sr': (gettext_noop(u'Serbian'), u'Српски / Srpski'),
        'sr-latn': (gettext_noop(u'Serbian, Latin'), u'Serbian, Latin'),
        'srp': (gettext_noop(u'Montenegrin'), u'Crnogorski jezik, Црногорски језик'),
        'ss': (gettext_noop(u'Swati'), u'SiSwati'),
        'su': (gettext_noop(u'Sundanese'), u'Basa Sunda'),
        'sv': (gettext_noop(u'Swedish'), u'Svenska'),
        'swa': (gettext_noop(u'Swahili'), u'Kiswahili'),
        'szl': (gettext_noop(u'Silesian'), u'ślōnskŏ gŏdka'),
        'ta': (gettext_noop(u'Tamil'), u'தமிழ்'),
        'tar': (gettext_noop(u'Tarahumara, Central'), u'Ralámul'),
        'tet': (gettext_noop(u'Tetum'), u'Tetum'),
        'te': (gettext_noop(u'Telugu'), u'తెలుగు'),
        'tg': (gettext_noop(u'Tajik'), u'Тоҷикӣ'),
        'th': (gettext_noop(u'Thai'), u'ไทย'),
        'tir': (gettext_noop(u'Tigrinya'), u'Tigrinya'),
        'tk': (gettext_noop(u'Turkmen'), u'تركمن / Туркмен'),
        'tl': (gettext_noop(u'Tagalog'), u'Tagalog'),
        'tlh': (gettext_noop(u'Klingon'), u'tlhIngan-Hol'),
        'to': (gettext_noop(u'Tonga'), u'faka Tonga'),
        'toj': (gettext_noop(u'Tojolabal'), u'Tojolabal'),
        'tr': (gettext_noop(u'Turkish'), u'Türkçe'),
        'ts': (gettext_noop(u'Tsonga'), u'Xitsonga'),
        'tsn': (gettext_noop(u'Tswana'), u'Setswana'),
        'tsz': (gettext_noop(u'Purepecha'), u'Purepecha'),
        'tt': (gettext_noop(u'Tatar'), u'Tatarça / Татарча'),
        'tw': (gettext_noop(u'Twi'), u'Twi'),
        'ty': (gettext_noop(u'Tahitian'), u'Tahitian'),
        'tzh': (gettext_noop(u'Tzeltal, Oxchuc'), u'Tzeltal, Oxchuc'),
        'tzo': (gettext_noop(u'Tzotzil, Venustiano Carranza'), u'Tzotzil, Venustiano Carranza'),
        'uk': (gettext_noop(u'Ukrainian'), u'Українська'),
        'umb': (gettext_noop(u'Umbundu'), u'Umbundu'),
        'ug': (gettext_noop(u'Uyghur'), u'ئۇيغۇر'),
        'ur': (gettext_noop(u'Urdu'), u'اڙدو'),
        'uz': (gettext_noop(u'Uzbek'), u'O‘zbek'),
        've': (gettext_noop(u'Venda'), u'Venda'),
        'vi': (gettext_noop(u'Vietnamese'), u'Tiếng Việt'),
        'vls': (gettext_noop(u'Flemish'), u'Vlaams'),
        'vo': (gettext_noop(u'Volapuk'), u'Volapük'),
        'wa': (gettext_noop(u'Walloon'), u'Walon'),
        'wbl': (gettext_noop(u'Wakhi'), u'Wakhi'),
        'wol': (gettext_noop(u'Wolof'), u'Wollof'),
        'xho': (gettext_noop(u'Xhosa'), u'isiXhosa'),
        'yaq': (gettext_noop(u'Yaqui'), u'Yaqui'),
        'yi': (gettext_noop(u'Yiddish'), u'ייִדיש'),
        'yor': (gettext_noop(u'Yoruba'), u'Yorùbá'),
        'yua': (gettext_noop(u'Maya, Yucatán'), u'Maya, Yucatán'),
        'za': (gettext_noop(u'Zhuang, Chuang'), u'Cuengh'),
        'zam': (gettext_noop(u'Zapotec, Miahuatlán'), u'Zapotec, Miahuatlán'),
        # NOTE(review): English name for 'zh' says "Chinese, Yue" while the
        # native name is generic 中文 — confirm which is intended.
        'zh': (gettext_noop(u'Chinese, Yue'), u'中文'),
        'zh-cn': (gettext_noop(u'Chinese, Simplified'), u'简体中文'),
        'zh-tw': (gettext_noop(u'Chinese, Traditional'), u'繁體中文'),
        'zh-sg': (gettext_noop(u'Chinese, Simplified (Singaporean)'), u''),
        'zh-hk': (gettext_noop(u'Chinese, Traditional (Hong Kong)'), u''),
        'zul': (gettext_noop(u'Zulu'), u'isiZulu'),
    })
def _add_iso_639_1():
    """Register the ISO 639-1 standard (two-letter codes -> internal codes)."""
    add_standard('iso-639-1', {
        'ab': 'ab',
        'aa': 'aa',
        'af': 'af',
        'ak': 'aka',
        'sq': 'sq',
        'am': 'amh',
        'ar': 'ar',
        'an': 'an',
        'hy': 'hy',
        'as': 'as',
        'av': 'av',
        'ae': 'ae',
        'ay': 'ay',
        'az': 'az',
        'bm': 'bam',
        'ba': 'ba',
        'eu': 'eu',
        'be': 'be',
        'bn': 'bn',
        'bh': 'bh',
        'bi': 'bi',
        'bs': 'bs',
        'br': 'br',
        'bg': 'bg',
        'my': 'my',
        'ca': 'ca',
        'km': 'km',
        'ch': 'ch',
        'ce': 'ce',
        'ny': 'nya',
        'zh': 'zh',
        'cu': 'cu',
        'cv': 'cv',
        'kw': 'kw',
        'co': 'co',
        'cr': 'cr',
        'hr': 'hr',
        'cs': 'cs',
        'da': 'da',
        'dv': 'dv',
        'nl': 'nl',
        'dz': 'dz',
        'en': 'en',
        'eo': 'eo',
        'et': 'et',
        'ee': 'ee',
        'fo': 'fo',
        'fj': 'fj',
        'fi': 'fi',
        'fr': 'fr',
        'ff': 'ff',
        'gl': 'gl',
        'lg': 'lg',
        'ka': 'ka',
        'de': 'de',
        'el': 'el',
        'gn': 'gn',
        'gu': 'gu',
        'ht': 'ht',
        'ha': 'hau',
        'he': 'he',
        'hz': 'hz',
        'hi': 'hi',
        'ho': 'ho',
        'hu': 'hu',
        'is': 'is',
        'io': 'io',
        'ig': 'ibo',
        'id': 'id',
        'ia': 'ia',
        'ie': 'ie',
        'iu': 'iu',
        'ik': 'ik',
        'ga': 'ga',
        'it': 'it',
        'ja': 'ja',
        'jv': 'jv',
        'kl': 'kl',
        'kn': 'kn',
        'kr': 'kau',
        'ks': 'ks',
        'kk': 'kk',
        'ki': 'kik',
        'rw': 'kin',
        'ky': 'ky',
        'kv': 'kv',
        'kg': 'kon',
        'ko': 'ko',
        'kj': 'kj',
        'ku': 'ku',
        'lo': 'lo',
        'la': 'la',
        'lv': 'lv',
        'li': 'li',
        'ln': 'lin',
        'lt': 'lt',
        'lu': 'lu',
        'lb': 'lb',
        'mk': 'mk',
        'mg': 'mlg',
        'ms': 'ms',
        'ml': 'ml',
        'mt': 'mt',
        'gv': 'gv',
        'mi': 'mi',
        'mr': 'mr',
        'mh': 'mh',
        'mo': 'mo',
        'mn': 'mn',
        'na': 'na',
        'nv': 'nv',
        'ng': 'ng',
        'ne': 'ne',
        'nd': 'nd',
        'se': 'se',
        # Generic Norwegian 'no' is collapsed onto Bokmål ('nb').
        'no': 'nb',
        'nb': 'nb',
        'nn': 'nn',
        'oc': 'oc',
        'oj': 'oji',
        'or': 'or',
        'om': 'orm',
        'os': 'os',
        'pi': 'pi',
        # NOTE(review): maps to internal 'pa', but INTERNAL_NAMES defines
        # 'pan' (Eastern Punjabi), not 'pa' — name() lookups may fail; verify.
        'pa': 'pa',
        'fa': 'fa',
        'pl': 'pl',
        'pt': 'pt',
        'ps': 'ps',
        'qu': 'que',
        'ro': 'ro',
        'rm': 'rm',
        'rn': 'run',
        'ru': 'ru',
        'ry': 'ry',
        'sm': 'sm',
        'sg': 'sg',
        'sa': 'sa',
        'sc': 'sc',
        'gd': 'gd',
        'sr': 'sr',
        'sh': 'sh',
        'sn': 'sna',
        'ii': 'ii',
        'sd': 'sd',
        'si': 'si',
        'sk': 'sk',
        'sl': 'sl',
        'so': 'som',
        'st': 'sot',
        'nr': 'nr',
        'es': 'es',
        'su': 'su',
        'sw': 'swa',
        'ss': 'ss',
        'sv': 'sv',
        'tl': 'tl',
        'ty': 'ty',
        'tg': 'tg',
        'ta': 'ta',
        'tt': 'tt',
        'te': 'te',
        'th': 'th',
        'bo': 'bo',
        'ti': 'tir',
        'to': 'to',
        'ts': 'ts',
        'tn': 'tsn',
        'tr': 'tr',
        'tk': 'tk',
        'tw': 'tw',
        'ug': 'ug',
        'uk': 'uk',
        'ur': 'ur',
        'uz': 'uz',
        've': 've',
        'vi': 'vi',
        'vo': 'vo',
        'wa': 'wa',
        'cy': 'cy',
        'fy': 'fy-nl',
        'wo': 'wol',
        'xh': 'xho',
        'yi': 'yi',
        'yo': 'yor',
        'za': 'za',
        'zu': 'zul',
    })
def _add_django():
    """Register the 'django' standard (Django LANGUAGES-style codes).

    These codes happen to coincide with the internal codes, so every
    entry is an identity mapping.
    """
    add_standard('django', {
        'ar': 'ar',
        'az': 'az',
        'bg': 'bg',
        'bn': 'bn',
        'bs': 'bs',
        'ca': 'ca',
        'cs': 'cs',
        'cy': 'cy',
        'da': 'da',
        'de': 'de',
        'el': 'el',
        'en': 'en',
        'en-gb': 'en-gb',
        'es': 'es',
        'es-ar': 'es-ar',
        'es-mx': 'es-mx',
        'es-ni': 'es-ni',
        'et': 'et',
        'eu': 'eu',
        'fa': 'fa',
        'fi': 'fi',
        'fr': 'fr',
        'fr-ca': 'fr-ca',
        'fy-nl': 'fy-nl',
        'ga': 'ga',
        'gl': 'gl',
        'he': 'he',
        'hi': 'hi',
        'hr': 'hr',
        'hu': 'hu',
        'id': 'id',
        'is': 'is',
        'it': 'it',
        'ja': 'ja',
        'ka': 'ka',
        'km': 'km',
        'kn': 'kn',
        'ko': 'ko',
        'lt': 'lt',
        'lv': 'lv',
        'mk': 'mk',
        'ml': 'ml',
        'mn': 'mn',
        'nl': 'nl',
        'nb': 'nb',
        'nn': 'nn',
        'pl': 'pl',
        'pt': 'pt',
        'pt-br': 'pt-br',
        'ro': 'ro',
        'ru': 'ru',
        'sk': 'sk',
        'sl': 'sl',
        'sq': 'sq',
        'sr': 'sr',
        'sr-latn': 'sr-latn',
        'sv': 'sv',
        'ta': 'ta',
        'te': 'te',
        'th': 'th',
        'tr': 'tr',
        'uk': 'uk',
        'ur': 'ur',
        'vi': 'vi',
        'zh-cn': 'zh-cn',
        'zh-tw': 'zh-tw',
    })
def _add_unisubs():
    """Register the 'unisubs' standard.

    Built on top of 'django' (``base='django'``), adding the many extra
    codes Amara/Universal Subtitles supports.  All additions are identity
    mappings onto the internal codes.

    Fix: the original dict contained duplicate keys ('inh' and 'iro' each
    appeared twice with identical values); duplicates removed.  Behavior
    is unchanged, since a later duplicate simply overwrote the former.
    """
    add_standard('unisubs', {
        'aa': 'aa',
        'ab': 'ab',
        'ae': 'ae',
        'af': 'af',
        'aka': 'aka',
        'amh': 'amh',
        'an': 'an',
        'arc': 'arc',
        'arq': 'arq',
        'as': 'as',
        'ase': 'ase',
        'ast': 'ast',
        'av': 'av',
        'ay': 'ay',
        'ba': 'ba',
        'bam': 'bam',
        'be': 'be',
        'ber': 'ber',
        'bh': 'bh',
        'bi': 'bi',
        'bnt': 'bnt',
        'bo': 'bo',
        'br': 'br',
        'bug': 'bug',
        'cak': 'cak',
        'ce': 'ce',
        'ceb': 'ceb',
        'ch': 'ch',
        'cho': 'cho',
        'cku': 'cku',
        'co': 'co',
        'cr': 'cr',
        'ctu': 'ctu',
        'ctd': 'ctd',
        'cu': 'cu',
        'cv': 'cv',
        'dv': 'dv',
        'dz': 'dz',
        'ee': 'ee',
        'efi': 'efi',
        'en-gb': 'en-gb',
        'eo': 'eo',
        'es-ar': 'es-ar',
        'ff': 'ff',
        'fil': 'fil',
        'fj': 'fj',
        'fo': 'fo',
        'fr-ca': 'fr-ca',
        'ful': 'ful',
        'ga': 'ga',
        'gd': 'gd',
        'gn': 'gn',
        'gu': 'gu',
        'gv': 'gv',
        'hai': 'hai',
        'hau': 'hau',
        'haw': 'haw',
        'haz': 'haz',
        'hb': 'hb',
        'hch': 'hch',
        'ho': 'ho',
        'ht': 'ht',
        'hup': 'hup',
        'hus': 'hus',
        'hy': 'hy',
        'hz': 'hz',
        'ia': 'ia',
        'ibo': 'ibo',
        'ie': 'ie',
        'ii': 'ii',
        'ik': 'ik',
        'ilo': 'ilo',
        'iro': 'iro',
        'inh': 'inh',
        'io': 'io',
        'iu': 'iu',
        'jv': 'jv',
        'kar': 'kar',
        'kau': 'kau',
        'kik': 'kik',
        'kin': 'kin',
        'kj': 'kj',
        'kk': 'kk',
        'kl': 'kl',
        'kon': 'kon',
        'ks': 'ks',
        'ksh': 'ksh',
        'ku': 'ku',
        'kv': 'kv',
        'kw': 'kw',
        'ky': 'ky',
        'la': 'la',
        'lld': 'lld',
        'lb': 'lb',
        'lg': 'lg',
        'li': 'li',
        'lin': 'lin',
        'lkt': 'lkt',
        'lo': 'lo',
        'ltg': 'ltg',
        'lu': 'lu',
        'lua': 'lua',
        'luo': 'luo',
        'luy': 'luy',
        'meta-audio': 'meta-audio',
        'meta-geo': 'meta-geo',
        'meta-tw': 'meta-tw',
        'meta-wiki': 'meta-wiki',
        'mad': 'mad',
        'mh': 'mh',
        'mi': 'mi',
        'ml': 'ml',
        'mlg': 'mlg',
        'mni': 'mni',
        'mnk': 'mnk',
        'mo': 'mo',
        'moh': 'moh',
        'mos': 'mos',
        'mr': 'mr',
        'ms': 'ms',
        'mt': 'mt',
        'mus': 'mus',
        'my': 'my',
        'na': 'na',
        'nan': 'nan',
        'nci': 'nci',
        'nd': 'nd',
        'ne': 'ne',
        'ng': 'ng',
        'nr': 'nr',
        'nso': 'nso',
        'nv': 'nv',
        'nya': 'nya',
        'oc': 'oc',
        'oji': 'oji',
        'or': 'or',
        'orm': 'orm',
        'os': 'os',
        'pam': 'pam',
        'pan': 'pan',
        'pap': 'pap',
        'pi': 'pi',
        'pnb': 'pnb',
        'prs': 'prs',
        'ps': 'ps',
        'pt-br': 'pt-br',
        'que': 'que',
        'qvi': 'qvi',
        'raj': 'raj',
        'rm': 'rm',
        'run': 'run',
        'rup': 'rup',
        'ry': 'ry',
        'sa': 'sa',
        'sc': 'sc',
        'sco': 'sco',
        'sd': 'sd',
        'se': 'se',
        'sg': 'sg',
        'sgn': 'sgn',
        'skx': 'skx',
        'sh': 'sh',
        'si': 'si',
        'sm': 'sm',
        'sna': 'sna',
        'som': 'som',
        'sot': 'sot',
        'sr-latn': 'sr-latn',
        'srp': 'srp',
        'ss': 'ss',
        'su': 'su',
        'swa': 'swa',
        'szl': 'szl',
        'tar': 'tar',
        'tet': 'tet',
        'tg': 'tg',
        'tir': 'tir',
        'tk': 'tk',
        'tl': 'tl',
        'tlh': 'tlh',
        'to': 'to',
        'toj': 'toj',
        'ts': 'ts',
        'tsz': 'tsz',
        'tsn': 'tsn',
        'tzh': 'tzh',
        'tzo': 'tzo',
        'tt': 'tt',
        'tw': 'tw',
        'ty': 'ty',
        'ug': 'ug',
        'umb': 'umb',
        'uz': 'uz',
        've': 've',
        'vls': 'vls',
        'vo': 'vo',
        'wa': 'wa',
        'wbl': 'wbl',
        'wol': 'wol',
        'xho': 'xho',
        'yaq': 'yaq',
        'yi': 'yi',
        'yor': 'yor',
        'yua': 'yua',
        'za': 'za',
        'zam': 'zam',
        'zh': 'zh',
        'zh-cn': 'zh-cn',
        'zh-tw': 'zh-tw',
        'zh-sg': 'zh-sg',
        'zh-hk': 'zh-hk',
        'zul': 'zul',
    }, base='django')
def _add_youtube():
    """Register the 'youtube' standard (YouTube's language codes).

    YouTube uses mixed-case region subtags (e.g. 'en-GB', 'pt-BR') and a
    few legacy codes (e.g. 'iw' for Hebrew), all mapped onto the internal
    lowercase codes.
    """
    add_standard('youtube', {
        'aa': 'aa',
        'ab': 'ab',
        'ae': 'ae',
        'af': 'af',
        'ak': 'aka',
        'am': 'amh',
        'an': 'an',
        'ar': 'ar',
        'as': 'as',
        'ast': 'ast',
        'av': 'av',
        'ay': 'ay',
        'az': 'az',
        'ba': 'ba',
        'bm': 'bam',
        'ber': 'ber',
        'be': 'be',
        'bg': 'bg',
        'bh': 'bh',
        'bi': 'bi',
        'bn': 'bn',
        'bnt': 'bnt',
        'bo': 'bo',
        'br': 'br',
        'bs': 'bs',
        'ce': 'ce',
        'ceb': 'ceb',
        'ca': 'ca',
        'ch': 'ch',
        'cho': 'cho',
        'co': 'co',
        'cr': 'cr',
        'cs': 'cs',
        'cu': 'cu',
        'cv': 'cv',
        'cy': 'cy',
        'da': 'da',
        'de': 'de',
        'dv': 'dv',
        'dz': 'dz',
        'ee': 'ee',
        'efi': 'efi',
        'el': 'el',
        'en': 'en',
        'en-GB': 'en-gb',
        'en-US': 'en',
        'eo': 'eo',
        'es-AR': 'es-ar',
        'es-ES': 'es',
        'es-NI': 'es-ni',
        'es-MX': 'es-mx',
        'et': 'et',
        'eu': 'eu',
        'fa': 'fa',
        'fa-AF': 'fa',
        'fi': 'fi',
        'fil': 'fil',
        'ff': 'ff',
        'fj': 'fj',
        'fo': 'fo',
        'fr': 'fr',
        'fr-CA': 'fr-ca',
        'fy': 'fy-nl',
        'ga': 'ga',
        'gd': 'gd',
        'gl': 'gl',
        'gn': 'gn',
        'gu': 'gu',
        'gv': 'gv',
        'ha': 'hau',
        'hai': 'hai',
        'hi': 'hi',
        'ho': 'ho',
        'hr': 'hr',
        'hu': 'hu',
        'ht': 'ht',
        'hup': 'hup',
        'hy': 'hy',
        'hz': 'hz',
        'ia': 'ia',
        'id': 'id',
        'ie': 'ie',
        'ig': 'ibo',
        'ii': 'ii',
        'ik': 'ik',
        'ilo': 'ilo',
        'inh': 'inh',
        'io': 'io',
        'iu': 'iu',
        'iro': 'iro',
        'is': 'is',
        'it': 'it',
        # 'iw' is the legacy code for Hebrew.
        'iw': 'he',
        'ja': 'ja',
        'jv': 'jv',
        'ka': 'ka',
        'kar': 'kar',
        'kg': 'kon',
        'ki': 'kik',
        'kk': 'kk',
        'kj': 'kj',
        'kl': 'kl',
        'km': 'km',
        'kn': 'kn',
        'ko': 'ko',
        'ks': 'ks',
        'ksh': 'ksh',
        'kr': 'kau',
        'ku': 'ku',
        'ky': 'ky',
        'kv': 'kv',
        'kw': 'kw',
        'la': 'la',
        'lb': 'lb',
        'lg': 'lg',
        'li': 'li',
        'lld': 'lld',
        'ln': 'lin',
        'lo': 'lo',
        'lt': 'lt',
        'lu': 'lu',
        'lua': 'lua',
        'luo': 'luo',
        'luy': 'luy',
        'lv': 'lv',
        'mad': 'mad',
        'mg': 'mlg',
        'mh': 'mh',
        'mi': 'mi',
        'mk': 'mk',
        'ml': 'ml',
        'mn': 'mn',
        'mni': 'mni',
        'mo': 'mo',
        'moh': 'moh',
        'mos': 'mos',
        'mr': 'mr',
        'ms': 'ms',
        'mt': 'mt',
        'my': 'my',
        'na': 'na',
        'nd': 'nd',
        'ne': 'ne',
        'ng': 'ng',
        'nl': 'nl',
        'nl-BE': 'nl',
        'nn': 'nn',
        # Generic Norwegian 'no' is collapsed onto Bokmål ('nb').
        'no': 'nb',
        'nb': 'nb',
        'nr': 'nr',
        'nso': 'nso',
        'nv': 'nv',
        'ny': 'nya',
        'oc': 'oc',
        'oj': 'oji',
        'om': 'orm',
        'or': 'or',
        'os': 'os',
        'pa': 'pa',
        'pap': 'pap',
        'pi': 'pi',
        'pl': 'pl',
        'ps': 'ps',
        'pt-BR': 'pt-br',
        'pt-PT': 'pt',
        'qu': 'que',
        'rm': 'rm',
        'rn': 'run',
        'ro': 'ro',
        'ru': 'ru',
        'rup': 'rup',
        'rw': 'kin',
        'rue-UA': 'ry',
        'sa': 'sa',
        'sc': 'sc',
        'sd': 'sd',
        'se': 'se',
        'sg': 'sg',
        'sh': 'sh',
        'si': 'si',
        'sk': 'sk',
        'sl': 'sl',
        'sm': 'sm',
        'sn': 'sna',
        'so': 'som',
        'sq': 'sq',
        'sr': 'sr',
        'sr-Latn': 'sr-latn',
        'ss': 'ss',
        'st': 'sot',
        'su': 'su',
        'sv': 'sv',
        'sw': 'swa',
        'ta': 'ta',
        'te': 'te',
        'tet': 'tet',
        'tg': 'tg',
        'th': 'th',
        'ti': 'tir',
        'tk': 'tk',
        'tl': 'tl',
        'tlh': 'tlh',
        'tn': 'tsn',
        'to': 'to',
        'tr': 'tr',
        'ts': 'ts',
        'tt': 'tt',
        'ty': 'ty',
        'tw': 'tw',
        'uk': 'uk',
        'ug': 'ug',
        'ur': 'ur',
        'umb': 'umb',
        'uz': 'uz',
        've': 've',
        'vi': 'vi',
        'vo': 'vo',
        'wa': 'wa',
        'wo': 'wol',
        'xh': 'xho',
        'yi': 'yi',
        'yo': 'yor',
        # NOTE(review): bare 'zh' mapping to 'zh-hk' looks surprising — verify.
        'zh': 'zh-hk',
        'zh-CN': 'zh-cn',
        'zh-HK': 'zh-hk',
        'zh-Hans': 'zh-cn',
        'zh-Hant': 'zh-tw',
        'zh_Hant-HK': 'nan',
        # we need to fix unilangs what to do when
        # two dialects point to the same main language
        'zh-SG': 'zh-sg',
        'zh-TW': 'zh-tw',
        'za': 'za',
        'zu': 'zul'})
def _add_bcp47():
    """Register the BCP-47 standards using converter objects.

    'bcp47' uses a strict converter, 'bcp47-lossy' a lossy one; both share
    the same reverse mapping (UNILANGS_TO_BCP47).
    """
    registrations = (
        ('bcp47', StrictBCP47ToUnilangConverter()),
        ('bcp47-lossy', LossyBCP47ToUnilangConverter()),
    )
    for standard_name, to_internal_converter in registrations:
        add_standard_custom(standard_name, to_internal_converter,
                            UNILANGS_TO_BCP47)
# Register all built-in standards at import time.  Order matters:
# _add_unisubs() passes base='django', so _add_django() must run first.
_generate_initial_data()
_add_iso_639_1()
_add_django()
_add_unisubs()
_add_youtube()
_add_bcp47()
class LanguageCode(object):
    """A language, stored internally in this library's "universal" code.

    Construct one from a code plus the standard it belongs to, then
    `encode()` it into any other registered standard.
    """

    def __init__(self, language_code, standard):
        """Decode `language_code` according to `standard`.

        Raises Exception if `standard` is not registered, and KeyError if
        `language_code` is not part of that standard.
        """
        try:
            standard_dict = TO_INTERNAL[standard.lower()]
        except KeyError:
            # Fixed typo in the original message ("registred").
            raise Exception("Standard '%s' is not registered" % standard)
        try:
            self._code = standard_dict[language_code]
        except KeyError:
            # Re-raise with context instead of a bare KeyError on the code
            # alone (same exception type, so existing handlers still work).
            raise KeyError("Code '%s' is not in standard '%s'"
                           % (language_code, standard))

    def encode(self, standard, fuzzy=False):
        """Return the code for this language in the given standard."""
        if fuzzy:
            return self._fuzzy_encode(standard)
        else:
            return FROM_INTERNAL[standard.lower()][self._code]

    def _fuzzy_encode(self, standard):
        """Return the code or closest approximate for this language in the given standard.

        This will try harder than the `encode()` function, but may result in
        data loss. For example:

            >>> lc = LanguageCode('en-gb', 'django')
            >>> lc.name()
            'British English'
            >>> lc.encode('iso-639-1')
            KeyError...
            >>> lc.encode('iso-639-1', fuzzy=True)
            'en'

        Here's an example of how you can lose data:

            >>> original = 'en-gb'                           # Start with 'en-gb'
            >>> lc = LanguageCode(original, 'django')        # Decode as Django
            >>> new_lang = lc.encode('iso-639-1', fuzzy=True)
            >>> new_lc = LanguageCode(new_lang, 'iso-639-1') # Decode as ISO-639-1
            >>> result = new_lc.encode('django')             # Encode back to Django
            >>> assert original != result
        """
        # TODO: This.  Currently unimplemented and returns None.
        return

    def name(self):
        """Return the English name for this language as a unicode string.

        Note: The strings returned from this function have already been marked
        with gettext_noop, so they should be safe to use with gettext to
        translate into another language.
        """
        return INTERNAL_NAMES[self._code][0]

    def native_name(self):
        """Return the native name for this language as a unicode string."""
        return INTERNAL_NAMES[self._code][1]

    def aliases(self):
        """Return the "aliases" for this language code.

        This is easiest to describe with an example:

            >>> LanguageCode('en', 'iso-639-1').aliases()
            { 'iso-639-1': 'en',
              'iso-639-2': 'eng',
              'django': 'en',
              # ...
            }
        """
        standards = FROM_INTERNAL.keys()
        # Only include standards that actually have a (truthy) code for us.
        return dict([(standard, FROM_INTERNAL[standard][self._code])
                     for standard in standards
                     if FROM_INTERNAL[standard].get(self._code)])
def get_language_name_mapping(standard):
    """Return a dict of code -> English name for all languages in the standard."""
    names = {}
    for code in TO_INTERNAL.get(standard):
        names[code] = LanguageCode(code, standard).name()
    return names
def get_language_native_mapping(standard):
    """Return a dict of code -> native name for all languages in the standard."""
    native_names = {}
    for code in TO_INTERNAL.get(standard):
        native_names[code] = LanguageCode(code, standard).native_name()
    return native_names
def get_language_code_mapping(standard):
    """Return a dict of code -> LanguageCode for all languages in the standard."""
    codes = {}
    for code in TO_INTERNAL.get(standard):
        codes[code] = LanguageCode(code, standard)
    return codes
def _debug_missing_languages(standard):
    """Return a list of (internal_code, name) pairs missing from `standard`.

    Fix: the original tested ``internal_code not in FROM_INTERNAL`` — the
    keys of FROM_INTERNAL are *standard names*, not language codes, so the
    `standard` argument was ignored and nearly every language was reported
    missing.  The membership test now uses the standard's own table.
    """
    return [(internal_code, name)
            for internal_code, name in INTERNAL_NAMES.items()
            if internal_code not in FROM_INTERNAL[standard]]
def _debug_missing_language_codes(standard, reference_standard='unisubs'):
    """
    Return a list of all the languages codes missing from the given standard
    """
    reference_codes = set(get_language_code_mapping(reference_standard).keys())
    # Encode every code of `standard` into the reference standard so the
    # two sets are comparable.  (The original abused a list comprehension
    # for this side effect; a plain loop says the same thing honestly.)
    encoded_codes = set()
    for code in get_language_code_mapping(standard).keys():
        encoded_codes.add(LanguageCode(code, standard).encode(reference_standard))
    return list(reference_codes.difference(encoded_codes))
# add mappings for amara-v gh-37
# -*- coding: utf-8 -*-
"""Universal Language Codes
This library aims to provide an [en/de]coding utility for language codes.
To get a "universal" language code you create a LanguageCode object, giving it
the code and the standard it should use to look up that code.
>>> lc = LanguageCode('en', 'iso-639-1')
Internally the code is stored in a custom standard designed specifically for
this purpose. It doesn't have any use in the real world, so to get a useful
representation out you "encode" the code:
>>> lc.encode('iso-639-2')
'eng'
This is similar to Python's handling of Unicode and byte strings:
>>> s = 'Hello, world'.decode('ascii')
>>> s
u'Hello, world'
>>> s.encode('utf-8')
'Hello, world'
"""
import copy
from .bcp47.converter import (
StrictBCP47ToUnilangConverter, LossyBCP47ToUnilangConverter,
UNILANGS_TO_BCP47
)
def _reverse_dict(d):
return dict([(v, k) for k, v in d.items()])
# INTERNAL_NAMES stores the English and native names for the various languages.
#
# Code -> (English name, Native name)
#
# { 'ar': (u'Arabic', u'العربية'),
#   'el': (u'Greek', u'Ελληνικά'),
#   ... }
INTERNAL_NAMES = {}  # populated by _generate_initial_data()
# TO_INTERNAL is a dict of dicts.
#
# The first level is a mapping of standard names to their dicts.
#
# The second level (one per standard) is a mapping of that standard's language
# codes to the internal language codes.
#
# { 'iso-639-1': {
#         'ak': 'aka',
#         ...
#     },
#   ... }
TO_INTERNAL = {}  # populated by add_standard()/add_standard_custom()
# FROM_INTERNAL is a dict of dicts.
#
# The first level is a mapping of standard names to their dicts.
#
# The second level (one per standard) is a mapping of internal language codes
# to that standard's language codes.
#
# { 'iso-639-1': {
#         'ak': 'aka',
#         ...
#     },
#   ... }
FROM_INTERNAL = {}  # populated by add_standard()/add_standard_custom()
gettext_noop = lambda s: s
def add_standard(standard, mapping, base=None, exclude=None):
    """Register a new standard under the name `standard`.

    `mapping` is a dict from the standard's own codes to this library's
    internal "universal" codes.

    `base` (optional) names an already-registered standard whose mapping is
    copied first and then overlaid with `mapping`:

        >>> add_standard('my-standard', {'american': 'en'}, base='iso-639-1')
        >>> LanguageCode('american', 'my-standard').encode('iso-639-2')
        'en'

    `exclude` (optional, only honored together with `base`) is an iterable
    of base codes to drop from the copy:

        >>> add_standard('my-standard', {'american': 'en'},
        ...              base='iso-639-1', exclude=('no', 'en'))

    Both the forward (TO_INTERNAL) and reverse (FROM_INTERNAL) lookup
    tables are updated.
    """
    if not base:
        combined = mapping
    else:
        # Shallow copy is enough: the table is a flat str -> str dict.
        combined = copy.copy(TO_INTERNAL[base])
        combined.update(mapping)
        if exclude:
            for excluded_code in exclude:
                del combined[excluded_code]
    TO_INTERNAL[standard] = combined
    FROM_INTERNAL[standard] = _reverse_dict(combined)
def add_standard_custom(standard, to_internal, from_internal):
    """Register `standard` with explicitly-supplied lookup tables.

    Unlike `add_standard`, no reverse table is derived: `to_internal`
    (standard's codes -> internal codes) and `from_internal` (internal
    codes -> standard's codes) are both provided by the caller and stored
    as-is.  This allows mapping objects that are not plain dicts.
    """
    # The two assignments are independent; order does not matter.
    FROM_INTERNAL[standard] = from_internal
    TO_INTERNAL[standard] = to_internal
def _generate_initial_data():
    """Populate INTERNAL_NAMES with display names for every internal code.

    Each key is an internal "universal" language code; each value is a
    two-tuple of (English name wrapped in gettext_noop for later translation
    extraction, native-script name).
    """
    INTERNAL_NAMES.update({
        'aa': (gettext_noop(u'Afar'), u'Afar'),
        'ab': (gettext_noop(u'Abkhazian'), u'Abkhazian'),
        'ae': (gettext_noop(u'Avestan'), u'Avestan'),
        'af': (gettext_noop(u'Afrikaans'), u'Afrikaans'),
        'aka': (gettext_noop(u'Akan'), u'Akana'),
        'amh': (gettext_noop(u'Amharic'), u'Amharic'),
        'an': (gettext_noop(u'Aragonese'), u'Aragonés'),
        'ar': (gettext_noop(u'Arabic'), u'العربية'),
        'arc': (gettext_noop(u'Aramaic'), u'ܐܪܡܝܐ'),
        'arq': (gettext_noop(u'Algerian Arabic'), u'دزيري/جزائري'),
        'as': (gettext_noop(u'Assamese'), u'Assamese'),
        'ase': (gettext_noop(u'American Sign Language'), u'American Sign Language'),
        'ast': (gettext_noop(u'Asturian'), u'Asturianu'),
        'av': (gettext_noop(u'Avaric'), u'Авар'),
        'ay': (gettext_noop(u'Aymara'), u'Aymar'),
        'az': (gettext_noop(u'Azerbaijani'), u'Azərbaycan'),
        'ba': (gettext_noop(u'Bashkir'), u'Башҡорт'),
        'bam': (gettext_noop(u'Bambara'), u'Bamanankan'),
        'be': (gettext_noop(u'Belarusian'), u'Беларуская'),
        'ber': (gettext_noop(u'Berber'), u'Berber'),
        'bg': (gettext_noop(u'Bulgarian'), u'Български'),
        'bh': (gettext_noop(u'Bihari'), u'भोजपुर'),
        'bi': (gettext_noop(u'Bislama'), u'Bislama'),
        'bn': (gettext_noop(u'Bengali'), u'Bengali'),
        'bnt': (gettext_noop(u'Ibibio'), u'Ibibio'),
        'bo': (gettext_noop(u'Tibetan'), u'Bod skad'),
        'br': (gettext_noop(u'Breton'), u'Brezhoneg'),
        'bs': (gettext_noop(u'Bosnian'), u'Bosanski'),
        'bug': (gettext_noop(u'Buginese'), u'Basa Ugi'),
        'ca': (gettext_noop(u'Catalan'), u'Català'),
        'cak': (gettext_noop(u'Cakchiquel, Central'), u'Cakchiquel, Central'),
        'ce': (gettext_noop(u'Chechen'), u'Chechen'),
        'ceb': (gettext_noop(u'Cebuano'), u'Cebuano'),
        'ch': (gettext_noop(u'Chamorro'), u'Chamoru'),
        'cho': (gettext_noop(u'Choctaw'), u'Choctaw'),
        'cku': (gettext_noop(u'Koasati'), u'Koasati'),
        'co': (gettext_noop(u'Corsican'), u'Corsu'),
        'cr': (gettext_noop(u'Cree'), u'Nehiyaw'),
        'cs': (gettext_noop(u'Czech'), u'Čeština'),
        'ctu': (gettext_noop(u'Chol, Tumbalá'), u'Chol, Tumbalá'),
        'ctd': (gettext_noop(u'Chin, Tedim'), u'Chin, Tedim'),
        'cu': (gettext_noop(u'Church Slavic'), u'Church Slavic'),
        'cv': (gettext_noop(u'Chuvash'), u'Chuvash'),
        'cy': (gettext_noop(u'Welsh'), u'Cymraeg'),
        'da': (gettext_noop(u'Danish'), u'Dansk'),
        'de': (gettext_noop(u'German'), u'Deutsch'),
        'de-at': (gettext_noop(u'German (Austria)'), u'Deutsch (Österreich)'),
        'de-ch': (gettext_noop(u'German (Switzerland)'), u'Deutsch (Schweiz)'),
        'din': (gettext_noop(u'Dinka'), u'Dinka'),
        'dv': (gettext_noop(u'Divehi'), u'Divehi'),
        'dz': (gettext_noop(u'Dzongkha'), u'Dzongkha'),
        'ee': (gettext_noop(u'Ewe'), u'Ewe'),
        'efi': (gettext_noop(u'Efik'), u'Efik'),
        'el': (gettext_noop(u'Greek'), u'Ελληνικά'),
        'en': (gettext_noop(u'English'), u'English'),
        'en-gb': (gettext_noop(u'English, British'), u'English, British'),
        'en-ca': (gettext_noop(u'English (Canada)'), u'English (Canada)'),
        'en-ie': (gettext_noop(u'English (Ireland)'), u'English (Ireland)'),
        'eo': (gettext_noop(u'Esperanto'), u'Esperanto'),
        'es': (gettext_noop(u'Spanish'), u'Español'),
        'es-419': (gettext_noop(u'Spanish (Latin America)'), u'Español (América Latina)'),
        'es-ar': (gettext_noop(u'Spanish, Argentinian'), u'Spanish, Argentinian'),
        'es-mx': (gettext_noop(u'Spanish, Mexican'), u'Spanish, Mexican'),
        'es-ni': (gettext_noop(u'Spanish, Nicaraguan'), u'Spanish, Nicaraguan, '),
        'et': (gettext_noop(u'Estonian'), u'Eesti'),
        'eu': (gettext_noop(u'Basque'), u'Euskara'),
        'fa': (gettext_noop(u'Persian'), u'فارسی'),
        'fa-af': (gettext_noop(u'Persian (Afghanistan)'), u'فارسی (افغانستان)'),
        'ff': (gettext_noop(u'Fulah'), u'Fulah'),
        'fi': (gettext_noop(u'Finnish'), u'Suomi'),
        'fil': (gettext_noop(u'Filipino'), u'Filipino'),
        'fj': (gettext_noop(u'Fijian'), u'Na Vosa Vakaviti'),
        'fo': (gettext_noop(u'Faroese'), u'Føroyskt'),
        'fr': (gettext_noop(u'French'), u'Français'),
        'fr-be': (gettext_noop(u'French (Belgium)'), u'Français (Belgique)'),
        'fr-ca': (gettext_noop(u'French (Canada)'), u'French (Canada)'),
        'fr-ch': (gettext_noop(u'French (Switzerland)'), u'French (Suisse)'),
        'ful': (gettext_noop(u'Fula'), u'Fula'),
        'fy-nl': (gettext_noop(u'Frisian'), u'Frysk'),
        'ga': (gettext_noop(u'Irish'), u'Gaeilge'),
        'gd': (gettext_noop(u'Scottish Gaelic'), u'Gàidhlig'),
        'gl': (gettext_noop(u'Galician'), u'Galego'),
        'gn': (gettext_noop(u'Guaran'), u'Avañe\'ẽ'),
        'gu': (gettext_noop(u'Gujarati'), u'ગુજરાતી'),
        'gv': (gettext_noop(u'Manx'), u'Gaelg'),
        'hb': (gettext_noop(u'HamariBoli (Roman Hindi-Urdu)'), u'HamariBoli'),
        'hai': (gettext_noop(u'Haida'), u'Haida'),
        'hau': (gettext_noop(u'Hausa'), u'هَوُسَ'),
        'haw': (gettext_noop(u'Hawaiian'), u'Hawaiian'),
        'haz': (gettext_noop(u'Hazaragi'), u'هزارگی'),
        'hch': (gettext_noop(u'Huichol'), u'Huichol'),
        'he': (gettext_noop(u'Hebrew'), u'עברית'),
        'hi': (gettext_noop(u'Hindi'), u'हिन्दी'),
        'ho': (gettext_noop(u'Hiri Motu'), u'Hiri Motu'),
        'hr': (gettext_noop(u'Croatian'), u'Hrvatski'),
        'ht': (gettext_noop(u'Creole, Haitian'), u'Creole, Haitian'),
        'hu': (gettext_noop(u'Hungarian'), u'Magyar'),
        'hup': (gettext_noop(u'Hupa'), u'Hupa'),
        'hus': (gettext_noop(u'Huastec, Veracruz'), u'Huastec, Veracruz'),
        'hy': (gettext_noop(u'Armenian'), u'Հայերեն'),
        'hz': (gettext_noop(u'Herero'), u'Herero'),
        'ia': (gettext_noop(u'Interlingua'), u'Interlingua'),
        'ibo': (gettext_noop(u'Igbo'), u'Igbo'),
        'id': (gettext_noop(u'Indonesian'), u'Bahasa Indonesia'),
        'ie': (gettext_noop(u'Interlingue'), u'Interlingue'),
        'ii': (gettext_noop(u'Sichuan Yi'), u'Sichuan Yi'),
        'ik': (gettext_noop(u'Inupia'), u'Iñupiak'),
        'ilo': (gettext_noop(u'Ilocano'), u'Ilocano'),
        'inh': (gettext_noop(u'Ingush'), u'Ingush'),
        'io': (gettext_noop(u'Ido'), u'Ido'),
        'iro': (gettext_noop(u'Iroquoian languages'), u'Iroquoian languages'),
        'is': (gettext_noop(u'Icelandic'), u'Íslenska'),
        'it': (gettext_noop(u'Italian'), u'Italiano'),
        'iu': (gettext_noop(u'Inuktitut'), u'Inuktitut'),
        'ja': (gettext_noop(u'Japanese'), u'日本語'),
        'jv': (gettext_noop(u'Javanese'), u'Basa Jawa'),
        'ka': (gettext_noop(u'Georgian'), u'ქართული'),
        'kar': (gettext_noop(u'Karen'), u'Karen'),
        'kau': (gettext_noop(u'Kanuri'), u'Kanuri'),
        'kik': (gettext_noop(u'Gikuyu'), u'Gikuyu'),
        'kin': (gettext_noop(u'Rwandi'), u'Kinyarwanda'),
        'kj': (gettext_noop(u'Kuanyama, Kwanyama'), u'Kuanyama, Kwanyama'),
        'kk': (gettext_noop(u'Kazakh'), u'қазақша'),
        'kl': (gettext_noop(u'Greenlandic'), u'Kalaallisut'),
        'km': (gettext_noop(u'Khmer'), u'Khmer'),
        'kn': (gettext_noop(u'Kannada'), u'ಕನ್ನಡ'),
        'ko': (gettext_noop(u'Korean'), u'한국어'),
        'kon': (gettext_noop(u'Kongo'), u'Kongo'),
        'ks': (gettext_noop(u'Kashmiri'), u'कश्मीरी - (كشميري'),
        'ksh': (gettext_noop(u'Colognian'), u'Kölsch'),
        'ku': (gettext_noop(u'Kurdish'), u'Kurdî / كوردی'),
        'kv': (gettext_noop(u'Komi'), u'Komi'),
        'kw': (gettext_noop(u'Cornish'), u'Kernewek/Karnuack'),
        'ky': (gettext_noop(u'Kyrgyz'), u'Kırgızca'),
        'la': (gettext_noop(u'Latin'), u'Latina'),
        'ltg': (gettext_noop(u'Latgalian'), u'Latgalian'),
        'lld': (gettext_noop(u'Ladin'), u'Ladino'),
        'lb': (gettext_noop(u'Luxembourgish'), u'Lëtzebuergesch'),
        'lg': (gettext_noop(u'Ganda'), u'Ganda'),
        'li': (gettext_noop(u'Limburgish'), u'Limburgs'),
        'lin': (gettext_noop(u'Lingala'), u'Lingala'),
        'lkt': (gettext_noop(u'Lakota'), u'Lakota'),
        'lo': (gettext_noop(u'Lao'), u'Lao'),
        'lt': (gettext_noop(u'Lithuanian'), u'Lietuvių'),
        'lu': (gettext_noop(u'Luba-Katagana'), u'Luba-Katagana'),
        'lua': (gettext_noop(u'Luba-Kasai'), u'Luba-Kasai'),
        'luo': (gettext_noop(u'Luo'), u'Luo'),
        'luy': (gettext_noop(u'Luhya'), u'Luhya'),
        'lv': (gettext_noop(u'Latvian'), u'Latviešu'),
        'meta-audio': (gettext_noop(u'Metadata: Audio Description'), u'Metadata: Audio Description'),
        'meta-geo': (gettext_noop(u'Metadata: Geo'), u'Metadata: Geo'),
        'meta-tw': (gettext_noop(u'Metadata: Twitter'), u'Metadata: Twitter'),
        'meta-wiki': (gettext_noop(u'Metadata: Wikipedia'), u'Metadata: Wikipedia'),
        'mad': (gettext_noop(u'Madurese'), u'Madurese'),
        'mh': (gettext_noop(u'Marshallese'), u'Ebon'),
        'mi': (gettext_noop(u'Maori'), u'Māori'),
        'mk': (gettext_noop(u'Macedonian'), u'Македонски'),
        'ml': (gettext_noop(u'Malayalam'), u'Malayalam'),
        'mlg': (gettext_noop(u'Malagasy'), u'Malagasy'),
        'mn': (gettext_noop(u'Mongolian'), u'Монгол'),
        'mnk': (gettext_noop(u'Mandinka'), u'Mandinka'),
        'mo': (gettext_noop(u'Moldavian, Moldovan'), u'Moldoveana'),
        'moh': (gettext_noop(u'Mohawk'), u'Mohawk'),
        'mni': (gettext_noop(u'Manipuri'), u'মৈইতৈইলোন'),
        'mos': (gettext_noop(u'Mossi'), u'Mossi'),
        'mr': (gettext_noop(u'Marathi'), u'मराठी'),
        'ms': (gettext_noop(u'Malay'), u'Bahasa Melayu'),
        'mt': (gettext_noop(u'Maltese'), u'bil-Malti'),
        'mus': (gettext_noop(u'Muscogee'), u'Muscogee'),
        'my': (gettext_noop(u'Burmese'), u'Myanmasa'),
        'nan': (gettext_noop(u'Hokkien'), u'Hokkien'),
        'na': (gettext_noop(u'Naurunan'), u'dorerin Naoero'),
        'nb': (gettext_noop(u'Norwegian Bokmal'), u'Norsk Bokmål'),
        'nci': (gettext_noop(u'Nahuatl, Classical'), u'Nahuatl, Classical'),
        'ncj': (gettext_noop(u'Nahuatl, Northern Puebla'), u'Nahuatl, Northern Puebla'),
        'nd': (gettext_noop(u'North Ndebele'), u'North Ndebele'),
        'ne': (gettext_noop(u'Nepali'), u'नेपाली'),
        'ng': (gettext_noop(u'Ndonga'), u'Ndonga'),
        'nl': (gettext_noop(u'Dutch'), u'Nederlands'),
        'nl-be': (gettext_noop(u'Dutch (Belgium)'), u'Nederlands (België)'),
        'nn': (gettext_noop(u'Norwegian Nynorsk'), u'Nynorsk'),
        'no': (gettext_noop(u'Norwegian'), u'Norwegian'),
        'nr': (gettext_noop(u'Southern Ndebele'), u'Southern Ndebele'),
        'nso': (gettext_noop(u'Northern Sotho'), u'Northern Sotho'),
        'nv': (gettext_noop(u'Navajo'), u'Navajo'),
        'nya': (gettext_noop(u'Chewa'), u'Chewa'),
        'oc': (gettext_noop(u'Occitan'), u'Occitan'),
        'oji': (gettext_noop(u'Ojibwe'), u'Ojibwe'),
        'or': (gettext_noop(u'Oriya'), u'Oriya'),
        'orm': (gettext_noop(u'Oromo'), u'Oromoo'),
        'os': (gettext_noop(u'Ossetian, Ossetic'), u'Ossetian, Ossetic'),
        'pam': (gettext_noop(u'Kapampangan'), u'Pampango'),
        'pap': (gettext_noop(u'Papiamento'), u'Papiamentu'),
        'pan': (gettext_noop(u'Eastern Punjabi'), u'ਪੂਰਬੀ ਨੂੰ ਪੰਜਾਬੀ'),
        'pi': (gettext_noop(u'Pali'), u'पािऴ'),
        'pl': (gettext_noop(u'Polish'), u'Polski'),
        'pnb': (gettext_noop(u'Western Punjabi'), u'پنجابی'),
        'prs': (gettext_noop(u'Dari'), u'دری'),
        'ps': (gettext_noop(u'Pashto'), u'پښتو'),
        'pt': (gettext_noop(u'Portuguese'), u'Português'),
        'pt-br': (gettext_noop(u'Portuguese, Brazilian'), u'Portuguese, Brazilian'),
        'que': (gettext_noop(u'Quechua'), u'Runa Simi'),
        'qvi': (gettext_noop(u'Quichua, Imbabura Highland'), u'Quichua, Imbabura Highland'),
        'raj': (gettext_noop(u'Rajasthani'), u'राजस्थानी'),
        'rm': (gettext_noop(u'Romansh'), u'Rumantsch'),
        'ro': (gettext_noop(u'Romanian'), u'Română'),
        'ru': (gettext_noop(u'Russian'), u'Русский'),
        'run': (gettext_noop(u'Rundi'), u'Kirundi'),
        'rup': (gettext_noop(u'Macedo'), u'Macedo'),
        'ry': (gettext_noop(u'Rusyn'), u'Rusyn'),
        'sa': (gettext_noop(u'Sanskrit'), u'संस्कृतम्'),
        'sc': (gettext_noop(u'Sardinian'), u'Sardu'),
        'sco': (gettext_noop(u'Scots'), u'Scots'),
        'sd': (gettext_noop(u'Sindhi'), u'سنڌي'),
        'se': (gettext_noop(u'Northern Sami'), u'Northern Sami'),
        'sg': (gettext_noop(u'Sango'), u'Sängö'),
        'sgn': (gettext_noop(u'Sign Languages'), u'Sign Languages'),
        'sh': (gettext_noop(u'Serbo-Croatian'), u'Srpskohrvatski'),
        'si': (gettext_noop(u'Sinhala'), u'Sinhalese'),
        'sk': (gettext_noop(u'Slovak'), u'Slovenčina'),
        'skx': (gettext_noop(u'Seko Padang'), u'Sua Tu Padang'),
        'sl': (gettext_noop(u'Slovenian'), u'Slovenščina'),
        'sm': (gettext_noop(u'Samoan'), u'Gagana Samoa'),
        'sna': (gettext_noop(u'Shona'), u'chiShona'),
        'som': (gettext_noop(u'Somali'), u'Soomaaliga'),
        'sot': (gettext_noop(u'Sotho'), u'seSotho'),
        'sq': (gettext_noop(u'Albanian'), u'Shqip'),
        'sr': (gettext_noop(u'Serbian'), u'Српски / Srpski'),
        'sr-latn': (gettext_noop(u'Serbian, Latin'), u'Serbian, Latin'),
        'srp': (gettext_noop(u'Montenegrin'), u'Crnogorski jezik, Црногорски језик'),
        'ss': (gettext_noop(u'Swati'), u'SiSwati'),
        'st': (gettext_noop(u'Southern Sotho'), u'Sesotho'),
        'su': (gettext_noop(u'Sundanese'), u'Basa Sunda'),
        'sv': (gettext_noop(u'Swedish'), u'Svenska'),
        'swa': (gettext_noop(u'Swahili'), u'Kiswahili'),
        'szl': (gettext_noop(u'Silesian'), u'ślōnskŏ gŏdka'),
        'ta': (gettext_noop(u'Tamil'), u'தமிழ்'),
        'tar': (gettext_noop(u'Tarahumara, Central'), u'Ralámul'),
        'tet': (gettext_noop(u'Tetum'), u'Tetum'),
        'te': (gettext_noop(u'Telugu'), u'తెలుగు'),
        'tg': (gettext_noop(u'Tajik'), u'Тоҷикӣ'),
        'th': (gettext_noop(u'Thai'), u'ไทย'),
        'tir': (gettext_noop(u'Tigrinya'), u'Tigrinya'),
        'tk': (gettext_noop(u'Turkmen'), u'تركمن / Туркмен'),
        'tl': (gettext_noop(u'Tagalog'), u'Tagalog'),
        'tlh': (gettext_noop(u'Klingon'), u'tlhIngan-Hol'),
        'to': (gettext_noop(u'Tonga'), u'faka Tonga'),
        'toj': (gettext_noop(u'Tojolabal'), u'Tojolabal'),
        'tr': (gettext_noop(u'Turkish'), u'Türkçe'),
        'ts': (gettext_noop(u'Tsonga'), u'Xitsonga'),
        'tsn': (gettext_noop(u'Tswana'), u'Setswana'),
        'tsz': (gettext_noop(u'Purepecha'), u'Purepecha'),
        'tt': (gettext_noop(u'Tatar'), u'Tatarça / Татарча'),
        'tw': (gettext_noop(u'Twi'), u'Twi'),
        'ty': (gettext_noop(u'Tahitian'), u'Tahitian'),
        'tzh': (gettext_noop(u'Tzeltal, Oxchuc'), u'Tzeltal, Oxchuc'),
        'tzo': (gettext_noop(u'Tzotzil, Venustiano Carranza'),
                u'Tzotzil, Venustiano Carranza'),
        'uk': (gettext_noop(u'Ukrainian'), u'Українська'),
        'umb': (gettext_noop(u'Umbundu'), u'Umbundu'),
        'ug': (gettext_noop(u'Uyghur'), u'ئۇيغۇر'),
        'ur': (gettext_noop(u'Urdu'), u'اڙدو'),
        'uz': (gettext_noop(u'Uzbek'), u'O‘zbek'),
        've': (gettext_noop(u'Venda'), u'Venda'),
        'vi': (gettext_noop(u'Vietnamese'), u'Tiếng Việt'),
        'vls': (gettext_noop(u'Flemish'), u'Vlaams'),
        'vo': (gettext_noop(u'Volapuk'), u'Volapük'),
        'wa': (gettext_noop(u'Walloon'), u'Walon'),
        'wbl': (gettext_noop(u'Wakhi'), u'Wakhi'),
        'wol': (gettext_noop(u'Wolof'), u'Wollof'),
        'xho': (gettext_noop(u'Xhosa'), u'isiXhosa'),
        'yaq': (gettext_noop(u'Yaqui'), u'Yaqui'),
        'yi': (gettext_noop(u'Yiddish'), u'ייִדיש'),
        'yor': (gettext_noop(u'Yoruba'), u'Yorùbá'),
        'yua': (gettext_noop(u'Maya, Yucatán'), u'Maya, Yucatán'),
        'za': (gettext_noop(u'Zhuang, Chuang'), u'Cuengh'),
        'zam': (gettext_noop(u'Zapotec, Miahuatlán'), u'Zapotec, Miahuatlán'),
        'zh': (gettext_noop(u'Chinese, Yue'), u'中文'),
        'zh-cn': (gettext_noop(u'Chinese, Simplified'), u'简体中文'),
        'zh-tw': (gettext_noop(u'Chinese, Traditional'), u'繁體中文'),
        'zh-sg': (gettext_noop(u'Chinese, Simplified (Singaporean)'), u''),
        'zh-hk': (gettext_noop(u'Chinese, Traditional (Hong Kong)'), u''),
        'zul': (gettext_noop(u'Zulu'), u'isiZulu'),
    })
def _add_iso_639_1():
    """Register the ISO-639-1 standard: two-letter codes -> internal codes."""
    add_standard('iso-639-1', {
        'ab': 'ab',
        'aa': 'aa',
        'af': 'af',
        'ak': 'aka',
        'sq': 'sq',
        'am': 'amh',
        'ar': 'ar',
        'an': 'an',
        'hy': 'hy',
        'as': 'as',
        'av': 'av',
        'ae': 'ae',
        'ay': 'ay',
        'az': 'az',
        'bm': 'bam',
        'ba': 'ba',
        'eu': 'eu',
        'be': 'be',
        'bn': 'bn',
        'bh': 'bh',
        'bi': 'bi',
        'bs': 'bs',
        'br': 'br',
        'bg': 'bg',
        'my': 'my',
        'ca': 'ca',
        'km': 'km',
        'ch': 'ch',
        'ce': 'ce',
        'ny': 'nya',
        'zh': 'zh',
        'cu': 'cu',
        'cv': 'cv',
        'kw': 'kw',
        'co': 'co',
        'cr': 'cr',
        'hr': 'hr',
        'cs': 'cs',
        'da': 'da',
        'dv': 'dv',
        'nl': 'nl',
        'dz': 'dz',
        'en': 'en',
        'eo': 'eo',
        'et': 'et',
        'ee': 'ee',
        'fo': 'fo',
        'fj': 'fj',
        'fi': 'fi',
        'fr': 'fr',
        'ff': 'ff',
        'gl': 'gl',
        'lg': 'lg',
        'ka': 'ka',
        'de': 'de',
        'el': 'el',
        'gn': 'gn',
        'gu': 'gu',
        'ht': 'ht',
        'ha': 'hau',
        'he': 'he',
        'hz': 'hz',
        'hi': 'hi',
        'ho': 'ho',
        'hu': 'hu',
        'is': 'is',
        'io': 'io',
        'ig': 'ibo',
        'id': 'id',
        'ia': 'ia',
        'ie': 'ie',
        'iu': 'iu',
        'ik': 'ik',
        'ga': 'ga',
        'it': 'it',
        'ja': 'ja',
        'jv': 'jv',
        'kl': 'kl',
        'kn': 'kn',
        'kr': 'kau',
        'ks': 'ks',
        'kk': 'kk',
        'ki': 'kik',
        'rw': 'kin',
        'ky': 'ky',
        'kv': 'kv',
        'kg': 'kon',
        'ko': 'ko',
        'kj': 'kj',
        'ku': 'ku',
        'lo': 'lo',
        'la': 'la',
        'lv': 'lv',
        'li': 'li',
        'ln': 'lin',
        'lt': 'lt',
        'lu': 'lu',
        'lb': 'lb',
        'mk': 'mk',
        'mg': 'mlg',
        'ms': 'ms',
        'ml': 'ml',
        'mt': 'mt',
        'gv': 'gv',
        'mi': 'mi',
        'mr': 'mr',
        'mh': 'mh',
        'mo': 'mo',
        'mn': 'mn',
        'na': 'na',
        'nv': 'nv',
        'ng': 'ng',
        'ne': 'ne',
        'nd': 'nd',
        'se': 'se',
        # Both 'no' and 'nb' collapse to the internal Bokmål code 'nb'.
        'no': 'nb',
        'nb': 'nb',
        'nn': 'nn',
        'oc': 'oc',
        'oj': 'oji',
        'or': 'or',
        'om': 'orm',
        'os': 'os',
        'pi': 'pi',
        # NOTE(review): INTERNAL_NAMES has no 'pa' entry (only 'pan'/'pnb');
        # the vimeo standard maps 'pa' -> 'pan'.  Verify whether this should
        # be 'pan' here as well.
        'pa': 'pa',
        'fa': 'fa',
        'pl': 'pl',
        'pt': 'pt',
        'ps': 'ps',
        'qu': 'que',
        'ro': 'ro',
        'rm': 'rm',
        'rn': 'run',
        'ru': 'ru',
        'ry': 'ry',
        'sm': 'sm',
        'sg': 'sg',
        'sa': 'sa',
        'sc': 'sc',
        'gd': 'gd',
        'sr': 'sr',
        'sh': 'sh',
        'sn': 'sna',
        'ii': 'ii',
        'sd': 'sd',
        'si': 'si',
        'sk': 'sk',
        'sl': 'sl',
        'so': 'som',
        'st': 'sot',
        'nr': 'nr',
        'es': 'es',
        'su': 'su',
        'sw': 'swa',
        'ss': 'ss',
        'sv': 'sv',
        'tl': 'tl',
        'ty': 'ty',
        'tg': 'tg',
        'ta': 'ta',
        'tt': 'tt',
        'te': 'te',
        'th': 'th',
        'bo': 'bo',
        'ti': 'tir',
        'to': 'to',
        'ts': 'ts',
        'tn': 'tsn',
        'tr': 'tr',
        'tk': 'tk',
        'tw': 'tw',
        'ug': 'ug',
        'uk': 'uk',
        'ur': 'ur',
        'uz': 'uz',
        've': 've',
        'vi': 'vi',
        'vo': 'vo',
        'wa': 'wa',
        'cy': 'cy',
        'fy': 'fy-nl',
        'wo': 'wol',
        'xh': 'xho',
        'yi': 'yi',
        'yo': 'yor',
        'za': 'za',
        'zu': 'zul',
    })
def _add_django():
    """Register the 'django' standard: Django language codes -> internal codes.

    Also serves as the base standard for 'unisubs' (see _add_unisubs).
    """
    add_standard('django', {
        'ar': 'ar',
        'az': 'az',
        'bg': 'bg',
        'bn': 'bn',
        'bs': 'bs',
        'ca': 'ca',
        'cs': 'cs',
        'cy': 'cy',
        'da': 'da',
        'de': 'de',
        'el': 'el',
        'en': 'en',
        'en-gb': 'en-gb',
        'es': 'es',
        'es-ar': 'es-ar',
        'es-mx': 'es-mx',
        'es-ni': 'es-ni',
        'et': 'et',
        'eu': 'eu',
        'fa': 'fa',
        'fi': 'fi',
        'fr': 'fr',
        'fr-ca': 'fr-ca',
        'fy-nl': 'fy-nl',
        'ga': 'ga',
        'gl': 'gl',
        'he': 'he',
        'hi': 'hi',
        'hr': 'hr',
        'hu': 'hu',
        'id': 'id',
        'is': 'is',
        'it': 'it',
        'ja': 'ja',
        'ka': 'ka',
        'km': 'km',
        'kn': 'kn',
        'ko': 'ko',
        'lt': 'lt',
        'lv': 'lv',
        'mk': 'mk',
        'ml': 'ml',
        'mn': 'mn',
        'nl': 'nl',
        'nb': 'nb',
        'nn': 'nn',
        'pl': 'pl',
        'pt': 'pt',
        'pt-br': 'pt-br',
        'ro': 'ro',
        'ru': 'ru',
        'sk': 'sk',
        'sl': 'sl',
        'sq': 'sq',
        'sr': 'sr',
        'sr-latn': 'sr-latn',
        'sv': 'sv',
        'ta': 'ta',
        'te': 'te',
        'th': 'th',
        'tr': 'tr',
        'uk': 'uk',
        'ur': 'ur',
        'vi': 'vi',
        'zh-cn': 'zh-cn',
        'zh-tw': 'zh-tw',
    })
def _add_unisubs():
    """Register the 'unisubs' standard.

    Extends the 'django' standard (base='django') with many additional
    codes; every entry here maps the unisubs code to the internal code.

    Cleanup: the original literal contained duplicate 'inh' and 'iro'
    entries (identical key/value pairs, so the duplicates were dead) —
    removed with no behavioral change.
    """
    add_standard('unisubs', {
        'aa': 'aa',
        'ab': 'ab',
        'ae': 'ae',
        'af': 'af',
        'aka': 'aka',
        'amh': 'amh',
        'an': 'an',
        'arc': 'arc',
        'arq': 'arq',
        'as': 'as',
        'ase': 'ase',
        'ast': 'ast',
        'av': 'av',
        'ay': 'ay',
        'ba': 'ba',
        'bam': 'bam',
        'be': 'be',
        'ber': 'ber',
        'bh': 'bh',
        'bi': 'bi',
        'bnt': 'bnt',
        'bo': 'bo',
        'br': 'br',
        'bug': 'bug',
        'cak': 'cak',
        'ce': 'ce',
        'ceb': 'ceb',
        'ch': 'ch',
        'cho': 'cho',
        'cku': 'cku',
        'co': 'co',
        'cr': 'cr',
        'ctu': 'ctu',
        'ctd': 'ctd',
        'cu': 'cu',
        'cv': 'cv',
        'de-at': 'de-at',
        'de-ch': 'de-ch',
        'din': 'din',
        'dv': 'dv',
        'dz': 'dz',
        'ee': 'ee',
        'efi': 'efi',
        'en-ca': 'en-ca',
        'en-gb': 'en-gb',
        'en-ie': 'en-ie',
        'eo': 'eo',
        'es-419': 'es-419',
        'es-ar': 'es-ar',
        'fa-af': 'fa-af',
        'ff': 'ff',
        'fil': 'fil',
        'fj': 'fj',
        'fo': 'fo',
        'fr-be': 'fr-be',
        'fr-ca': 'fr-ca',
        'fr-ch': 'fr-ch',
        'ful': 'ful',
        # NOTE(review): INTERNAL_NAMES has no 'fy' entry (only 'fy-nl') —
        # verify whether this should map to 'fy-nl'.
        'fy': 'fy',
        'ga': 'ga',
        'gd': 'gd',
        'gn': 'gn',
        'gu': 'gu',
        'gv': 'gv',
        'hai': 'hai',
        'hau': 'hau',
        'haw': 'haw',
        'haz': 'haz',
        'hb': 'hb',
        'hch': 'hch',
        'ho': 'ho',
        'ht': 'ht',
        'hup': 'hup',
        'hus': 'hus',
        'hy': 'hy',
        'hz': 'hz',
        'ia': 'ia',
        'ibo': 'ibo',
        'ie': 'ie',
        'ii': 'ii',
        'ik': 'ik',
        'ilo': 'ilo',
        'iro': 'iro',
        'inh': 'inh',
        'io': 'io',
        'iu': 'iu',
        'jv': 'jv',
        'kar': 'kar',
        'kau': 'kau',
        'kik': 'kik',
        'kin': 'kin',
        'kj': 'kj',
        'kk': 'kk',
        'kl': 'kl',
        'kon': 'kon',
        'ks': 'ks',
        'ksh': 'ksh',
        'ku': 'ku',
        'kv': 'kv',
        'kw': 'kw',
        'ky': 'ky',
        'la': 'la',
        'lld': 'lld',
        'lb': 'lb',
        'lg': 'lg',
        'li': 'li',
        'lin': 'lin',
        'lkt': 'lkt',
        'lo': 'lo',
        'ltg': 'ltg',
        'lu': 'lu',
        'lua': 'lua',
        'luo': 'luo',
        'luy': 'luy',
        'meta-audio': 'meta-audio',
        'meta-geo': 'meta-geo',
        'meta-tw': 'meta-tw',
        'meta-wiki': 'meta-wiki',
        'mad': 'mad',
        'mh': 'mh',
        'mi': 'mi',
        'ml': 'ml',
        'mlg': 'mlg',
        'mni': 'mni',
        'mnk': 'mnk',
        'mo': 'mo',
        'moh': 'moh',
        'mos': 'mos',
        'mr': 'mr',
        'ms': 'ms',
        'mt': 'mt',
        'mus': 'mus',
        'my': 'my',
        'na': 'na',
        'nan': 'nan',
        'nci': 'nci',
        'nd': 'nd',
        'ne': 'ne',
        'ng': 'ng',
        'nl-be': 'nl-be',
        'nr': 'nr',
        'nso': 'nso',
        'nv': 'nv',
        'nya': 'nya',
        'oc': 'oc',
        'oji': 'oji',
        'or': 'or',
        'orm': 'orm',
        'os': 'os',
        'pam': 'pam',
        'pan': 'pan',
        'pap': 'pap',
        'pi': 'pi',
        'pnb': 'pnb',
        'prs': 'prs',
        'ps': 'ps',
        'pt-br': 'pt-br',
        'que': 'que',
        'qvi': 'qvi',
        'raj': 'raj',
        'rm': 'rm',
        'run': 'run',
        'rup': 'rup',
        'ry': 'ry',
        'sa': 'sa',
        'sc': 'sc',
        'sco': 'sco',
        'sd': 'sd',
        'se': 'se',
        'sg': 'sg',
        'sgn': 'sgn',
        'skx': 'skx',
        'sh': 'sh',
        'si': 'si',
        'sm': 'sm',
        'sna': 'sna',
        'som': 'som',
        'sot': 'sot',
        'sr-latn': 'sr-latn',
        'srp': 'srp',
        'ss': 'ss',
        'st': 'st',
        'su': 'su',
        'swa': 'swa',
        'szl': 'szl',
        'tar': 'tar',
        'tet': 'tet',
        'tg': 'tg',
        'tir': 'tir',
        'tk': 'tk',
        'tl': 'tl',
        'tlh': 'tlh',
        'to': 'to',
        'toj': 'toj',
        'ts': 'ts',
        'tsz': 'tsz',
        'tsn': 'tsn',
        'tzh': 'tzh',
        'tzo': 'tzo',
        'tt': 'tt',
        'tw': 'tw',
        'ty': 'ty',
        'ug': 'ug',
        'umb': 'umb',
        'uz': 'uz',
        've': 've',
        'vls': 'vls',
        'vo': 'vo',
        'wa': 'wa',
        'wbl': 'wbl',
        'wol': 'wol',
        'xho': 'xho',
        'yaq': 'yaq',
        'yi': 'yi',
        'yor': 'yor',
        'yua': 'yua',
        'za': 'za',
        'zam': 'zam',
        'zh': 'zh',
        'zh-cn': 'zh-cn',
        'zh-tw': 'zh-tw',
        'zh-sg': 'zh-sg',
        'zh-hk': 'zh-hk',
        'zul': 'zul',
    }, base='django')
def _add_youtube():
    """Register the 'youtube' standard: YouTube language codes -> internal codes.

    YouTube uses mixed-case region codes (e.g. 'en-GB', 'pt-BR'); the
    values are this library's lowercase internal codes.
    """
    add_standard('youtube', {
        'aa': 'aa',
        'ab': 'ab',
        'ae': 'ae',
        'af': 'af',
        'ak': 'aka',
        'am': 'amh',
        'an': 'an',
        'ar': 'ar',
        'as': 'as',
        'ast': 'ast',
        'av': 'av',
        'ay': 'ay',
        'az': 'az',
        'ba': 'ba',
        'bm': 'bam',
        'ber': 'ber',
        'be': 'be',
        'bg': 'bg',
        'bh': 'bh',
        'bi': 'bi',
        'bn': 'bn',
        'bnt': 'bnt',
        'bo': 'bo',
        'br': 'br',
        'bs': 'bs',
        'ce': 'ce',
        'ceb': 'ceb',
        'ca': 'ca',
        'ch': 'ch',
        'cho': 'cho',
        'co': 'co',
        'cr': 'cr',
        'cs': 'cs',
        'cu': 'cu',
        'cv': 'cv',
        'cy': 'cy',
        'da': 'da',
        'de': 'de',
        'dv': 'dv',
        'dz': 'dz',
        'ee': 'ee',
        'efi': 'efi',
        'el': 'el',
        'en': 'en',
        'en-GB': 'en-gb',
        'en-US': 'en',
        'eo': 'eo',
        'es-AR': 'es-ar',
        'es-ES': 'es',
        'es-NI': 'es-ni',
        'es-MX': 'es-mx',
        'et': 'et',
        'eu': 'eu',
        'fa': 'fa',
        'fa-AF': 'fa',
        'fi': 'fi',
        'fil': 'fil',
        'ff': 'ff',
        'fj': 'fj',
        'fo': 'fo',
        'fr': 'fr',
        'fr-CA': 'fr-ca',
        'fy': 'fy-nl',
        'ga': 'ga',
        'gd': 'gd',
        'gl': 'gl',
        'gn': 'gn',
        'gu': 'gu',
        'gv': 'gv',
        'ha': 'hau',
        'hai': 'hai',
        'hi': 'hi',
        'ho': 'ho',
        'hr': 'hr',
        'hu': 'hu',
        'ht': 'ht',
        'hup': 'hup',
        'hy': 'hy',
        'hz': 'hz',
        'ia': 'ia',
        'id': 'id',
        'ie': 'ie',
        'ig': 'ibo',
        'ii': 'ii',
        'ik': 'ik',
        'ilo': 'ilo',
        'inh': 'inh',
        'io': 'io',
        'iu': 'iu',
        'iro': 'iro',
        'is': 'is',
        'it': 'it',
        # 'iw' maps to internal 'he' (Hebrew).
        'iw': 'he',
        'ja': 'ja',
        'jv': 'jv',
        'ka': 'ka',
        'kar': 'kar',
        'kg': 'kon',
        'ki': 'kik',
        'kk': 'kk',
        'kj': 'kj',
        'kl': 'kl',
        'km': 'km',
        'kn': 'kn',
        'ko': 'ko',
        'ks': 'ks',
        'ksh': 'ksh',
        'kr': 'kau',
        'ku': 'ku',
        'ky': 'ky',
        'kv': 'kv',
        'kw': 'kw',
        'la': 'la',
        'lb': 'lb',
        'lg': 'lg',
        'li': 'li',
        'lld': 'lld',
        'ln': 'lin',
        'lo': 'lo',
        'lt': 'lt',
        'lu': 'lu',
        'lua': 'lua',
        'luo': 'luo',
        'luy': 'luy',
        'lv': 'lv',
        'mad': 'mad',
        'mg': 'mlg',
        'mh': 'mh',
        'mi': 'mi',
        'mk': 'mk',
        'ml': 'ml',
        'mn': 'mn',
        'mni': 'mni',
        'mo': 'mo',
        'moh': 'moh',
        'mos': 'mos',
        'mr': 'mr',
        'ms': 'ms',
        'mt': 'mt',
        'my': 'my',
        'na': 'na',
        'nd': 'nd',
        'ne': 'ne',
        'ng': 'ng',
        'nl': 'nl',
        'nl-BE': 'nl',
        'nn': 'nn',
        # Both 'no' and 'nb' collapse to the internal Bokmål code 'nb'.
        'no': 'nb',
        'nb': 'nb',
        'nr': 'nr',
        'nso': 'nso',
        'nv': 'nv',
        'ny': 'nya',
        'oc': 'oc',
        'oj': 'oji',
        'om': 'orm',
        'or': 'or',
        'os': 'os',
        # NOTE(review): INTERNAL_NAMES has no 'pa' entry (only 'pan'/'pnb') —
        # verify whether this should be 'pan'.
        'pa': 'pa',
        'pap': 'pap',
        'pi': 'pi',
        'pl': 'pl',
        'ps': 'ps',
        'pt-BR': 'pt-br',
        'pt-PT': 'pt',
        'qu': 'que',
        'rm': 'rm',
        'rn': 'run',
        'ro': 'ro',
        'ru': 'ru',
        'rup': 'rup',
        'rw': 'kin',
        'rue-UA': 'ry',
        'sa': 'sa',
        'sc': 'sc',
        'sd': 'sd',
        'se': 'se',
        'sg': 'sg',
        'sh': 'sh',
        'si': 'si',
        'sk': 'sk',
        'sl': 'sl',
        'sm': 'sm',
        'sn': 'sna',
        'so': 'som',
        'sq': 'sq',
        'sr': 'sr',
        'sr-Latn': 'sr-latn',
        'ss': 'ss',
        'st': 'sot',
        'su': 'su',
        'sv': 'sv',
        'sw': 'swa',
        'ta': 'ta',
        'te': 'te',
        'tet': 'tet',
        'tg': 'tg',
        'th': 'th',
        'ti': 'tir',
        'tk': 'tk',
        'tl': 'tl',
        'tlh': 'tlh',
        'tn': 'tsn',
        'to': 'to',
        'tr': 'tr',
        'ts': 'ts',
        'tt': 'tt',
        'ty': 'ty',
        'tw': 'tw',
        'uk': 'uk',
        'ug': 'ug',
        'ur': 'ur',
        'umb': 'umb',
        'uz': 'uz',
        've': 've',
        'vi': 'vi',
        'vo': 'vo',
        'wa': 'wa',
        'wo': 'wol',
        'xh': 'xho',
        'yi': 'yi',
        'yo': 'yor',
        'zh': 'zh-hk',
        'zh-CN': 'zh-cn',
        'zh-HK': 'zh-hk',
        'zh-Hans': 'zh-cn',
        'zh-Hant': 'zh-tw',
        'zh_Hant-HK': 'nan',
        # we need to fix unilangs what to do when
        # two dialects point to the same main language
        'zh-SG': 'zh-sg',
        'zh-TW': 'zh-tw',
        'za': 'za',
        'zu': 'zul'})
def _add_vimeo():
    """Register the 'vimeo' standard.

    Only the codes that differ from the internal ones are listed — not the
    full language set.
    """
    # Adding in just the things that differ, and not all langs.
    add_standard('vimeo', {
        #vimeo code : amara code
        'am': 'amh',
        'en-us': 'en',
        'es-es': 'es',
        # NOTE(review): internal Frisian code is 'fy-nl' (no 'fy' in
        # INTERNAL_NAMES) — verify this mapping direction.
        'fy-nl': 'fy',
        'ha': 'hau',
        'iw': 'he',
        'ln': 'lin',
        # Bug fix: was 'mig', which is not a registered internal code.
        # The internal code for Malagasy is 'mlg' (as used by the
        # iso-639-1 and youtube standards above).
        'mg': 'mlg',
        'om': 'orm',
        'pa': 'pan',
        'pt-pt': 'pt',
        'qu': 'que',
        'rn': 'run',
        'rw': 'kin',
        'sn': 'sna',
        'so': 'som',
        'sw': 'swa',
        'ti': 'tir',
        'tn': 'tsn',
        'wo': 'wol',
        'xh': 'xho',
        'yo': 'yor',
        'zh-hans': 'zh-cn',
        'zh-hant': 'zh-tw',
        'zu': 'zul',
        }
    )
def _add_bcp47():
    """Register the BCP-47 standards using custom converter objects.

    Two variants are registered via add_standard_custom(): 'bcp47'
    (StrictBCP47ToUnilangConverter) and 'bcp47-lossy'
    (LossyBCP47ToUnilangConverter).  Both share UNILANGS_TO_BCP47 as the
    internal-to-BCP47 reverse mapping.
    """
    add_standard_custom('bcp47', StrictBCP47ToUnilangConverter(),
                        UNILANGS_TO_BCP47)
    add_standard_custom('bcp47-lossy', LossyBCP47ToUnilangConverter(),
                        UNILANGS_TO_BCP47)
# Build all registries at import time: first the internal name table, then
# each supported standard.
# NOTE(review): _add_vimeo() is defined above but never invoked here —
# confirm whether the 'vimeo' standard is intentionally left unregistered.
_generate_initial_data()
_add_iso_639_1()
_add_django()
_add_unisubs()
_add_youtube()
_add_bcp47()
class LanguageCode(object):
    """A language, decoded from its code in one of the registered standards.

    Internally the language is stored as the library's "universal" code; it
    can then be re-encoded into any other registered standard.
    """
    def __init__(self, language_code, standard):
        """Decode *language_code* according to *standard* (case-insensitive).

        Raises Exception if the standard is unknown; an unknown code within
        a known standard surfaces as a raw KeyError.
        """
        try:
            standard_dict = TO_INTERNAL[standard.lower()]
        except KeyError:
            # Typo fix: error message previously read "registred".
            raise Exception("Standard '%s' is not registered" % standard)
        self._code = standard_dict[language_code]
    def encode(self, standard, fuzzy=False):
        """Return the code for this language in the given standard.

        Raises KeyError if the standard has no code for this language
        (unless `fuzzy` is set).
        """
        if fuzzy:
            return self._fuzzy_encode(standard)
        else:
            return FROM_INTERNAL[standard.lower()][self._code]
    def _fuzzy_encode(self, standard):
        """Return the code or closest approximate for this language in the given standard.
        This will try harder than the `encode()` function, but may result in
        data loss.  For example:
            >>> lc = LanguageCode('en-gb', 'django')
            >>> lc.name()
            'British English'
            >>> lc.encode('iso-639-1')
            KeyError...
            >>> lc.fuzzy_encode('iso-639-1')
            'en'
        Here's an example of how you can lose data:
            >>> original = 'en-gb'
            >>> lc = LanguageCode(original, 'django')
            >>> new_lang = lc.fuzzy_encode('iso-639-1')
            >>> new_lc = LanguageCode(new_lang, 'iso-639-1')
            >>> result = new_lc.encode('django')
            >>> assert original != result
        """
        # TODO: This.  (Currently always returns None.)
        return
    def name(self):
        """Return the English name for this language as a unicode string.

        Note: The strings returned from this function have already been marked
        with gettext_noop, so they should be safe to use with gettext to
        translate into another language.
        """
        return INTERNAL_NAMES[self._code][0]
    def native_name(self):
        """Return the native name for this language as a unicode string."""
        return INTERNAL_NAMES[self._code][1]
    def aliases(self):
        """Return the "aliases" for this language code.

        This is easiest to describe with an example:
            >>> LanguageCode('en', 'iso-639-1').aliases()
            { 'iso-639-1': 'en',
              'iso-639-2': 'eng',
              'django': 'en',
              # ...
            }
        Standards that have no code for this language are omitted.
        """
        return dict((standard, FROM_INTERNAL[standard][self._code])
                    for standard in FROM_INTERNAL.keys()
                    if FROM_INTERNAL[standard].get(self._code))
def get_language_name_mapping(standard):
    """Return a dict of code -> English name for all languages in the standard."""
    return {code: LanguageCode(code, standard).name()
            for code in TO_INTERNAL.get(standard)}
def get_language_native_mapping(standard):
    """Return a dict of code -> native name for all languages in the standard."""
    return {code: LanguageCode(code, standard).native_name()
            for code in TO_INTERNAL.get(standard)}
def get_language_code_mapping(standard):
    """Return a dict of code -> LanguageCode for all languages in the standard."""
    return {code: LanguageCode(code, standard)
            for code in TO_INTERNAL.get(standard)}
def _debug_missing_languages(standard):
    """Return (internal_code, names) pairs for languages the standard lacks.

    Bug fix: the original tested ``internal_code not in FROM_INTERNAL`` —
    but FROM_INTERNAL's keys are standard *names*, not language codes, so
    `standard` was ignored and every language was reported missing.  The
    membership test now runs against the standard's own reverse mapping.
    """
    return [(internal_code, name)
            for internal_code, name in INTERNAL_NAMES.items()
            if internal_code not in FROM_INTERNAL[standard]]
def _debug_missing_language_codes(standard, reference_standard='unisubs'):
    """
    Return a list of all the languages codes missing from the given standard
    (relative to `reference_standard`).

    Idiom fix: the original built the set via a list comprehension executed
    only for its side effects; use a set comprehension instead.
    """
    unisubs_langs = set(get_language_code_mapping(reference_standard).keys())
    standard_langs = {LanguageCode(lc, standard).encode(reference_standard)
                      for lc in get_language_code_mapping(standard).keys()}
    return list(unisubs_langs.difference(standard_langs))
|
# The MIT License (MIT)
# Copyright (c) 2016 by the Cate Development Team and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import argparse
import json
import os.path
import signal
import socket
import subprocess
import sys
import time
import traceback
import urllib.request
from datetime import date, datetime
from threading import Timer
from typing import Optional
from cate.core.monitor import Monitor, ConsoleMonitor
from cate.core.util import cwd
from cate.ui.wsmanag import FSWorkspaceManager
from cate.version import __version__
from tornado.ioloop import IOLoop
from tornado.log import enable_pretty_logging
from tornado.web import RequestHandler, Application
# Explicitly load Cate-internal plugins.
__import__('cate.ds')
__import__('cate.ops')
# Program name used in argparse help/version output and as default caller.
CLI_NAME = 'cate-webapi'
LOCALHOST = '127.0.0.1'
# {{cate-config}}
# By default, WebAPI service will auto-exit after 2 hours of inactivity (if caller='cate', the CLI)
ON_INACTIVITY_AUTO_EXIT_AFTER = 120 * 60.0
# {{cate-config}}
# By default, WebAPI service will auto-exit after 5 seconds if all workspaces are closed (if caller='cate', the CLI)
ON_ALL_CLOSED_AUTO_EXIT_AFTER = 5.0
# Log file lives in the user's ~/.cate directory.
WEBAPI_LOG_FILE = os.path.join(os.path.expanduser('~'), '.cate', 'webapi.log')
class WebAPIServiceError(Exception):
    """Error raised by the Cate WebAPI service.

    `cause` may be an Exception (its message and the current traceback are
    adopted), a plain string message, or any other object (stored as cause
    only; remaining args become the Exception args).
    """
    def __init__(self, cause, *args, **kwargs):
        if isinstance(cause, Exception):
            super(WebAPIServiceError, self).__init__(str(cause), *args, **kwargs)
            # Bug fix: the local was named `traceback`, shadowing the
            # module-level `traceback` import for the rest of this method.
            _, _, tb = sys.exc_info()
            self.with_traceback(tb)
        elif isinstance(cause, str):
            super(WebAPIServiceError, self).__init__(cause, *args, **kwargs)
        else:
            super(WebAPIServiceError, self).__init__(*args, **kwargs)
        self._cause = cause
    @property
    def cause(self):
        """The original cause passed to the constructor."""
        return self._cause
# All JSON responses should have same structure, namely a dictionary as follows:
#
# {
# "status": "ok" | "error",
# "error": optional error-details,
# "content": optional content, if status "ok"
# }
def get_application():
    """Create and configure the Tornado Application for the WebAPI service.

    Registers all workspace/resource route handlers and attaches service
    state (workspace manager, auto-exit bookkeeping, activity timestamp)
    as application attributes.
    """
    application = Application([
        (url_pattern('/'), VersionHandler),
        (url_pattern('/ws/new'), WorkspaceNewHandler),
        (url_pattern('/ws/get_open'), WorkspaceGetOpenHandler),
        (url_pattern('/ws/get/{{base_dir}}'), WorkspaceGetHandler),
        (url_pattern('/ws/open/{{base_dir}}'), WorkspaceOpenHandler),
        (url_pattern('/ws/close/{{base_dir}}'), WorkspaceCloseHandler),
        (url_pattern('/ws/close_all'), WorkspaceCloseAllHandler),
        (url_pattern('/ws/save/{{base_dir}}'), WorkspaceSaveHandler),
        (url_pattern('/ws/save_all'), WorkspaceSaveAllHandler),
        (url_pattern('/ws/del/{{base_dir}}'), WorkspaceDeleteHandler),
        (url_pattern('/ws/clean/{{base_dir}}'), WorkspaceCleanHandler),
        (url_pattern('/ws/run_op/{{base_dir}}'), WorkspaceRunOpHandler),
        (url_pattern('/ws/res/set/{{base_dir}}/{{res_name}}'), ResourceSetHandler),
        (url_pattern('/ws/res/del/{{base_dir}}/{{res_name}}'), ResourceDeleteHandler),
        (url_pattern('/ws/res/write/{{base_dir}}/{{res_name}}'), ResourceWriteHandler),
        (url_pattern('/ws/res/plot/{{base_dir}}/{{res_name}}'), ResourcePlotHandler),
        (url_pattern('/ws/res/print/{{base_dir}}'), ResourcePrintHandler),
        (url_pattern('/exit'), ExitHandler)
    ])
    application.workspace_manager = FSWorkspaceManager()
    application.auto_exit_enabled = False
    application.auto_exit_timer = None
    application.service_info_file = None
    # Bug fix: time.clock() was removed in Python 3.8 (deprecated since 3.3);
    # time.perf_counter() is its documented replacement for interval timing.
    # NOTE(review): any other time.clock() call sites in this module that
    # compare against this timestamp must be updated consistently.
    application.time_of_last_activity = time.perf_counter()
    return application
def main(args=None):
    """Command-line entry point: parse arguments and start or stop the WebAPI service."""
    args = sys.argv[1:] if args is None else args
    parser = argparse.ArgumentParser(prog=CLI_NAME,
                                     description='ESA CCI Toolbox WebAPI tool, version %s' % __version__)
    parser.add_argument('--version', '-V', action='version', version='%s %s' % (CLI_NAME, __version__))
    parser.add_argument('--port', '-p', dest='port', metavar='PORT', type=int,
                        help='run WebAPI service on port number PORT')
    parser.add_argument('--address', '-a', dest='address', metavar='ADDRESS',
                        help='run WebAPI service using address ADDRESS', default='')
    parser.add_argument('--caller', '-c', dest='caller', default=CLI_NAME,
                        help='name of the calling application')
    parser.add_argument('--file', '-f', dest='file', metavar='FILE',
                        help="if given, service information will be written to (start) or read from (stop) FILE")
    parser.add_argument('command', choices=['start', 'stop'],
                        help='start or stop the service')
    ns = parser.parse_args(args)
    service_kwargs = dict(port=ns.port,
                          address=ns.address,
                          caller=ns.caller,
                          service_info_file=ns.file)
    if ns.command == 'start':
        start_service(**service_kwargs)
    else:
        stop_service(kill_after=5.0, timeout=5.0, **service_kwargs)
def start_service_subprocess(port: int = None,
                             address: str = None,
                             caller: str = None,
                             service_info_file: str = None,
                             timeout: float = 10.0) -> None:
    """
    Start the WebAPI service as a subprocess and wait until it responds over HTTP.

    :param port: port number; a free port is picked if omitted
    :param address: service address, defaults to localhost
    :param caller: name of the calling application
    :param service_info_file: optional service information JSON file
    :param timeout: seconds to wait for the service to become reachable
    :raise WebAPIServiceError: if the subprocess terminates prematurely
    :raise TimeoutError: if the service does not respond within *timeout* seconds
    """
    port = port or find_free_port()
    command = _join_command('start', port, address, caller, service_info_file)
    webapi = subprocess.Popen(command, shell=True)
    webapi_url = 'http://%s:%s/' % (address or LOCALHOST, port)
    # Fix: use a wall-clock timer. time.clock() measured CPU time on Unix, so the
    # timeout never expired while this loop was sleeping; it was also removed in
    # Python 3.8. perf_counter() is monotonic wall-clock time.
    t0 = time.perf_counter()
    while True:
        exit_code = webapi.poll()
        if exit_code is not None:
            # Process terminated, we can return now, as there will be no running service
            raise WebAPIServiceError('Cate WebAPI service terminated with exit code %d' % exit_code)
        # noinspection PyBroadException
        try:
            urllib.request.urlopen(webapi_url, timeout=2)
            # Success!
            return
        except Exception:
            pass
        time.sleep(0.1)
        if time.perf_counter() - t0 > timeout:
            raise TimeoutError('Cate WebAPI service timeout, exceeded %d sec' % timeout)
def stop_service_subprocess(port: int = None,
                            address: str = None,
                            caller: str = None,
                            service_info_file: str = None,
                            timeout: float = 10.0) -> None:
    """
    Stop a running WebAPI service by invoking this module's 'stop' command in a subprocess.

    :raise WebAPIServiceError: if the subprocess exits with a non-zero code
    """
    stop_command = _join_command('stop', port, address, caller, service_info_file)
    returned = subprocess.call(stop_command, shell=True, timeout=timeout)
    if returned:
        raise WebAPIServiceError('Cate WebAPI service terminated with exit code %d' % returned)
def _join_command(sub_command, port, address, caller, service_info_file):
command = '"%s" -m cate.ui.webapi' % sys.executable
if port:
command += ' -p %d' % port
if address:
command += ' -a "%s"' % address
if caller:
command += ' -c "%s"' % caller
if service_info_file:
command += ' -f "%s"' % service_info_file
return command + ' ' + sub_command
def start_service(port: int = None, address: str = None, caller: str = None, service_info_file: str = None) -> dict:
    """
    Start a WebAPI service.
    The *service_info_file*, if given, represents the service in the filesystem, similar to
    the ``/var/run/`` directory on Linux systems.
    If the service file exist and its information is compatible with the requested *port*, *address*, *caller*, then
    this function simply returns without taking any other actions.
    :param port: the port number
    :param address: the address
    :param caller: the name of the calling application (informal)
    :param service_info_file: If not ``None``, a service information JSON file will be written to *service_info_file*.
    :return: service information dictionary
    """
    if service_info_file and os.path.isfile(service_info_file):
        service_info = read_service_info(service_info_file)
        if is_service_compatible(port, address, caller, service_info):
            port = service_info.get('port')
            address = service_info.get('address') or LOCALHOST
            if is_service_running(port, address):
                print('Cate WebAPI service already running on %s:%s, reusing it' % (address, port))
                return service_info
            else:
                # Try shutting down the service, even violently
                # Fix: pass service_info_file by keyword; previously it was passed
                # positionally and therefore interpreted as the *port* argument.
                stop_service(service_info_file=service_info_file, kill_after=5.0, timeout=5.0)
        else:
            print('warning: Cate WebAPI service info file exists: %s, removing it' % service_info_file)
            os.remove(service_info_file)
    import tornado.options
    options = tornado.options.options
    # Check, we should better use a log file per caller, e.g. "~/.cate/webapi-%s.log" % caller
    options.log_file_prefix = WEBAPI_LOG_FILE
    options.log_to_stderr = None
    enable_pretty_logging()
    application = get_application()
    application.service_info_file = service_info_file
    # Auto-exit is only enabled when the service was started by the Cate CLI.
    application.auto_exit_enabled = caller == 'cate'
    port = port or find_free_port()
    print('starting Cate WebAPI on %s:%s' % (address or LOCALHOST, port))
    application.listen(port, address=address or '')
    io_loop = IOLoop()
    io_loop.make_current()
    service_info = dict(port=port,
                        address=address,
                        caller=caller,
                        started=datetime.now().isoformat(sep=' '),
                        process_id=os.getpid())
    if service_info_file:
        _write_service_info(service_info, service_info_file)
    # IOLoop.call_later(delay, callback, *args, **kwargs)
    if application.auto_exit_enabled:
        _install_next_inactivity_check(application)
    # Blocks until the I/O loop is stopped (e.g. via the /exit route).
    IOLoop.instance().start()
    return service_info
def stop_service(port=None,
                 address=None,
                 caller: str = None,
                 service_info_file: str = None,
                 kill_after: float = None,
                 timeout: float = 10.0) -> dict:
    """
    Stop a WebAPI service.
    :param port: the port number; may also be read from *service_info_file*
    :param address: the address; may also be read from *service_info_file*
    :param caller: the name of the calling application (informal)
    :param service_info_file: optional service information JSON file; removed on success
    :param kill_after: if not ``None``, the number of seconds to wait after a hanging service process will be killed
    :param timeout: request timeout in seconds
    :return: service information dictionary
    :raise WebAPIServiceError: if no port can be determined
    """
    service_info = {}
    if service_info_file:
        service_info = read_service_info(service_info_file)
        if service_info is None and port is None:
            raise RuntimeWarning('Cate WebAPI service not running')
        service_info = service_info or {}
    port = port or service_info.get('port')
    address = address or service_info.get('address')
    caller = caller or service_info.get('caller')
    pid = service_info.get('process_id')
    if not port:
        raise WebAPIServiceError('cannot stop Cate WebAPI service on unknown port (caller: %s)' % caller)
    address_and_port = '%s:%s' % (address or LOCALHOST, port)
    print('stopping Cate WebAPI on %s' % address_and_port)
    # noinspection PyBroadException
    try:
        with urllib.request.urlopen('http://%s/exit' % address_and_port, timeout=timeout * 0.3) as response:
            response.read()
    except Exception:
        # Either process does not exist, or timeout, or some other error
        pass
    # Fix: guard against kill_after=None (the default), which previously raised a
    # TypeError in ``kill_after * 0.5`` before the service state could be checked.
    if kill_after:
        # give the service a bit time to shut down before testing
        time.sleep(kill_after * 0.5)
        # Note: is_service_running() should be replaced by is_process_active(pid)
        if pid and is_service_running(port, address, timeout=timeout * 0.3):
            # If we have a PID and the service runs
            time.sleep(kill_after * 0.5)
            # Note: is_service_running() should be replaced by is_process_active(pid)
            if is_service_running(port, address, timeout=timeout * 0.3):
                # noinspection PyBroadException
                try:
                    os.kill(pid, signal.SIGTERM)
                except Exception:
                    pass
    # Fix: guard against service_info_file=None, which previously raised a TypeError
    # in os.path.isfile().
    if service_info_file and os.path.isfile(service_info_file):
        os.remove(service_info_file)
    return dict(port=port, address=address, caller=caller, started=service_info.get('started', None))
def is_service_compatible(port: Optional[int], address: Optional[str], caller: Optional[str],
                          service_info: dict) -> bool:
    """
    Check whether a running service described by *service_info* satisfies the
    requested *port*, *address*, and *caller*. Unspecified values always match.
    """
    if not port and not service_info.get('port'):
        # Neither side knows a port; should never happen, but the
        # service info file may have been modified externally.
        return False
    if port and port > 0 and port != service_info.get('port'):
        return False
    if address and address != service_info.get('address'):
        return False
    if caller and caller != service_info.get('caller'):
        return False
    return True
def is_service_running(port: int, address: str, timeout: float = 10.0) -> bool:
    """
    Check whether a WebAPI service responds with status "ok" at *address*:*port*.

    :param port: service port number
    :param address: service address; defaults to 127.0.0.1 if empty
    :param timeout: request timeout in seconds
    :return: ``True`` if the service answered with a JSON document whose "status" is "ok"
    """
    url = 'http://%s:%s/' % (address or '127.0.0.1', port)
    try:
        with urllib.request.urlopen(url, timeout=timeout) as response:
            json_text = response.read()
    except Exception:
        # Fix: narrowed from a bare ``except:``, which also swallowed
        # KeyboardInterrupt and SystemExit.
        return False
    json_response = json.loads(json_text.decode('utf-8'))
    return json_response.get('status') == 'ok'
def find_free_port():
    """Ask the OS for a currently unused TCP port number and return it."""
    with socket.socket() as sock:
        # Binding to port 0 makes the host assign a free port.
        sock.bind(('', 0))
        return sock.getsockname()[1]
def read_service_info(service_info_file: str) -> dict:
    """
    Get a dictionary with WebAPI service information:::
        {
            "port": service-port-number (int)
            "address": service-address (str)
            "caller": caller-name (str)
            "started": service-start-time (str)
        }
    :return: dictionary with WebAPI service information or ``None`` if it does not exist
    :raise ValueError: if *service_info_file* is not given
    :raise OSError, IOError: if information file exists, but could not be loaded
    """
    if not service_info_file:
        raise ValueError('service_info_file argument must be given')
    if not os.path.isfile(service_info_file):
        return None
    with open(service_info_file) as fp:
        return json.load(fp=fp) or {}
def _write_service_info(service_info: dict, service_info_file: str) -> None:
if not service_info:
raise ValueError('service_info argument must be given')
if not service_info_file:
raise ValueError('service_info_file argument must be given')
os.makedirs(os.path.dirname(service_info_file), exist_ok=True)
with open(service_info_file, 'w') as fp:
json.dump(service_info, fp, indent=' ')
def _exit(application: Application):
    """Remove the service info file (best effort) and stop the Tornado I/O loop."""
    service_info_file = application.service_info_file
    if service_info_file and os.path.isfile(service_info_file):
        # noinspection PyBroadException
        try:
            os.remove(service_info_file)
        except:
            # Best effort only; the service is going down anyway.
            pass
    IOLoop.instance().stop()
def _install_next_inactivity_check(application):
    """Schedule the next inactivity check on the I/O loop."""
    IOLoop.instance().call_later(ON_INACTIVITY_AUTO_EXIT_AFTER, _check_inactivity, application)
def _check_inactivity(application: Application):
    """Exit the service if it has been inactive for too long, else re-schedule the check."""
    # NOTE(review): time.clock() is deprecated (removed in Python 3.8) and measures CPU
    # time on Unix; it must stay consistent with how time_of_last_activity is recorded.
    inactivity_time = time.clock() - application.time_of_last_activity
    if inactivity_time > ON_INACTIVITY_AUTO_EXIT_AFTER:
        print('stopping WebAPI service after %.1f seconds of inactivity' % inactivity_time)
        _auto_exit(application)
    else:
        _install_next_inactivity_check(application)
def _auto_exit(application: Application):
    """Request a service shutdown from within the I/O loop thread."""
    IOLoop.instance().add_callback(_exit, application)
def _on_workspace_closed(application: Application):
    """Arm the auto-exit timer when the last open workspace has been closed."""
    if not application.auto_exit_enabled:
        return
    workspace_manager = application.workspace_manager
    num_open_workspaces = workspace_manager.num_open_workspaces()
    _check_auto_exit(application, num_open_workspaces == 0, ON_ALL_CLOSED_AUTO_EXIT_AFTER)
def _check_auto_exit(application: Application, condition: bool, interval: float):
    """
    Cancel any pending auto-exit timer, then start a new one that fires after
    *interval* seconds if *condition* holds.
    """
    if application.auto_exit_timer is not None:
        # noinspection PyBroadException
        try:
            application.auto_exit_timer.cancel()
        except:
            # Timer may already have fired or been cancelled.
            pass
    if condition:
        application.auto_exit_timer = Timer(interval, _auto_exit, [application])
        application.auto_exit_timer.start()
    else:
        application.auto_exit_timer = None
def _new_monitor() -> Monitor:
    """Create a console progress monitor for long-running workspace operations."""
    return ConsoleMonitor(stay_in_line=True, progress_bar_size=30)
def url_pattern(pattern: str):
    """
    Convert a string *pattern* where any occurrences of ``{{NAME}}`` are replaced by an equivalent
    regex expression which will assign matching character groups to NAME. Characters match until
    one of the RFC 2396 reserved characters is found or the end of the *pattern* is reached.
    RFC 2396 Uniform Resource Identifiers (URI): Generic Syntax lists
    the following reserved characters::
        reserved = ";" | "/" | "?" | ":" | "@" | "&" | "=" | "+" | "$" | ","
    :param pattern: URL pattern
    :return: equivalent regex pattern
    :raise ValueError: if *pattern* is invalid
    """
    # Fix: raw string literal; the previous plain literal relied on invalid escape
    # sequences such as '\;' which raise a (deprecation/syntax) warning on modern Python.
    # The resulting string value is unchanged.
    name_pattern = r'(?P<%s>[^\;\/\?\:\@\&\=\+\$\,]+)'
    reg_expr = ''
    pos = 0
    while True:
        pos1 = pattern.find('{{', pos)
        if pos1 >= 0:
            pos2 = pattern.find('}}', pos1 + 2)
            if pos2 > pos1:
                name = pattern[pos1 + 2:pos2]
                if not name.isidentifier():
                    raise ValueError('name in {{name}} must be a valid identifier, but got "%s"' % name)
                reg_expr += pattern[pos:pos1] + (name_pattern % name)
                pos = pos2 + 2
            else:
                raise ValueError('no matching "}}" after "{{" in "%s"' % pattern)
        else:
            # No more placeholders; append the remainder verbatim.
            reg_expr += pattern[pos:]
            break
    return reg_expr
def _status_ok(content: object = None):
return dict(status='ok', content=content)
def _status_error(exception: Exception = None, type_name: str = None, message: str = None):
trace_back = None
if exception is not None:
trace_back = traceback.format_exc()
type_name = type_name or type(exception).__name__
message = message or str(exception)
error_details = {}
if trace_back is not None:
error_details['traceback'] = trace_back
if type_name:
error_details['type'] = type_name
if message:
error_details['message'] = message
response = dict(status='error', error=dict(type=type_name, message=message))
if exception is not None:
response['traceback'] = traceback.format_exc()
return dict(status='error', error=error_details) if error_details else dict(status='error')
# noinspection PyAbstractClass
class BaseRequestHandler(RequestHandler):
    """Base class for all WebAPI handlers; records request activity for the auto-exit logic."""
    def on_finish(self):
        # Refresh the inactivity timestamp after every completed request.
        # NOTE(review): time.clock() is deprecated; must match _check_inactivity().
        self.application.time_of_last_activity = time.clock()
# noinspection PyAbstractClass
class WorkspaceGetHandler(BaseRequestHandler):
    """GET /ws/get/{base_dir}: return a workspace's JSON representation, optionally opening it."""
    def get(self, base_dir):
        workspace_manager = self.application.workspace_manager
        # Query argument 'open' is parsed as a case-insensitive boolean, default False.
        open_it = self.get_query_argument('open', default='False').lower() == 'true'
        try:
            workspace = workspace_manager.get_workspace(base_dir, open=open_it)
            self.write(_status_ok(content=workspace.to_json_dict()))
        except Exception as e:
            self.write(_status_error(exception=e))
# noinspection PyAbstractClass
class WorkspaceGetOpenHandler(BaseRequestHandler):
    """GET /ws/get_open: return the JSON representations of all open workspaces."""
    def get(self):
        workspace_manager = self.application.workspace_manager
        try:
            workspace_list = workspace_manager.get_open_workspaces()
            self.write(_status_ok(content=[workspace.to_json_dict() for workspace in workspace_list]))
        except Exception as e:
            self.write(_status_error(exception=e))
# noinspection PyAbstractClass
class WorkspaceNewHandler(BaseRequestHandler):
    """GET /ws/new: create a new workspace from 'base_dir', 'save', and 'description' query args."""
    def get(self):
        base_dir = self.get_query_argument('base_dir')
        save = self.get_query_argument('save', default='False').lower() == 'true'
        description = self.get_query_argument('description', default='')
        workspace_manager = self.application.workspace_manager
        try:
            workspace = workspace_manager.new_workspace(base_dir, save=save, description=description)
            self.write(_status_ok(workspace.to_json_dict()))
        except Exception as e:
            self.write(_status_error(exception=e))
# noinspection PyAbstractClass
class WorkspaceOpenHandler(BaseRequestHandler):
    """GET /ws/open/{base_dir}: open the workspace at *base_dir* and return its JSON form."""
    def get(self, base_dir):
        workspace_manager = self.application.workspace_manager
        try:
            workspace = workspace_manager.open_workspace(base_dir)
            self.write(_status_ok(workspace.to_json_dict()))
        except Exception as e:
            self.write(_status_error(exception=e))
# noinspection PyAbstractClass
class WorkspaceCloseHandler(BaseRequestHandler):
    """GET /ws/close/{base_dir}: close one workspace; may trigger the auto-exit logic."""
    def get(self, base_dir):
        save = self.get_query_argument('save', default='False').lower() == 'true'
        workspace_manager = self.application.workspace_manager
        try:
            workspace_manager.close_workspace(base_dir, save)
            # May arm the auto-exit timer if this was the last open workspace.
            _on_workspace_closed(self.application)
            self.write(_status_ok())
        except Exception as e:
            self.write(_status_error(exception=e))
# noinspection PyAbstractClass
class WorkspaceCloseAllHandler(BaseRequestHandler):
    """GET /ws/close_all: close all open workspaces; may trigger the auto-exit logic."""
    def get(self):
        save = self.get_query_argument('save', default='False').lower() == 'true'
        workspace_manager = self.application.workspace_manager
        try:
            workspace_manager.close_all_workspaces(save)
            _on_workspace_closed(self.application)
            self.write(_status_ok())
        except Exception as e:
            self.write(_status_error(exception=e))
# noinspection PyAbstractClass
class WorkspaceSaveHandler(BaseRequestHandler):
    """GET /ws/save/{base_dir}: persist the workspace at *base_dir*."""
    def get(self, base_dir):
        workspace_manager = self.application.workspace_manager
        try:
            workspace_manager.save_workspace(base_dir)
            self.write(_status_ok())
        except Exception as e:
            self.write(_status_error(exception=e))
# noinspection PyAbstractClass
class WorkspaceSaveAllHandler(BaseRequestHandler):
    """GET /ws/save_all: persist all open workspaces."""
    def get(self):
        workspace_manager = self.application.workspace_manager
        try:
            workspace_manager.save_all_workspaces()
            self.write(_status_ok())
        except Exception as e:
            self.write(_status_error(exception=e))
# noinspection PyAbstractClass
class WorkspaceDeleteHandler(BaseRequestHandler):
    """GET /ws/del/{base_dir}: delete the workspace at *base_dir*."""
    def get(self, base_dir):
        workspace_manager = self.application.workspace_manager
        try:
            workspace_manager.delete_workspace(base_dir)
            self.write(_status_ok())
        except Exception as e:
            self.write(_status_error(exception=e))
# noinspection PyAbstractClass
class WorkspaceCleanHandler(BaseRequestHandler):
    """GET /ws/clean/{base_dir}: clean the workspace at *base_dir*."""
    def get(self, base_dir):
        workspace_manager = self.application.workspace_manager
        try:
            workspace_manager.clean_workspace(base_dir)
            self.write(_status_ok())
        except Exception as e:
            self.write(_status_error(exception=e))
# noinspection PyAbstractClass
class WorkspaceRunOpHandler(BaseRequestHandler):
    """POST /ws/run_op/{base_dir}: run operation 'op_name' with JSON 'op_args' in a workspace."""
    def post(self, base_dir):
        op_name = self.get_body_argument('op_name')
        # 'op_args' is an optional JSON-encoded body argument.
        op_args = self.get_body_argument('op_args', default=None)
        op_args = json.loads(op_args) if op_args else None
        workspace_manager = self.application.workspace_manager
        try:
            # Operations run with the workspace directory as the current working directory.
            with cwd(base_dir):
                workspace_manager.run_op_in_workspace(base_dir, op_name, op_args=op_args,
                                                      monitor=_new_monitor())
            self.write(_status_ok())
        except Exception as e:
            self.write(_status_error(exception=e))
# noinspection PyAbstractClass
class ResourceDeleteHandler(BaseRequestHandler):
    """GET /ws/res/del/{base_dir}/{res_name}: delete a workspace resource."""
    def get(self, base_dir, res_name):
        workspace_manager = self.application.workspace_manager
        try:
            with cwd(base_dir):
                workspace_manager.delete_workspace_resource(base_dir, res_name)
            self.write(_status_ok())
        except Exception as e:
            self.write(_status_error(exception=e))
# noinspection PyAbstractClass
class ResourceSetHandler(BaseRequestHandler):
    """POST /ws/res/set/{base_dir}/{res_name}: (re)compute a resource from 'op_name'/'op_args'."""
    def post(self, base_dir, res_name):
        op_name = self.get_body_argument('op_name')
        # 'op_args' is an optional JSON-encoded body argument.
        op_args = self.get_body_argument('op_args', default=None)
        op_args = json.loads(op_args) if op_args else None
        workspace_manager = self.application.workspace_manager
        try:
            with cwd(base_dir):
                workspace_manager.set_workspace_resource(base_dir, res_name, op_name, op_args=op_args,
                                                         monitor=_new_monitor())
            self.write(_status_ok())
        except Exception as e:
            self.write(_status_error(exception=e))
# noinspection PyAbstractClass
class ResourceWriteHandler(BaseRequestHandler):
    """GET /ws/res/write/{base_dir}/{res_name}: write a resource to 'file_path' in 'format_name'."""
    def get(self, base_dir, res_name):
        file_path = self.get_query_argument('file_path')
        format_name = self.get_query_argument('format_name', default=None)
        workspace_manager = self.application.workspace_manager
        try:
            with cwd(base_dir):
                workspace_manager.write_workspace_resource(base_dir, res_name, file_path, format_name=format_name)
            self.write(_status_ok())
        except Exception as e:
            self.write(_status_error(exception=e))
# noinspection PyAbstractClass
class ResourcePlotHandler(BaseRequestHandler):
    """GET /ws/res/plot/{base_dir}/{res_name}: plot a resource variable, optionally to a file."""
    def get(self, base_dir, res_name):
        var_name = self.get_query_argument('var_name', default=None)
        file_path = self.get_query_argument('file_path', default=None)
        workspace_manager = self.application.workspace_manager
        try:
            with cwd(base_dir):
                workspace_manager.plot_workspace_resource(base_dir, res_name, var_name=var_name, file_path=file_path)
            self.write(_status_ok())
        except Exception as e:
            self.write(_status_error(exception=e))
# noinspection PyAbstractClass
class ResourcePrintHandler(BaseRequestHandler):
    """GET /ws/res/print/{base_dir}: print a resource or expression to the service console."""
    def get(self, base_dir):
        res_name_or_expr = self.get_query_argument('res_name_or_expr', default=None)
        workspace_manager = self.application.workspace_manager
        try:
            with cwd(base_dir):
                workspace_manager.print_workspace_resource(base_dir, res_name_or_expr)
            self.write(_status_ok())
        except Exception as e:
            self.write(_status_error(exception=e))
# noinspection PyAbstractClass
class VersionHandler(BaseRequestHandler):
    """GET /: report service name, version, and current date; used as the liveness probe."""
    def get(self):
        self.write(_status_ok(content={'name': CLI_NAME,
                                       'version': __version__,
                                       'timestamp': date.today().isoformat()}))
# noinspection PyAbstractClass
class ExitHandler(RequestHandler):
    """GET /exit: acknowledge the request, then shut the service down from the I/O loop."""
    def get(self):
        self.write(_status_ok(content='Bye!'))
        # Defer the actual shutdown so this response can still be delivered.
        IOLoop.instance().add_callback(_exit, self.application)
# Script entry point.
if __name__ == "__main__":
    main()
# fix for service files without directory path
# The MIT License (MIT)
# Copyright (c) 2016 by the Cate Development Team and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import argparse
import json
import os.path
import signal
import socket
import subprocess
import sys
import time
import traceback
import urllib.request
from datetime import date, datetime
from threading import Timer
from typing import Optional
from cate.core.monitor import Monitor, ConsoleMonitor
from cate.core.util import cwd
from cate.ui.wsmanag import FSWorkspaceManager
from cate.version import __version__
from tornado.ioloop import IOLoop
from tornado.log import enable_pretty_logging
from tornado.web import RequestHandler, Application
# Explicitly load Cate-internal plugins.
__import__('cate.ds')
__import__('cate.ops')
# Name of the command-line tool, used in argparse and version output.
CLI_NAME = 'cate-webapi'
# Default address used when no explicit service address is configured.
LOCALHOST = '127.0.0.1'
# {{cate-config}}
# By default, WebAPI service will auto-exit after 2 hours of inactivity (if caller='cate', the CLI)
ON_INACTIVITY_AUTO_EXIT_AFTER = 120 * 60.0
# {{cate-config}}
# By default, WebAPI service will auto-exit after 5 seconds if all workspaces are closed (if caller='cate', the CLI)
ON_ALL_CLOSED_AUTO_EXIT_AFTER = 5.0
# Log file location; see the note in start_service() about per-caller log files.
WEBAPI_LOG_FILE = os.path.join(os.path.expanduser('~'), '.cate', 'webapi.log')
class WebAPIServiceError(Exception):
    """Error raised by the WebAPI service management functions in this module."""
    def __init__(self, cause, *args, **kwargs):
        """
        :param cause: an ``Exception`` (its message and the current traceback are adopted),
            a ``str`` message, or any other object that is stored as the cause only.
        """
        if isinstance(cause, Exception):
            super(WebAPIServiceError, self).__init__(str(cause), *args, **kwargs)
            # Adopt the traceback of the exception currently being handled.
            # NOTE(review): this local name shadows the module-level ``traceback`` import.
            _, _, traceback = sys.exc_info()
            self.with_traceback(traceback)
        elif isinstance(cause, str):
            super(WebAPIServiceError, self).__init__(cause, *args, **kwargs)
        else:
            super(WebAPIServiceError, self).__init__(*args, **kwargs)
        # Keep the original cause for the ``cause`` property.
        self._cause = cause
    @property
    def cause(self):
        """The cause passed to the constructor (exception, message string, or other object)."""
        return self._cause
# All JSON responses should have same structure, namely a dictionary as follows:
#
# {
# "status": "ok" | "error",
# "error": optional error-details,
# "content": optional content, if status "ok"
# }
def get_application():
    """
    Create the Tornado web ``Application`` with all WebAPI routes configured.

    :return: a new ``tornado.web.Application`` with extra service-state attributes
        attached (workspace manager, auto-exit flags, activity timestamp).
    """
    application = Application([
        (url_pattern('/'), VersionHandler),
        (url_pattern('/ws/new'), WorkspaceNewHandler),
        (url_pattern('/ws/get_open'), WorkspaceGetOpenHandler),
        (url_pattern('/ws/get/{{base_dir}}'), WorkspaceGetHandler),
        (url_pattern('/ws/open/{{base_dir}}'), WorkspaceOpenHandler),
        (url_pattern('/ws/close/{{base_dir}}'), WorkspaceCloseHandler),
        (url_pattern('/ws/close_all'), WorkspaceCloseAllHandler),
        (url_pattern('/ws/save/{{base_dir}}'), WorkspaceSaveHandler),
        (url_pattern('/ws/save_all'), WorkspaceSaveAllHandler),
        (url_pattern('/ws/del/{{base_dir}}'), WorkspaceDeleteHandler),
        (url_pattern('/ws/clean/{{base_dir}}'), WorkspaceCleanHandler),
        (url_pattern('/ws/run_op/{{base_dir}}'), WorkspaceRunOpHandler),
        (url_pattern('/ws/res/set/{{base_dir}}/{{res_name}}'), ResourceSetHandler),
        (url_pattern('/ws/res/del/{{base_dir}}/{{res_name}}'), ResourceDeleteHandler),
        (url_pattern('/ws/res/write/{{base_dir}}/{{res_name}}'), ResourceWriteHandler),
        (url_pattern('/ws/res/plot/{{base_dir}}/{{res_name}}'), ResourcePlotHandler),
        (url_pattern('/ws/res/print/{{base_dir}}'), ResourcePrintHandler),
        (url_pattern('/exit'), ExitHandler)
    ])
    application.workspace_manager = FSWorkspaceManager()
    application.auto_exit_enabled = False
    application.auto_exit_timer = None
    application.service_info_file = None
    # NOTE(review): time.clock() is deprecated (removed in Python 3.8) and measures CPU
    # time on Unix; it is used consistently with _check_inactivity(), so change both together.
    application.time_of_last_activity = time.clock()
    return application
def main(args=None):
    """Command-line entry point: parse arguments and start or stop the WebAPI service."""
    args = sys.argv[1:] if args is None else args
    parser = argparse.ArgumentParser(prog=CLI_NAME,
                                     description='ESA CCI Toolbox WebAPI tool, version %s' % __version__)
    parser.add_argument('--version', '-V', action='version', version='%s %s' % (CLI_NAME, __version__))
    parser.add_argument('--port', '-p', dest='port', metavar='PORT', type=int,
                        help='run WebAPI service on port number PORT')
    parser.add_argument('--address', '-a', dest='address', metavar='ADDRESS',
                        help='run WebAPI service using address ADDRESS', default='')
    parser.add_argument('--caller', '-c', dest='caller', default=CLI_NAME,
                        help='name of the calling application')
    parser.add_argument('--file', '-f', dest='file', metavar='FILE',
                        help="if given, service information will be written to (start) or read from (stop) FILE")
    parser.add_argument('command', choices=['start', 'stop'],
                        help='start or stop the service')
    ns = parser.parse_args(args)
    service_kwargs = dict(port=ns.port,
                          address=ns.address,
                          caller=ns.caller,
                          service_info_file=ns.file)
    if ns.command == 'start':
        start_service(**service_kwargs)
    else:
        stop_service(kill_after=5.0, timeout=5.0, **service_kwargs)
def start_service_subprocess(port: int = None,
                             address: str = None,
                             caller: str = None,
                             service_info_file: str = None,
                             timeout: float = 10.0) -> None:
    """
    Start the WebAPI service as a subprocess and wait until it responds over HTTP.

    :param port: port number; a free port is picked if omitted
    :param address: service address, defaults to localhost
    :param caller: name of the calling application
    :param service_info_file: optional service information JSON file
    :param timeout: seconds to wait for the service to become reachable
    :raise WebAPIServiceError: if the subprocess terminates prematurely
    :raise TimeoutError: if the service does not respond within *timeout* seconds
    """
    port = port or find_free_port()
    command = _join_command('start', port, address, caller, service_info_file)
    webapi = subprocess.Popen(command, shell=True)
    webapi_url = 'http://%s:%s/' % (address or LOCALHOST, port)
    # Fix: use a wall-clock timer. time.clock() measured CPU time on Unix, so the
    # timeout never expired while this loop was sleeping; it was also removed in
    # Python 3.8. perf_counter() is monotonic wall-clock time.
    t0 = time.perf_counter()
    while True:
        exit_code = webapi.poll()
        if exit_code is not None:
            # Process terminated, we can return now, as there will be no running service
            raise WebAPIServiceError('Cate WebAPI service terminated with exit code %d' % exit_code)
        # noinspection PyBroadException
        try:
            urllib.request.urlopen(webapi_url, timeout=2)
            # Success!
            return
        except Exception:
            pass
        time.sleep(0.1)
        if time.perf_counter() - t0 > timeout:
            raise TimeoutError('Cate WebAPI service timeout, exceeded %d sec' % timeout)
def stop_service_subprocess(port: int = None,
                            address: str = None,
                            caller: str = None,
                            service_info_file: str = None,
                            timeout: float = 10.0) -> None:
    """
    Stop a running WebAPI service by invoking this module's 'stop' command in a subprocess.

    :raise WebAPIServiceError: if the subprocess exits with a non-zero code
    """
    stop_command = _join_command('stop', port, address, caller, service_info_file)
    returned = subprocess.call(stop_command, shell=True, timeout=timeout)
    if returned:
        raise WebAPIServiceError('Cate WebAPI service terminated with exit code %d' % returned)
def _join_command(sub_command, port, address, caller, service_info_file):
command = '"%s" -m cate.ui.webapi' % sys.executable
if port:
command += ' -p %d' % port
if address:
command += ' -a "%s"' % address
if caller:
command += ' -c "%s"' % caller
if service_info_file:
command += ' -f "%s"' % service_info_file
return command + ' ' + sub_command
def start_service(port: int = None, address: str = None, caller: str = None, service_info_file: str = None) -> dict:
    """
    Start a WebAPI service.
    The *service_info_file*, if given, represents the service in the filesystem, similar to
    the ``/var/run/`` directory on Linux systems.
    If the service file exist and its information is compatible with the requested *port*, *address*, *caller*, then
    this function simply returns without taking any other actions.
    :param port: the port number
    :param address: the address
    :param caller: the name of the calling application (informal)
    :param service_info_file: If not ``None``, a service information JSON file will be written to *service_info_file*.
    :return: service information dictionary
    """
    if service_info_file and os.path.isfile(service_info_file):
        service_info = read_service_info(service_info_file)
        if is_service_compatible(port, address, caller, service_info):
            port = service_info.get('port')
            address = service_info.get('address') or LOCALHOST
            if is_service_running(port, address):
                print('Cate WebAPI service already running on %s:%s, reusing it' % (address, port))
                return service_info
            else:
                # Try shutting down the service, even violently
                # Fix: pass service_info_file by keyword; previously it was passed
                # positionally and therefore interpreted as the *port* argument.
                stop_service(service_info_file=service_info_file, kill_after=5.0, timeout=5.0)
        else:
            print('warning: Cate WebAPI service info file exists: %s, removing it' % service_info_file)
            os.remove(service_info_file)
    import tornado.options
    options = tornado.options.options
    # Check, we should better use a log file per caller, e.g. "~/.cate/webapi-%s.log" % caller
    options.log_file_prefix = WEBAPI_LOG_FILE
    options.log_to_stderr = None
    enable_pretty_logging()
    application = get_application()
    application.service_info_file = service_info_file
    # Auto-exit is only enabled when the service was started by the Cate CLI.
    application.auto_exit_enabled = caller == 'cate'
    port = port or find_free_port()
    print('starting Cate WebAPI on %s:%s' % (address or LOCALHOST, port))
    application.listen(port, address=address or '')
    io_loop = IOLoop()
    io_loop.make_current()
    service_info = dict(port=port,
                        address=address,
                        caller=caller,
                        started=datetime.now().isoformat(sep=' '),
                        process_id=os.getpid())
    if service_info_file:
        _write_service_info(service_info, service_info_file)
    # IOLoop.call_later(delay, callback, *args, **kwargs)
    if application.auto_exit_enabled:
        _install_next_inactivity_check(application)
    # Blocks until the I/O loop is stopped (e.g. via the /exit route).
    IOLoop.instance().start()
    return service_info
def stop_service(port=None,
                 address=None,
                 caller: str = None,
                 service_info_file: str = None,
                 kill_after: float = None,
                 timeout: float = 10.0) -> dict:
    """
    Stop a WebAPI service.
    :param port: the port number; may also be read from *service_info_file*
    :param address: the address; may also be read from *service_info_file*
    :param caller: the name of the calling application (informal)
    :param service_info_file: optional service information JSON file; removed on success
    :param kill_after: if not ``None``, the number of seconds to wait after a hanging service process will be killed
    :param timeout: request timeout in seconds
    :return: service information dictionary
    :raise WebAPIServiceError: if no port can be determined
    """
    service_info = {}
    if service_info_file:
        service_info = read_service_info(service_info_file)
        if service_info is None and port is None:
            raise RuntimeWarning('Cate WebAPI service not running')
        service_info = service_info or {}
    port = port or service_info.get('port')
    address = address or service_info.get('address')
    caller = caller or service_info.get('caller')
    pid = service_info.get('process_id')
    if not port:
        raise WebAPIServiceError('cannot stop Cate WebAPI service on unknown port (caller: %s)' % caller)
    address_and_port = '%s:%s' % (address or LOCALHOST, port)
    print('stopping Cate WebAPI on %s' % address_and_port)
    # noinspection PyBroadException
    try:
        with urllib.request.urlopen('http://%s/exit' % address_and_port, timeout=timeout * 0.3) as response:
            response.read()
    except Exception:
        # Either process does not exist, or timeout, or some other error
        pass
    # Fix: guard against kill_after=None (the default), which previously raised a
    # TypeError in ``kill_after * 0.5`` before the service state could be checked.
    if kill_after:
        # give the service a bit time to shut down before testing
        time.sleep(kill_after * 0.5)
        # Note: is_service_running() should be replaced by is_process_active(pid)
        if pid and is_service_running(port, address, timeout=timeout * 0.3):
            # If we have a PID and the service runs
            time.sleep(kill_after * 0.5)
            # Note: is_service_running() should be replaced by is_process_active(pid)
            if is_service_running(port, address, timeout=timeout * 0.3):
                # noinspection PyBroadException
                try:
                    os.kill(pid, signal.SIGTERM)
                except Exception:
                    pass
    # Fix: guard against service_info_file=None, which previously raised a TypeError
    # in os.path.isfile().
    if service_info_file and os.path.isfile(service_info_file):
        os.remove(service_info_file)
    return dict(port=port, address=address, caller=caller, started=service_info.get('started', None))
def is_service_compatible(port: Optional[int], address: Optional[str], caller: Optional[str],
                          service_info: dict) -> bool:
    """
    Check whether a running service described by *service_info* satisfies the
    requested *port*, *address*, and *caller*. Unspecified values always match.
    """
    if not port and not service_info.get('port'):
        # Neither side knows a port; should never happen, but the
        # service info file may have been modified externally.
        return False
    if port and port > 0 and port != service_info.get('port'):
        return False
    if address and address != service_info.get('address'):
        return False
    if caller and caller != service_info.get('caller'):
        return False
    return True
def is_service_running(port: int, address: str, timeout: float = 10.0) -> bool:
    """
    Check whether a WebAPI service responds with status "ok" at *address*:*port*.

    :param port: service port number
    :param address: service address; defaults to 127.0.0.1 if empty
    :param timeout: request timeout in seconds
    :return: ``True`` if the service answered with a JSON document whose "status" is "ok"
    """
    url = 'http://%s:%s/' % (address or '127.0.0.1', port)
    try:
        with urllib.request.urlopen(url, timeout=timeout) as response:
            json_text = response.read()
    except Exception:
        # Fix: narrowed from a bare ``except:``, which also swallowed
        # KeyboardInterrupt and SystemExit.
        return False
    json_response = json.loads(json_text.decode('utf-8'))
    return json_response.get('status') == 'ok'
def find_free_port():
    """Return a TCP port number that is currently unused on this host."""
    with socket.socket() as sock:
        # Binding to port 0 makes the OS choose an unused port for us.
        sock.bind(('', 0))
        return sock.getsockname()[1]
def read_service_info(service_info_file: str) -> dict:
    """
    Get a dictionary with WebAPI service information:::

        {
            "port": service-port-number (int)
            "address": service-address (str)
            "caller": caller-name (str)
            "started": service-start-time (str)
        }

    :return: dictionary with WebAPI service information or ``None`` if it does not exist
    :raise OSError, IOError: if information file exists, but could not be loaded
    """
    if not service_info_file:
        raise ValueError('service_info_file argument must be given')
    if not os.path.isfile(service_info_file):
        return None
    with open(service_info_file) as fp:
        # A file holding JSON 'null' yields an empty dict rather than None.
        return json.load(fp=fp) or {}
def _write_service_info(service_info: dict, service_info_file: str) -> None:
if not service_info:
raise ValueError('service_info argument must be given')
if not service_info_file:
raise ValueError('service_info_file argument must be given')
dir_path = os.path.dirname(service_info_file)
if dir_path:
os.makedirs(dir_path, exist_ok=True)
with open(service_info_file, 'w') as fp:
json.dump(service_info, fp, indent=' ')
def _exit(application: Application):
    """Remove the service info file (best effort) and stop the Tornado IOLoop."""
    service_info_file = application.service_info_file
    if service_info_file and os.path.isfile(service_info_file):
        try:
            os.remove(service_info_file)
        except OSError:
            # Was a bare 'except:'. Removal is best effort only -- a stale
            # info file must never prevent the service from shutting down.
            pass
    IOLoop.instance().stop()
def _install_next_inactivity_check(application):
    # Schedule the next inactivity probe on the Tornado IOLoop.
    IOLoop.instance().call_later(ON_INACTIVITY_AUTO_EXIT_AFTER, _check_inactivity, application)
def _check_inactivity(application: Application):
    """Shut the service down if it has been idle long enough, else re-arm the check."""
    # NOTE(review): time.clock() was removed in Python 3.8; this file uses it
    # consistently for activity timestamps -- migrate all call sites together.
    inactivity_time = time.clock() - application.time_of_last_activity
    if inactivity_time > ON_INACTIVITY_AUTO_EXIT_AFTER:
        print('stopping WebAPI service after %.1f seconds of inactivity' % inactivity_time)
        _auto_exit(application)
    else:
        _install_next_inactivity_check(application)
def _auto_exit(application: Application):
    # Run the exit handler from within the IOLoop thread on its next iteration.
    IOLoop.instance().add_callback(_exit, application)
def _on_workspace_closed(application: Application):
    """When auto-exit is enabled, arm shutdown once no workspaces remain open."""
    if not application.auto_exit_enabled:
        return
    no_open_workspaces = application.workspace_manager.num_open_workspaces() == 0
    _check_auto_exit(application, no_open_workspaces, ON_ALL_CLOSED_AUTO_EXIT_AFTER)
def _check_auto_exit(application: Application, condition: bool, interval: float):
    """(Re)arm or disarm the auto-exit timer.

    Any existing timer is cancelled first; when *condition* holds, a fresh
    timer is started that exits the service after *interval* seconds.
    """
    if application.auto_exit_timer is not None:
        try:
            application.auto_exit_timer.cancel()
        except Exception:
            # Was a bare 'except:'. A timer that already fired cannot be
            # cancelled cleanly; that is fine here.
            pass
    if condition:
        application.auto_exit_timer = Timer(interval, _auto_exit, [application])
        application.auto_exit_timer.start()
    else:
        application.auto_exit_timer = None
def _new_monitor() -> Monitor:
    # Progress is reported to the console the service was started from.
    return ConsoleMonitor(stay_in_line=True, progress_bar_size=30)
def url_pattern(pattern: str):
    """
    Convert a string *pattern* where any occurrences of ``{{NAME}}`` are replaced by an equivalent
    regex expression which will assign matching character groups to NAME. Characters match until
    one of the RFC 2396 reserved characters is found or the end of the *pattern* is reached.

    RFC 2396 Uniform Resource Identifiers (URI): Generic Syntax lists
    the following reserved characters::

        reserved = ";" | "/" | "?" | ":" | "@" | "&" | "=" | "+" | "$" | ","

    :param pattern: URL pattern
    :return: equivalent regex pattern
    :raise ValueError: if *pattern* is invalid
    """
    # Raw string: this was a plain string literal relying on invalid escape
    # sequences (e.g. '\;') being passed through unchanged, which raises a
    # DeprecationWarning since Python 3.6 and is slated to become an error.
    name_pattern = r'(?P<%s>[^\;\/\?\:\@\&\=\+\$\,]+)'
    reg_expr = ''
    pos = 0
    while True:
        pos1 = pattern.find('{{', pos)
        if pos1 >= 0:
            pos2 = pattern.find('}}', pos1 + 2)
            if pos2 > pos1:
                name = pattern[pos1 + 2:pos2]
                if not name.isidentifier():
                    raise ValueError('name in {{name}} must be a valid identifier, but got "%s"' % name)
                reg_expr += pattern[pos:pos1] + (name_pattern % name)
                pos = pos2 + 2
            else:
                raise ValueError('no matching "}}" after "{{" in "%s"' % pattern)
        else:
            # No more placeholders: copy the rest of the pattern verbatim.
            reg_expr += pattern[pos:]
            break
    return reg_expr
def _status_ok(content: object = None):
return dict(status='ok', content=content)
def _status_error(exception: Exception = None, type_name: str = None, message: str = None):
trace_back = None
if exception is not None:
trace_back = traceback.format_exc()
type_name = type_name or type(exception).__name__
message = message or str(exception)
error_details = {}
if trace_back is not None:
error_details['traceback'] = trace_back
if type_name:
error_details['type'] = type_name
if message:
error_details['message'] = message
response = dict(status='error', error=dict(type=type_name, message=message))
if exception is not None:
response['traceback'] = traceback.format_exc()
return dict(status='error', error=error_details) if error_details else dict(status='error')
# noinspection PyAbstractClass
class BaseRequestHandler(RequestHandler):
    """Base class for WebAPI request handlers.

    Records the time of each completed request on the application so the
    inactivity watchdog can decide when to auto-exit.
    """

    def on_finish(self):
        # Called by Tornado after each request completes.
        # NOTE(review): time.clock() was removed in Python 3.8 -- confirm the
        # supported interpreter versions or migrate to time.perf_counter().
        self.application.time_of_last_activity = time.clock()
# noinspection PyAbstractClass
class WorkspaceGetHandler(BaseRequestHandler):
    """GET: fetch a workspace by its base directory, optionally opening it."""

    def get(self, base_dir):
        manager = self.application.workspace_manager
        # 'open' query argument: any casing of "true" enables opening.
        should_open = self.get_query_argument('open', default='False').lower() == 'true'
        try:
            workspace = manager.get_workspace(base_dir, open=should_open)
            self.write(_status_ok(content=workspace.to_json_dict()))
        except Exception as error:
            self.write(_status_error(exception=error))
# noinspection PyAbstractClass
class WorkspaceGetOpenHandler(BaseRequestHandler):
    """GET: list all currently open workspaces."""

    def get(self):
        manager = self.application.workspace_manager
        try:
            open_workspaces = manager.get_open_workspaces()
            payload = [ws.to_json_dict() for ws in open_workspaces]
            self.write(_status_ok(content=payload))
        except Exception as error:
            self.write(_status_error(exception=error))
# noinspection PyAbstractClass
class WorkspaceNewHandler(BaseRequestHandler):
    """GET: create a new workspace from query arguments."""

    def get(self):
        base_dir = self.get_query_argument('base_dir')
        save = self.get_query_argument('save', default='False').lower() == 'true'
        description = self.get_query_argument('description', default='')
        manager = self.application.workspace_manager
        try:
            new_workspace = manager.new_workspace(base_dir, save=save, description=description)
            self.write(_status_ok(new_workspace.to_json_dict()))
        except Exception as error:
            self.write(_status_error(exception=error))
# noinspection PyAbstractClass
class WorkspaceOpenHandler(BaseRequestHandler):
    """GET: open the workspace at the given base directory."""

    def get(self, base_dir):
        manager = self.application.workspace_manager
        try:
            opened = manager.open_workspace(base_dir)
            self.write(_status_ok(opened.to_json_dict()))
        except Exception as error:
            self.write(_status_error(exception=error))
# noinspection PyAbstractClass
class WorkspaceCloseHandler(BaseRequestHandler):
    """GET: close a workspace, optionally saving it first."""

    def get(self, base_dir):
        save = self.get_query_argument('save', default='False').lower() == 'true'
        manager = self.application.workspace_manager
        try:
            manager.close_workspace(base_dir, save)
            # Closing may have emptied the workspace list; let the auto-exit
            # logic react.
            _on_workspace_closed(self.application)
            self.write(_status_ok())
        except Exception as error:
            self.write(_status_error(exception=error))
# noinspection PyAbstractClass
class WorkspaceCloseAllHandler(BaseRequestHandler):
    """GET: close every open workspace, optionally saving each first."""

    def get(self):
        save = self.get_query_argument('save', default='False').lower() == 'true'
        manager = self.application.workspace_manager
        try:
            manager.close_all_workspaces(save)
            _on_workspace_closed(self.application)
            self.write(_status_ok())
        except Exception as error:
            self.write(_status_error(exception=error))
# noinspection PyAbstractClass
class WorkspaceSaveHandler(BaseRequestHandler):
    """GET: save the workspace at the given base directory."""

    def get(self, base_dir):
        manager = self.application.workspace_manager
        try:
            manager.save_workspace(base_dir)
            self.write(_status_ok())
        except Exception as error:
            self.write(_status_error(exception=error))
# noinspection PyAbstractClass
class WorkspaceSaveAllHandler(BaseRequestHandler):
    """GET: save every open workspace."""

    def get(self):
        manager = self.application.workspace_manager
        try:
            manager.save_all_workspaces()
            self.write(_status_ok())
        except Exception as error:
            self.write(_status_error(exception=error))
# noinspection PyAbstractClass
class WorkspaceDeleteHandler(BaseRequestHandler):
    """GET: delete the workspace at the given base directory."""

    def get(self, base_dir):
        manager = self.application.workspace_manager
        try:
            manager.delete_workspace(base_dir)
            self.write(_status_ok())
        except Exception as error:
            self.write(_status_error(exception=error))
# noinspection PyAbstractClass
class WorkspaceCleanHandler(BaseRequestHandler):
    """GET: clean the workspace at the given base directory."""

    def get(self, base_dir):
        manager = self.application.workspace_manager
        try:
            manager.clean_workspace(base_dir)
            self.write(_status_ok())
        except Exception as error:
            self.write(_status_error(exception=error))
# noinspection PyAbstractClass
class WorkspaceRunOpHandler(BaseRequestHandler):
    """POST: run an operation inside a workspace.

    Body arguments: 'op_name' (required) and 'op_args' (optional JSON list).
    """

    def post(self, base_dir):
        op_name = self.get_body_argument('op_name')
        raw_args = self.get_body_argument('op_args', default=None)
        op_args = json.loads(raw_args) if raw_args else None
        manager = self.application.workspace_manager
        try:
            # Run with the workspace directory as CWD so relative paths inside
            # the operation resolve against the workspace.
            with cwd(base_dir):
                manager.run_op_in_workspace(base_dir, op_name, op_args=op_args,
                                            monitor=_new_monitor())
            self.write(_status_ok())
        except Exception as error:
            self.write(_status_error(exception=error))
# noinspection PyAbstractClass
class ResourceDeleteHandler(BaseRequestHandler):
    """GET: delete a named resource from a workspace."""

    def get(self, base_dir, res_name):
        manager = self.application.workspace_manager
        try:
            with cwd(base_dir):
                manager.delete_workspace_resource(base_dir, res_name)
            self.write(_status_ok())
        except Exception as error:
            self.write(_status_error(exception=error))
# noinspection PyAbstractClass
class ResourceSetHandler(BaseRequestHandler):
    """POST: (re)assign a workspace resource from an operation result.

    Body arguments: 'op_name' (required) and 'op_args' (optional JSON list).
    """

    def post(self, base_dir, res_name):
        op_name = self.get_body_argument('op_name')
        raw_args = self.get_body_argument('op_args', default=None)
        op_args = json.loads(raw_args) if raw_args else None
        manager = self.application.workspace_manager
        try:
            with cwd(base_dir):
                manager.set_workspace_resource(base_dir, res_name, op_name, op_args=op_args,
                                               monitor=_new_monitor())
            self.write(_status_ok())
        except Exception as error:
            self.write(_status_error(exception=error))
# noinspection PyAbstractClass
class ResourceWriteHandler(BaseRequestHandler):
    """GET: write a workspace resource to a file, optionally forcing a format."""

    def get(self, base_dir, res_name):
        target_path = self.get_query_argument('file_path')
        format_name = self.get_query_argument('format_name', default=None)
        manager = self.application.workspace_manager
        try:
            with cwd(base_dir):
                manager.write_workspace_resource(base_dir, res_name, target_path, format_name=format_name)
            self.write(_status_ok())
        except Exception as error:
            self.write(_status_error(exception=error))
# noinspection PyAbstractClass
class ResourcePlotHandler(BaseRequestHandler):
    """GET: plot a workspace resource, optionally a single variable / to a file."""

    def get(self, base_dir, res_name):
        selected_var = self.get_query_argument('var_name', default=None)
        out_path = self.get_query_argument('file_path', default=None)
        manager = self.application.workspace_manager
        try:
            with cwd(base_dir):
                manager.plot_workspace_resource(base_dir, res_name, var_name=selected_var, file_path=out_path)
            self.write(_status_ok())
        except Exception as error:
            self.write(_status_error(exception=error))
# noinspection PyAbstractClass
class ResourcePrintHandler(BaseRequestHandler):
    """GET: print a resource value or evaluate an expression in a workspace."""

    def get(self, base_dir):
        target = self.get_query_argument('res_name_or_expr', default=None)
        manager = self.application.workspace_manager
        try:
            with cwd(base_dir):
                manager.print_workspace_resource(base_dir, target)
            self.write(_status_ok())
        except Exception as error:
            self.write(_status_error(exception=error))
# noinspection PyAbstractClass
class VersionHandler(BaseRequestHandler):
    """GET: report the service name, version, and current date."""

    def get(self):
        version_info = {'name': CLI_NAME,
                        'version': __version__,
                        'timestamp': date.today().isoformat()}
        self.write(_status_ok(content=version_info))
# noinspection PyAbstractClass
class ExitHandler(RequestHandler):
    """GET: acknowledge the request and schedule a service shutdown."""

    def get(self):
        self.write(_status_ok(content='Bye!'))
        # Defer the actual shutdown to an IOLoop callback so this response
        # can still be delivered to the client.
        IOLoop.instance().add_callback(_exit, self.application)
# Script entry point.
if __name__ == "__main__":
    main()
|
#!/usr/bin/env python2.7
import argparse
import logging
import os
import pytz
import re
import sys
import time
from datetime import datetime, timedelta
from dateutil import parser
from glanceclient import client
from iso8601 import iso8601
from keystoneclient.v2_0 import client as ksclient
def is_remote_image(image):
    """Return True if *image* is stored only in remote locations.

    Fixes (see commit note below): the image key is 'locations' (a list),
    not 'location'; an image may have several locations; it counts as remote
    only when none of its locations uses the file:// scheme.
    """
    file_re = re.compile(r'^file://')
    for location in image.get('locations', []):
        url = location.get('url')
        # Guard against entries without a 'url' key instead of crashing.
        if url and file_re.match(url):
            return False
    return True
def switch_on_criticality():
    """Map the configured --criticality to a sensu exit code (1=warning, 2=critical)."""
    return 1 if options.criticality == 'warning' else 2
# --- Sensu check: verify the Glance file store matches the image registry ---

# Keystone/Glance credentials come from the standard OS_* environment
# variables (OpenStack RC file).
glance_auth = {
    'username': os.environ['OS_USERNAME'],
    'password': os.environ['OS_PASSWORD'],
    'tenant_name': os.environ['OS_TENANT_NAME'],
    'auth_url': os.environ['OS_AUTH_URL'],
    'region_name': 'RegionOne',
}
argparser = argparse.ArgumentParser()
argparser.add_argument('--imagedir', help='Glance file store image directory',
                       default='/var/lib/glance/images')
argparser.add_argument('--debug', help='Enable API debugging', action='store_true')
argparser.add_argument('--criticality', help='Set sensu alert level, critical is default',
                       default='critical')
options = argparser.parse_args()
store_directory = options.imagedir
if 'OS_CACERT' in os.environ.keys():
    glance_auth['ca_cert'] = os.environ['OS_CACERT']
keystone = ksclient.Client(**glance_auth)
auth_token = keystone.auth_token
endpoint = keystone.service_catalog.url_for(service_type='image',
                                            endpoint_type='publicURL')
if options.debug:
    logging.basicConfig(level=logging.DEBUG)
glance = client.Client('2', endpoint=endpoint, token=auth_token)
glance.format = 'json'
# Fetch the list of files in store_directory matching the UUID regex
uuid_re = re.compile(
    r'[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}')
files = [(x, os.path.join(store_directory, x)) for x in
         os.listdir(store_directory) if uuid_re.match(x)]
# Build (name, size, tz-aware mtime) triples for regular files only.
files = [(x, os.path.getsize(p),
          datetime.fromtimestamp(os.path.getmtime(p), iso8601.Utc()))
         for x, p in files if os.path.isfile(p)]
# Fetch the list of glance images
glance_images = []
kwargs = {'sort_key': 'id', 'sort_dir': 'asc', 'owner': None, 'filters': {}, 'is_public': None}
for x in glance.images.list(**kwargs):
    if x.status == 'active':
        tz_aware_time = parser.parse(x.created_at)
        # Tuples of (id, size, created_at, is_remote).
        glance_images.append((x.id, x.size, tz_aware_time, is_remote_image(x)))
# Check all active images 1 hour or older are present
time_cutoff = datetime.now(iso8601.Utc()) - timedelta(0, 3600)
alert_squelch = datetime.now(iso8601.Utc()) - timedelta(0, 43200) # 12 hours
result = 0
for image in [x for x in glance_images if x[2] < time_cutoff]:
    if not [x for x in files if x[0] == image[0]]:
        if image[3] == False:
            # Remote images never land in the local store, so only local
            # images missing from disk are alerted on.
            print "Glance image %s not found in %s" % (image[0], store_directory)
            result = switch_on_criticality()
# Check all files have a corresponding glance image and ignore brand new / zero size files
for image_file in files:
    if not [x for x in glance_images if x[0] == image_file[0]] and image_file[2] < alert_squelch and image_file[1] > 0:
        print "Unknown file %s found in %s" % (image_file[0], store_directory)
        result = switch_on_criticality()
# Check glance image file sizes match files and ignore difference for squelch
for image in glance_images:
    for image_file in [x for x in files if x[0] == image[0]]:
        if image[1] != image_file[1] and image_file[2] < alert_squelch:
            print "Glance image %s size differs from file on disk" % image[0]
            result = switch_on_criticality()
if result == 0:
    print "Glance image store %s looks good" % store_directory
# The exit code doubles as the sensu status (0 ok, 1 warning, 2 critical).
sys.exit(result)
fix image alert for remote images (#91)
* fix image alert for remote images
What's fixed:
1. key for image location is 'locations', not 'location'
2. one image may have multiple locations.
3. an image counts as remote only if all of its locations are remote; if
any one of its locations uses the 'file' scheme, it is not a remote image.
#!/usr/bin/env python2.7
import argparse
import logging
import os
import pytz
import re
import sys
import time
from datetime import datetime, timedelta
from dateutil import parser
from glanceclient import client
from iso8601 import iso8601
from keystoneclient.v2_0 import client as ksclient
def is_remote_image(image):
    """Return True if *image* is stored only in remote locations.

    An image may have several locations; it is remote only when none of
    them uses the file:// scheme.
    """
    # Renamed from the misleading 'http_re' -- this pattern matches local
    # file:// locations, not HTTP URLs; the capture group was unnecessary.
    file_re = re.compile(r'^file://')
    for location in image.get('locations', []):
        url = location.get('url')
        # Guard against entries without a 'url' key: the previous code
        # passed None to match() and raised TypeError.
        if url and file_re.match(url):
            return False
    return True
def switch_on_criticality():
    """Map the configured --criticality to a sensu exit code (1=warning, 2=critical)."""
    return 1 if options.criticality == 'warning' else 2
# --- Sensu check: verify the Glance file store matches the image registry ---

# Keystone/Glance credentials come from the standard OS_* environment
# variables (OpenStack RC file).
glance_auth = {
    'username': os.environ['OS_USERNAME'],
    'password': os.environ['OS_PASSWORD'],
    'tenant_name': os.environ['OS_TENANT_NAME'],
    'auth_url': os.environ['OS_AUTH_URL'],
    'region_name': 'RegionOne',
}
argparser = argparse.ArgumentParser()
argparser.add_argument('--imagedir', help='Glance file store image directory',
                       default='/var/lib/glance/images')
argparser.add_argument('--debug', help='Enable API debugging', action='store_true')
argparser.add_argument('--criticality', help='Set sensu alert level, critical is default',
                       default='critical')
options = argparser.parse_args()
store_directory = options.imagedir
if 'OS_CACERT' in os.environ.keys():
    glance_auth['ca_cert'] = os.environ['OS_CACERT']
keystone = ksclient.Client(**glance_auth)
auth_token = keystone.auth_token
endpoint = keystone.service_catalog.url_for(service_type='image',
                                            endpoint_type='publicURL')
if options.debug:
    logging.basicConfig(level=logging.DEBUG)
glance = client.Client('2', endpoint=endpoint, token=auth_token)
glance.format = 'json'
# Fetch the list of files in store_directory matching the UUID regex
uuid_re = re.compile(
    r'[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}')
files = [(x, os.path.join(store_directory, x)) for x in
         os.listdir(store_directory) if uuid_re.match(x)]
# Build (name, size, tz-aware mtime) triples for regular files only.
files = [(x, os.path.getsize(p),
          datetime.fromtimestamp(os.path.getmtime(p), iso8601.Utc()))
         for x, p in files if os.path.isfile(p)]
# Fetch the list of glance images
glance_images = []
kwargs = {'sort_key': 'id', 'sort_dir': 'asc', 'owner': None, 'filters': {}, 'is_public': None}
for x in glance.images.list(**kwargs):
    if x.status == 'active':
        tz_aware_time = parser.parse(x.created_at)
        # Tuples of (id, size, created_at, is_remote).
        glance_images.append((x.id, x.size, tz_aware_time, is_remote_image(x)))
# Check all active images 1 hour or older are present
time_cutoff = datetime.now(iso8601.Utc()) - timedelta(0, 3600)
alert_squelch = datetime.now(iso8601.Utc()) - timedelta(0, 43200) # 12 hours
result = 0
for image in [x for x in glance_images if x[2] < time_cutoff]:
    if not [x for x in files if x[0] == image[0]]:
        if image[3] == False:
            # Remote images never land in the local store, so only local
            # images missing from disk are alerted on.
            print "Glance image %s not found in %s" % (image[0], store_directory)
            result = switch_on_criticality()
# Check all files have a corresponding glance image and ignore brand new / zero size files
for image_file in files:
    if not [x for x in glance_images if x[0] == image_file[0]] and image_file[2] < alert_squelch and image_file[1] > 0:
        print "Unknown file %s found in %s" % (image_file[0], store_directory)
        result = switch_on_criticality()
# Check glance image file sizes match files and ignore difference for squelch
for image in glance_images:
    for image_file in [x for x in files if x[0] == image[0]]:
        if image[1] != image_file[1] and image_file[2] < alert_squelch:
            print "Glance image %s size differs from file on disk" % image[0]
            result = switch_on_criticality()
if result == 0:
    print "Glance image store %s looks good" % store_directory
# The exit code doubles as the sensu status (0 ok, 1 warning, 2 critical).
sys.exit(result)
|
from __future__ import absolute_import
import os
import time
import unittest
from nose.tools import eq_, ok_
import binascii
try:
from Queue import Queue, Empty
except ImportError:
# Python 3
from queue import Queue, Empty
from openxc.tools.common import configure_logging
from openxc.interface import UsbVehicleInterface, BluetoothVehicleInterface
SOURCE = None
def setUpModule():
    """Create and start the module-wide vehicle interface used by all tests."""
    configure_logging()
    # A bit of a hack to let us pass the product ID in at the command line, so
    # we can have 2 devices attached for testing at a time so it's more
    # automated. Set the VI_FUNC_TESTS_USB_PRODUCT_ID environment variable to a
    # number you want to use for the product ID.
    usb_product_id = os.getenv('VI_FUNC_TESTS_USB_PRODUCT_ID', None)
    # NOTE(review): os.getenv returns a string, so any non-empty value (even
    # "0" or "false") selects Bluetooth here -- confirm this is intended.
    use_bluetooth = os.getenv('VI_FUNC_TESTS_USE_BLUETOOTH', False)
    global SOURCE
    if use_bluetooth is not False:
        SOURCE = BluetoothVehicleInterface(payload_format="json")
    else:
        SOURCE = UsbVehicleInterface(payload_format="json", product_id=usb_product_id)
    SOURCE.start()
class ViFunctionalTests(unittest.TestCase):
    """Base class for all VI functional tests.

    Shares the module-wide vehicle interface and routes every received
    message into one of three class-level queues by message shape.
    """

    @classmethod
    def setUpClass(cls):
        super(ViFunctionalTests, cls).setUpClass()
        global SOURCE
        cls.vi = SOURCE
        # All received messages go through the class-level dispatcher below.
        cls.vi.callback = cls.receive

    def setUp(self):
        self.bus = 1
        self.message_id = 0x42
        self.data = "0x1234"
        # Fresh queues per test so messages from earlier tests don't leak in.
        ViFunctionalTests.can_message_queue = Queue()
        ViFunctionalTests.simple_vehicle_message_queue = Queue()
        ViFunctionalTests.diagnostic_response_queue = Queue()
        # Default acceptance-filter state: bus 1 bypassed, bus 2 filtered.
        self.vi.set_acceptance_filter_bypass(1, True)
        self.vi.set_acceptance_filter_bypass(2, False)

    @classmethod
    def receive(cls, message, **kwargs):
        # Dispatch on message shape; the checks run in order, so a message
        # is classified by the first branch whose keys it carries.
        if ('id' in message and 'bus' in message and 'data' in message and
                getattr(cls, 'can_message_queue', None)):
            cls.can_message_queue.put(message)
        elif ('name' in message and 'value' in message):
            cls.simple_vehicle_message_queue.put(message)
        elif ('id' in message and 'bus' in message and 'mode' in message):
            cls.diagnostic_response_queue.put(message)
class ProtobufBaseTests(ViFunctionalTests):
    """Base for tests that run with the binary (protobuf) payload format."""

    @classmethod
    def setUpClass(cls):
        super(ProtobufBaseTests, cls).setUpClass()
        if isinstance(cls.vi, BluetoothVehicleInterface):
            raise unittest.SkipTest("Protobuf commands are not "
                    "supported on the Bluetooth interface")
        # Mirrors JsonBaseTests: if the switch fails, the VI may still be in
        # the other format -- re-sync our local format and retry once.
        # (Removed a redundant unconditional set_payload_format("protobuf")
        # call that preceded this check.)
        if not cls.vi.set_payload_format("protobuf"):
            cls.vi.format = "json"
            cls.vi.set_payload_format("protobuf")
class JsonBaseTests(ViFunctionalTests):
    """Base for tests that run with the JSON payload format."""

    @classmethod
    def setUpClass(cls):
        super(JsonBaseTests, cls).setUpClass()
        if not cls.vi.set_payload_format("json"):
            # The VI may still be in protobuf mode from a previous run;
            # re-sync the local format and retry once.
            cls.vi.format = "protobuf"
            cls.vi.set_payload_format("json")
class ControlCommandTests(object):
    """Mixin exercising the version and device-id control commands."""

    def test_version(self):
        # TODO it'd be nice to read this from src/version.c
        eq_(self.vi.version(), "7.0.0-dev (functional_tests)")

    def test_device_id(self):
        device_id = self.vi.device_id()
        ok_(device_id is not None)
        if device_id != "Unknown":
            # A known device ID is expected to be exactly 12 characters.
            eq_(len(device_id), 12)
class ControlCommandTestsJson(JsonBaseTests, ControlCommandTests):
    # Control command tests, run in JSON payload mode.
    pass
class ControlCommandTestsProtobuf(ProtobufBaseTests, ControlCommandTests):
    # Control command tests, run in protobuf payload mode.
    pass
class CanMessageTests(object):
    """Mixin for raw CAN send/receive tests, including acceptance-filter behavior."""

    def _check_received_message(self, message):
        # The looped-back message must match what was just written.
        eq_(self.message_id, message['id'])
        eq_(self.bus, message['bus'])
        eq_(self.data, message['data'])
        self.can_message_queue.task_done()

    def test_send_and_receive_can_message(self):
        ok_(self.vi.set_acceptance_filter_bypass(1, False))
        ok_(self.vi.write(bus=self.bus, id=self.message_id, data=self.data))
        self._check_received_message(self.can_message_queue.get(timeout=.5))

    def test_nonmatching_can_message_received_on_unfiltered_bus(self):
        ok_(self.vi.set_acceptance_filter_bypass(1, True))
        self.message_id += 1
        ok_(self.vi.write(bus=self.bus, id=self.message_id, data=self.data))
        self._check_received_message(self.can_message_queue.get(timeout=.5))

    def test_matching_can_message_received_on_filtered_bus(self):
        ok_(self.vi.set_acceptance_filter_bypass(2, False))
        self.message_id = 0x43
        self.bus = 2
        ok_(self.vi.write(bus=self.bus, id=self.message_id, data=self.data))
        self._check_received_message(self.can_message_queue.get(timeout=.5))

    def test_nonmatching_can_message_not_received_on_filtered_bus(self):
        ok_(self.vi.set_acceptance_filter_bypass(2, False))
        self.bus = 2
        ok_(self.vi.write(bus=self.bus, id=self.message_id, data=self.data))
        try:
            message = self.can_message_queue.get(timeout=.5)
        except Empty:
            # Expected: the filter dropped the message.
            pass
        else:
            eq_(None, message)
            self.can_message_queue.task_done()

    def test_send_and_receive_extended_can_frame(self):
        # IDs above 0x7ff require an extended (29-bit) CAN frame.
        self.message_id = 0x809
        ok_(self.vi.write(bus=self.bus, id=self.message_id, data=self.data))
        self._check_received_message(self.can_message_queue.get(timeout=.5))

    def test_send_and_receive_extended_can_frame_after_toggling_af(self):
        self.message_id = 0x809
        ok_(self.vi.set_acceptance_filter_bypass(1, False))
        ok_(self.vi.write(bus=self.bus, id=self.message_id, data=self.data))
        message = None
        try:
            message = self.can_message_queue.get(timeout=.5)
        except Empty:
            pass
        ok_(message is None)
        ok_(self.vi.set_acceptance_filter_bypass(1, True))
        ok_(self.vi.write(bus=self.bus, id=self.message_id, data=self.data))
        self._check_received_message(self.can_message_queue.get(timeout=.5))
class CanMessageTestsJson(JsonBaseTests, CanMessageTests):
    # CAN message tests, run in JSON payload mode.
    pass
class CanMessageTestsProtobuf(ProtobufBaseTests, CanMessageTests):
    # CAN message tests, run in protobuf payload mode.
    pass
class SimpleVehicleMessageTests(object):
    """Mixin checking that raw CAN writes decode into simple vehicle messages."""

    def setUp(self):
        self.message1_data = "0x0000000000000dac"
        self.expected_signal_value = 0xdac

    def test_receive_simple_vehicle_message_bus1(self):
        ok_(self.vi.write(bus=1, id=0x42, data=self.message1_data))
        message = self.simple_vehicle_message_queue.get(timeout=.5)
        eq_(message['name'], "signal1")
        eq_(message['value'], self.expected_signal_value)
        self.simple_vehicle_message_queue.task_done()

    def test_receive_simple_vehicle_message_bus2(self):
        message_data = "0x8000000000000000"
        ok_(self.vi.write(bus=2, id=0x43, data=message_data))
        message = self.simple_vehicle_message_queue.get(timeout=.5)
        eq_(message['name'], "signal2")
        eq_(message['value'], 1)
        self.simple_vehicle_message_queue.task_done()
class SimpleVehicleMessageTestsJson(JsonBaseTests, SimpleVehicleMessageTests):
    # Simple vehicle message tests, run in JSON payload mode.

    def setUp(self):
        super(SimpleVehicleMessageTestsJson, self).setUp()
        # ViFunctionalTests.setUp does not call super(), so the mixin's setUp
        # is never reached via the MRO and must be invoked explicitly.
        SimpleVehicleMessageTests.setUp(self)
class SimpleVehicleMessageTestsProtobuf(ProtobufBaseTests,
        SimpleVehicleMessageTests):
    # Simple vehicle message tests, run in protobuf payload mode.

    def setUp(self):
        super(SimpleVehicleMessageTestsProtobuf, self).setUp()
        # ViFunctionalTests.setUp does not call super(), so the mixin's setUp
        # is never reached via the MRO and must be invoked explicitly.
        SimpleVehicleMessageTests.setUp(self)
class DiagnosticRequestTests(object):
    """Mixin exercising one-shot and recurring diagnostic requests and responses."""

    def setUp(self):
        self.message_id = 0x121
        self.mode = 3
        self.bus = 1
        self.pid = 1
        self.payload = bytearray([0x12, 0x34])

    def test_diagnostic_request(self):
        # This test is done with bus 1, since that has the CAN AF off, so we
        # can receive the sent message (via loopback) to validate it matches the
        # request.
        ok_(self.vi.create_diagnostic_request(self.message_id, self.mode,
                bus=self.bus, pid=self.pid, payload=self.payload))
        message = self.can_message_queue.get(timeout=.5)
        eq_(self.message_id, message['id'])
        eq_(self.bus, message['bus'])
        # Expected frame: 0x04 length byte, then mode, pid, payload, zero-padded.
        eq_("0x04%02x%02x%s000000" % (self.mode, self.pid,
            binascii.hexlify(self.payload)), message['data'])
        self.can_message_queue.task_done()

    def test_diagnostic_request_changes_acceptance_filters(self):
        # This test is done with bus 2, since that has the CAN AF ON, so we
        # make sure the VI can change the AF to accept responses.
        # We use bus 2 since that should still have the AF on.
        self.bus = 2
        ok_(self.vi.create_diagnostic_request(self.message_id, self.mode,
                bus=self.bus, pid=self.pid, payload=self.payload))
        # send the response, which should be accepted by the AF
        response_id = self.message_id + 0x8
        # we don't care about the payload at this point, just want to make sure
        # we receive the raw CAN message
        ok_(self.vi.write(bus=self.bus, id=response_id, data="0xabcd"))
        message = None
        # NOTE(review): after 'continue' the while condition sees a non-None
        # message and exits, so the loop cannot actually skip the request
        # frame and keep waiting -- confirm the intended behavior.
        while message is None:
            message = self.can_message_queue.get(timeout=.5)
            if message['id'] == self.message_id:
                # skip the request
                continue
            elif message['id'] == response_id:
                break
        ok_(message is not None)
        self.can_message_queue.task_done()

    def test_receive_diagnostic_response(self):
        ok_(self.vi.create_diagnostic_request(self.message_id, self.mode, bus=self.bus,
                pid=self.pid, payload=self.payload))
        response_id = self.message_id + 0x8
        # we don't care about the payload at this point, just want to make sure
        # we receive the raw CAN message
        ok_(self.vi.write(bus=self.bus, id=response_id, data="0x03430142"))
        response = self.diagnostic_response_queue.get(timeout=.5)
        eq_(self.message_id, response['id'])
        eq_(self.bus, response['bus'])
        eq_(self.mode, response['mode'])
        eq_(self.pid, response['pid'])
        ok_(response['success'])
        eq_("0x42", response['payload'])
        self.diagnostic_response_queue.task_done()

    def test_receive_obd_formatted_diagnostic_response(self):
        self.pid = 0xa
        self.mode = 1
        ok_(self.vi.create_diagnostic_request(self.message_id, self.mode, bus=self.bus,
                pid=self.pid, payload=self.payload, decoded_type="obd2"))
        response_id = self.message_id + 0x8
        response_value = 0x42
        ok_(self.vi.write(bus=self.bus, id=response_id, data="0x034%01x%02x%02x" % (
            self.mode, self.pid, response_value)))
        response = self.diagnostic_response_queue.get(timeout=.5)
        eq_(self.message_id, response['id'])
        eq_(self.bus, response['bus'])
        eq_(self.mode, response['mode'])
        eq_(self.pid, response['pid'])
        ok_(response['success'])
        # The expected decoded value is 3x the raw response byte.
        eq_(response_value * 3, response['value'])
        self.diagnostic_response_queue.task_done()

    def test_create_recurring_request(self):
        try:
            ok_(self.vi.create_diagnostic_request(self.message_id, self.mode,
                    bus=self.bus, pid=self.pid, payload=self.payload,
                    frequency=10))
            # At 10 Hz we should see several copies within the timeouts below.
            for _ in range(5):
                message = self.can_message_queue.get(timeout=.5)
                eq_(self.message_id, message['id'])
                eq_(self.bus, message['bus'])
                eq_("0x04%02x%02x%s000000" % (self.mode, self.pid,
                    binascii.hexlify(self.payload)), message['data'])
                self.can_message_queue.task_done()
        finally:
            # Always remove the recurring request so it can't leak into
            # subsequent tests.
            ok_(self.vi.delete_diagnostic_request(self.message_id, self.mode,
                    bus=self.bus, pid=self.pid))

    def test_cancel_diagnostic_request(self):
        ok_(self.vi.create_diagnostic_request(self.message_id, self.mode,
                bus=self.bus, pid=self.pid, payload=self.payload,
                frequency=5))
        time.sleep(1)
        ok_(self.vi.delete_diagnostic_request(self.message_id, self.mode,
                bus=self.bus, pid=self.pid))
        # Drop anything queued before the delete, then expect silence.
        ViFunctionalTests.can_message_queue = Queue()
        try:
            self.can_message_queue.get(timeout=.5)
        except Empty:
            pass
        else:
            ok_(False)
class DiagnosticRequestTestsJson(JsonBaseTests, DiagnosticRequestTests):
    # Diagnostic request tests, run in JSON payload mode.

    def setUp(self):
        super(DiagnosticRequestTestsJson, self).setUp()
        # ViFunctionalTests.setUp does not call super(), so the mixin's setUp
        # must be invoked explicitly.
        DiagnosticRequestTests.setUp(self)
class DiagnosticRequestTestsProtobuf(ProtobufBaseTests, DiagnosticRequestTests):
    # Diagnostic request tests, run in protobuf payload mode.

    def setUp(self):
        super(DiagnosticRequestTestsProtobuf, self).setUp()
        # ViFunctionalTests.setUp does not call super(), so the mixin's setUp
        # must be invoked explicitly.
        DiagnosticRequestTests.setUp(self)
class CanAcceptanceFilterChangeTests(object):
    """Mixin verifying acceptance-filter toggling takes effect immediately."""

    def _check_received_message(self, message):
        # The looped-back message must match what was just written.
        eq_(self.message_id, message['id'])
        eq_(self.bus, message['bus'])
        eq_(self.data, message['data'])
        self.can_message_queue.task_done()

    def test_message_not_received_after_filters_enabled(self):
        ok_(self.vi.set_acceptance_filter_bypass(1, False))
        # Should receive only 42
        ok_(self.vi.write(bus=self.bus, id=self.message_id, data=self.data))
        self._check_received_message(self.can_message_queue.get(timeout=.5))
        ok_(self.vi.write(bus=self.bus, id=self.message_id + 1, data=self.data))
        try:
            self.can_message_queue.get(timeout=.5)
        except Empty:
            # Expected: the non-matching ID was filtered out.
            pass
        else:
            ok_(False)

    def test_nonmatching_can_message_received_on_unfiltered_bus(self):
        ok_(self.vi.set_acceptance_filter_bypass(1, True))
        self.message_id += 1
        ok_(self.vi.write(bus=self.bus, id=self.message_id, data=self.data) > 0)
        self._check_received_message(self.can_message_queue.get(timeout=.5))
class CanAcceptanceFilterChangeTestsJson(JsonBaseTests,
        CanAcceptanceFilterChangeTests):
    # Acceptance-filter change tests, run in JSON payload mode.
    pass
class CanAcceptanceFilterChangeTestsProtobuf(ProtobufBaseTests,
        CanAcceptanceFilterChangeTests):
    # Acceptance-filter change tests, run in protobuf payload mode.
    pass
class PayloadFormatTests(ViFunctionalTests):
    """Tests switching the payload format at runtime; always restores JSON."""

    def tearDown(self):
        # Leave the VI in JSON mode for whatever test runs next.
        ok_(self.vi.set_payload_format("json"))

    def _check_received_message(self, message):
        eq_(self.message_id, message['id'])
        eq_(self.bus, message['bus'])
        eq_(self.data, message['data'])
        self.can_message_queue.task_done()

    def test_change_to_binary(self):
        if isinstance(self.vi, BluetoothVehicleInterface):
            raise unittest.SkipTest("Protobuf commands are not "
                    "supported on the Bluetooth interface")
        ok_(self.vi.set_payload_format("protobuf"))
        ok_(self.vi.write(bus=self.bus, id=self.message_id, data=self.data) > 0)
        self._check_received_message(self.can_message_queue.get(timeout=.5))

    def test_change_to_json(self):
        ok_(self.vi.set_payload_format("json"))
        ok_(self.vi.write(bus=self.bus, id=self.message_id, data=self.data) > 0)
        self._check_received_message(self.can_message_queue.get(timeout=.5))
class PredefinedObd2RequestsTests(object):
    """Mixin for the VI's built-in recurring OBD-II request support."""

    def tearDown(self):
        # Always disable the recurring requests so they can't leak into
        # other tests.
        ok_(self.vi.set_predefined_obd2_requests(False))

    def test_enable_predefined_obd2_requests_sends_messages(self):
        ok_(self.vi.set_predefined_obd2_requests(True))
        # 0x7df is the OBD-II broadcast ID used by the predefined requests.
        message = self.can_message_queue.get(timeout=6)
        eq_(0x7df, message['id'])
        eq_(1, message['bus'])
        eq_(u"0x02010d0000000000", message['data'])
        self.can_message_queue.task_done()

    def test_disable_predefined_obd2_requests_stops_messages(self):
        ok_(self.vi.set_predefined_obd2_requests(True))
        message = self.can_message_queue.get(timeout=5)
        ok_(message is not None)
        ok_(self.vi.set_predefined_obd2_requests(False))
        # Drain anything queued before the disable took effect...
        try:
            while self.can_message_queue.get(timeout=.5) is None:
                continue
        except Empty:
            pass
        # ...then expect silence.
        message = None
        try:
            message = self.can_message_queue.get(timeout=6)
        except Empty:
            pass
        eq_(None, message)

    def test_pid_request_set_after_support_query(self):
        # TODO test that proper PID requests are sent if we response to the
        # query properly. It's tough because we have to response in 100ms from
        # the request, which with the delays on USB is difficult.
        pass

    def test_simple_vehicle_message_response_for_pid(self):
        # TODO test that proper simple vehicle messages are sent if we reply to
        # the PID request. difficult because of the reasons mentioned above.
        pass
class PredefinedObd2RequestsTestsJson(JsonBaseTests,
        PredefinedObd2RequestsTests):
    def tearDown(self):
        # Chain both tearDowns: the mixin is not in the super() MRO chain here.
        super(PredefinedObd2RequestsTestsJson, self).tearDown()
        PredefinedObd2RequestsTests.tearDown(self)
class PredefinedObd2RequestsTestsProtobuf(ProtobufBaseTests,
        PredefinedObd2RequestsTests):
    def tearDown(self):
        # Chain both tearDowns: the mixin is not in the super() MRO chain here.
        super(PredefinedObd2RequestsTestsProtobuf, self).tearDown()
        PredefinedObd2RequestsTests.tearDown(self)
class SignalDecoderTests(object):
    """Mixin checking that a custom signal decoder emits its own message."""

    def test_decoder_publishes_its_own_message(self):
        bus = 1
        message_id = 0x49
        data = "0x00000000000000ff"
        ok_(self.vi.write(bus=bus, id=message_id, data=data))
        # The decoder should translate the raw frame into a named event message.
        message = self.simple_vehicle_message_queue.get(timeout=.5)
        eq_(message['name'], "tire_pressure")
        eq_(message['value'], "front_left")
        eq_(message['event'], 0xff)
        self.simple_vehicle_message_queue.task_done()
class SignalDecoderTestsJson(JsonBaseTests, SignalDecoderTests):
    """Signal decoder tests with the JSON payload format."""
    pass
class SignalDecoderTestsProtobuf(ProtobufBaseTests, SignalDecoderTests):
    """Signal decoder tests with the protobuf payload format."""
    pass
class ManySignalsPerMessageTests(JsonBaseTests):
    """See https://github.com/openxc/vi-firmware/issues/306
    """

    def setUp(self):
        super(ManySignalsPerMessageTests, self).setUp()
        # Two 8-byte frames carrying 8 packed signals each (16 total).
        self.message1_data = "0x3901A40351033204"
        self.message2_data = "0xB3033403CA01E001"

    def test_receive_all_16_signals(self):
        ok_(self.vi.write(bus=1, id=0x663, data=self.message1_data))
        ok_(self.vi.write(bus=1, id=0x664, data=self.message2_data))
        # Collect every translated signal name until the queue goes quiet.
        keys = set()
        while True:
            try:
                message = self.simple_vehicle_message_queue.get(timeout=.5)
                keys.add(message['name'])
                self.simple_vehicle_message_queue.task_done()
            except Empty:
                break
        # range(1, 17): the previous range(1, 16) stopped at signal15 and
        # silently never verified signal16, despite the test's name.
        for i in range(1, 17):
            ok_("signal%d" % i in keys, "Missing signal %d" % i)
# Clear predefined OBD-II message status for each functional test.
from __future__ import absolute_import
import os
import time
import unittest
from nose.tools import eq_, ok_
import binascii
try:
from Queue import Queue, Empty
except ImportError:
# Python 3
from queue import Queue, Empty
from openxc.tools.common import configure_logging
from openxc.interface import UsbVehicleInterface, BluetoothVehicleInterface
# Module-wide vehicle interface instance, created once in setUpModule().
SOURCE = None
def setUpModule():
    """Create and start the single vehicle interface shared by every test."""
    configure_logging()
    # A bit of a hack to let us pass the product ID in at the command line, so
    # we can have 2 devices attached for testing at a time so it's more
    # automated. Set the VI_FUNC_TESTS_USB_PRODUCT_ID environment variable to a
    # number you want to use for the product ID.
    usb_product_id = os.getenv('VI_FUNC_TESTS_USB_PRODUCT_ID', None)
    # NOTE(review): environment variables are strings, so ANY set value (even
    # "" or "0") selects Bluetooth below; only an unset variable keeps USB.
    use_bluetooth = os.getenv('VI_FUNC_TESTS_USE_BLUETOOTH', False)
    global SOURCE
    if use_bluetooth is not False:
        SOURCE = BluetoothVehicleInterface(payload_format="json")
    else:
        SOURCE = UsbVehicleInterface(payload_format="json", product_id=usb_product_id)
    SOURCE.start()
class ViFunctionalTests(unittest.TestCase):
    """Base class for all VI functional tests.

    Holds the shared vehicle interface (created in setUpModule) and sorts
    incoming messages into per-type queues that the tests consume.
    """

    @classmethod
    def setUpClass(cls):
        super(ViFunctionalTests, cls).setUpClass()
        global SOURCE
        cls.vi = SOURCE
        # Every message the interface receives is routed through receive().
        cls.vi.callback = cls.receive

    def setUp(self):
        self.bus = 1
        self.message_id = 0x42
        self.data = "0x1234"
        # Fresh queues per test; set on this base class explicitly because
        # receive() is a classmethod and looks the queues up on cls.
        ViFunctionalTests.can_message_queue = Queue()
        ViFunctionalTests.simple_vehicle_message_queue = Queue()
        ViFunctionalTests.diagnostic_response_queue = Queue()
        # Known filter state: bus 1 unfiltered, bus 2 filtered.
        self.vi.set_acceptance_filter_bypass(1, True)
        self.vi.set_acceptance_filter_bypass(2, False)
        ok_(self.vi.set_predefined_obd2_requests(False))

    @classmethod
    def receive(cls, message, **kwargs):
        # Dispatch on message shape: raw CAN frame, translated "simple
        # vehicle message", or diagnostic response.
        if ('id' in message and 'bus' in message and 'data' in message and
                getattr(cls, 'can_message_queue', None)):
            cls.can_message_queue.put(message)
        elif ('name' in message and 'value' in message):
            cls.simple_vehicle_message_queue.put(message)
        elif ('id' in message and 'bus' in message and 'mode' in message):
            cls.diagnostic_response_queue.put(message)
class ProtobufBaseTests(ViFunctionalTests):
    """Base for tests that run with the protobuf (binary) payload format."""

    @classmethod
    def setUpClass(cls):
        super(ProtobufBaseTests, cls).setUpClass()
        if isinstance(cls.vi, BluetoothVehicleInterface):
            raise unittest.SkipTest("Protobuf commands are not "
                    "supported on the Bluetooth interface")
        # If the VI doesn't acknowledge the command, it is probably still
        # speaking JSON from a previous run -- switch our parser to match and
        # retry (mirrors JsonBaseTests.setUpClass). The previous version also
        # issued a redundant unconditional set_payload_format("protobuf")
        # before this check; that extra command has been removed.
        if not cls.vi.set_payload_format("protobuf"):
            cls.vi.format = "json"
            cls.vi.set_payload_format("protobuf")
class JsonBaseTests(ViFunctionalTests):
    """Base for tests that run with the JSON payload format."""

    @classmethod
    def setUpClass(cls):
        super(JsonBaseTests, cls).setUpClass()
        # If the VI doesn't acknowledge the command, it is probably still
        # speaking protobuf -- switch our parser to match and retry.
        if not cls.vi.set_payload_format("json"):
            cls.vi.format = "protobuf"
            cls.vi.set_payload_format("json")
class ControlCommandTests(object):
    """Mixin exercising the version and device ID control commands."""

    def test_version(self):
        # TODO it'd be nice to read this from src/version.c
        eq_(self.vi.version(), "7.0.0-dev (functional_tests)")

    def test_device_id(self):
        reported_id = self.vi.device_id()
        ok_(reported_id is not None)
        if reported_id == "Unknown":
            # Some interfaces cannot report a real ID; nothing more to check.
            return
        eq_(len(reported_id), 12)
class ControlCommandTestsJson(JsonBaseTests, ControlCommandTests):
    """Control command tests with the JSON payload format."""
    pass
class ControlCommandTestsProtobuf(ProtobufBaseTests, ControlCommandTests):
    """Control command tests with the protobuf payload format."""
    pass
class CanMessageTests(object):
    """Mixin exercising raw CAN send/receive and acceptance filter behavior."""

    def _check_received_message(self, message):
        # Loopback check: the received raw frame must match what was sent.
        eq_(self.message_id, message['id'])
        eq_(self.bus, message['bus'])
        eq_(self.data, message['data'])
        self.can_message_queue.task_done()

    def test_send_and_receive_can_message(self):
        # 0x42 is a configured message, so it passes even with the AF enabled.
        ok_(self.vi.set_acceptance_filter_bypass(1, False))
        ok_(self.vi.write(bus=self.bus, id=self.message_id, data=self.data))
        self._check_received_message(self.can_message_queue.get(timeout=.5))

    def test_nonmatching_can_message_received_on_unfiltered_bus(self):
        ok_(self.vi.set_acceptance_filter_bypass(1, True))
        # An unconfigured ID still gets through with the filter bypassed.
        self.message_id += 1
        ok_(self.vi.write(bus=self.bus, id=self.message_id, data=self.data))
        self._check_received_message(self.can_message_queue.get(timeout=.5))

    def test_matching_can_message_received_on_filtered_bus(self):
        ok_(self.vi.set_acceptance_filter_bypass(2, False))
        # 0x43 is the message configured for bus 2.
        self.message_id = 0x43
        self.bus = 2
        ok_(self.vi.write(bus=self.bus, id=self.message_id, data=self.data))
        self._check_received_message(self.can_message_queue.get(timeout=.5))

    def test_nonmatching_can_message_not_received_on_filtered_bus(self):
        ok_(self.vi.set_acceptance_filter_bypass(2, False))
        self.bus = 2
        # 0x42 is not configured for bus 2, so the filter should drop it.
        ok_(self.vi.write(bus=self.bus, id=self.message_id, data=self.data))
        try:
            message = self.can_message_queue.get(timeout=.5)
        except Empty:
            pass
        else:
            # Anything received here is a failure; eq_ will raise.
            eq_(None, message)
            self.can_message_queue.task_done()

    def test_send_and_receive_extended_can_frame(self):
        # IDs above 0x7ff require the 29-bit extended frame format.
        self.message_id = 0x809
        ok_(self.vi.write(bus=self.bus, id=self.message_id, data=self.data))
        self._check_received_message(self.can_message_queue.get(timeout=.5))

    def test_send_and_receive_extended_can_frame_after_toggling_af(self):
        self.message_id = 0x809
        # With the AF on, the unconfigured extended ID must be dropped...
        ok_(self.vi.set_acceptance_filter_bypass(1, False))
        ok_(self.vi.write(bus=self.bus, id=self.message_id, data=self.data))
        message = None
        try:
            message = self.can_message_queue.get(timeout=.5)
        except Empty:
            pass
        ok_(message is None)
        # ...and received again once the bypass is re-enabled.
        ok_(self.vi.set_acceptance_filter_bypass(1, True))
        ok_(self.vi.write(bus=self.bus, id=self.message_id, data=self.data))
        self._check_received_message(self.can_message_queue.get(timeout=.5))
class SimpleVehicleMessageTests(object):
    """Mixin checking translation of raw frames into named signal messages."""

    def setUp(self):
        # 0xdac packed into the low bytes of the frame for "signal1".
        self.message1_data = "0x0000000000000dac"
        self.expected_signal_value = 0xdac

    def test_receive_simple_vehicle_message_bus1(self):
        ok_(self.vi.write(bus=1, id=0x42, data=self.message1_data))
        message = self.simple_vehicle_message_queue.get(timeout=.5)
        eq_(message['name'], "signal1")
        eq_(message['value'], self.expected_signal_value)
        self.simple_vehicle_message_queue.task_done()

    def test_receive_simple_vehicle_message_bus2(self):
        # High bit set decodes to a value of 1 for "signal2" on bus 2.
        message_data = "0x8000000000000000"
        ok_(self.vi.write(bus=2, id=0x43, data=message_data))
        message = self.simple_vehicle_message_queue.get(timeout=.5)
        eq_(message['name'], "signal2")
        eq_(message['value'], 1)
        self.simple_vehicle_message_queue.task_done()
class SimpleVehicleMessageTestsJson(JsonBaseTests, SimpleVehicleMessageTests):
    def setUp(self):
        # Chain both setUps: the mixin is not in the super() MRO chain here.
        super(SimpleVehicleMessageTestsJson, self).setUp()
        SimpleVehicleMessageTests.setUp(self)
class SimpleVehicleMessageTestsProtobuf(ProtobufBaseTests,
        SimpleVehicleMessageTests):
    def setUp(self):
        # Chain both setUps: the mixin is not in the super() MRO chain here.
        super(SimpleVehicleMessageTestsProtobuf, self).setUp()
        SimpleVehicleMessageTests.setUp(self)
class DiagnosticRequestTests(object):
    """Mixin exercising one-shot and recurring diagnostic requests.

    Responses to a request on arbitration ID N are expected on ID N + 0x8.
    """

    def setUp(self):
        self.message_id = 0x121
        self.mode = 3
        self.bus = 1
        self.pid = 1
        self.payload = bytearray([0x12, 0x34])

    def test_diagnostic_request(self):
        # This test is done with bus 1, since that has the CAN AF off, so we
        # can receive the sent message (via loopback) to validate it matches the
        # request.
        ok_(self.vi.create_diagnostic_request(self.message_id, self.mode,
                bus=self.bus, pid=self.pid, payload=self.payload))
        message = self.can_message_queue.get(timeout=.5)
        eq_(self.message_id, message['id'])
        eq_(self.bus, message['bus'])
        # ISO-TP single frame: length 0x04, then mode, PID and the payload.
        eq_("0x04%02x%02x%s000000" % (self.mode, self.pid,
                binascii.hexlify(self.payload)), message['data'])
        self.can_message_queue.task_done()

    def test_diagnostic_request_changes_acceptance_filters(self):
        # This test is done with bus 2, since that has the CAN AF ON, so we
        # make sure the VI can change the AF to accept responses.
        self.bus = 2
        ok_(self.vi.create_diagnostic_request(self.message_id, self.mode,
                bus=self.bus, pid=self.pid, payload=self.payload))
        # send the response, which should be accepted by the AF
        response_id = self.message_id + 0x8
        # we don't care about the payload at this point, just want to make sure
        # we receive the raw CAN message
        ok_(self.vi.write(bus=self.bus, id=response_id, data="0xabcd"))
        # Loop until the response shows up, skipping the looped-back request.
        # The previous `while message is None` loop exited after the FIRST
        # message regardless, because `continue` re-tested a condition that
        # was already false once `message` was assigned.
        message = None
        while True:
            message = self.can_message_queue.get(timeout=.5)
            if message['id'] == response_id:
                break
        ok_(message is not None)
        self.can_message_queue.task_done()

    def test_receive_diagnostic_response(self):
        ok_(self.vi.create_diagnostic_request(self.message_id, self.mode,
                bus=self.bus, pid=self.pid, payload=self.payload))
        response_id = self.message_id + 0x8
        # Simulate the ECU's positive response (mode | 0x40 = 0x43).
        ok_(self.vi.write(bus=self.bus, id=response_id, data="0x03430142"))
        response = self.diagnostic_response_queue.get(timeout=.5)
        eq_(self.message_id, response['id'])
        eq_(self.bus, response['bus'])
        eq_(self.mode, response['mode'])
        eq_(self.pid, response['pid'])
        ok_(response['success'])
        eq_("0x42", response['payload'])
        self.diagnostic_response_queue.task_done()

    def test_receive_obd_formatted_diagnostic_response(self):
        self.pid = 0xa
        self.mode = 1
        ok_(self.vi.create_diagnostic_request(self.message_id, self.mode,
                bus=self.bus, pid=self.pid, payload=self.payload,
                decoded_type="obd2"))
        response_id = self.message_id + 0x8
        response_value = 0x42
        ok_(self.vi.write(bus=self.bus, id=response_id, data="0x034%01x%02x%02x" % (
                self.mode, self.pid, response_value)))
        response = self.diagnostic_response_queue.get(timeout=.5)
        eq_(self.message_id, response['id'])
        eq_(self.bus, response['bus'])
        eq_(self.mode, response['mode'])
        eq_(self.pid, response['pid'])
        ok_(response['success'])
        # OBD-II decoding scales the raw byte (PID 0xa multiplies by 3).
        eq_(response_value * 3, response['value'])
        self.diagnostic_response_queue.task_done()

    def test_create_recurring_request(self):
        try:
            # At 10Hz we should see a steady stream of identical requests.
            ok_(self.vi.create_diagnostic_request(self.message_id, self.mode,
                    bus=self.bus, pid=self.pid, payload=self.payload,
                    frequency=10))
            for _ in range(5):
                message = self.can_message_queue.get(timeout=.5)
                eq_(self.message_id, message['id'])
                eq_(self.bus, message['bus'])
                eq_("0x04%02x%02x%s000000" % (self.mode, self.pid,
                        binascii.hexlify(self.payload)), message['data'])
                self.can_message_queue.task_done()
        finally:
            ok_(self.vi.delete_diagnostic_request(self.message_id, self.mode,
                    bus=self.bus, pid=self.pid))

    def test_cancel_diagnostic_request(self):
        ok_(self.vi.create_diagnostic_request(self.message_id, self.mode,
                bus=self.bus, pid=self.pid, payload=self.payload,
                frequency=5))
        time.sleep(1)
        ok_(self.vi.delete_diagnostic_request(self.message_id, self.mode,
                bus=self.bus, pid=self.pid))
        # Throw away anything queued before the delete, then expect silence.
        ViFunctionalTests.can_message_queue = Queue()
        try:
            self.can_message_queue.get(timeout=.5)
        except Empty:
            pass
        else:
            ok_(False)
class DiagnosticRequestTestsJson(JsonBaseTests, DiagnosticRequestTests):
    def setUp(self):
        # Chain both setUps: the mixin is not in the super() MRO chain here.
        super(DiagnosticRequestTestsJson, self).setUp()
        DiagnosticRequestTests.setUp(self)
class DiagnosticRequestTestsProtobuf(ProtobufBaseTests, DiagnosticRequestTests):
    def setUp(self):
        # Chain both setUps: the mixin is not in the super() MRO chain here.
        super(DiagnosticRequestTestsProtobuf, self).setUp()
        DiagnosticRequestTests.setUp(self)
class CanAcceptanceFilterChangeTests(object):
    """Mixin verifying that acceptance filter changes take effect at runtime."""

    def _check_received_message(self, message):
        # Loopback check: the received raw frame must match what was sent.
        eq_(self.message_id, message['id'])
        eq_(self.bus, message['bus'])
        eq_(self.data, message['data'])
        self.can_message_queue.task_done()

    def test_message_not_received_after_filters_enabled(self):
        ok_(self.vi.set_acceptance_filter_bypass(1, False))
        # Should receive only 42
        ok_(self.vi.write(bus=self.bus, id=self.message_id, data=self.data))
        self._check_received_message(self.can_message_queue.get(timeout=.5))
        # The neighboring, unconfigured ID must now be filtered out.
        ok_(self.vi.write(bus=self.bus, id=self.message_id + 1, data=self.data))
        try:
            self.can_message_queue.get(timeout=.5)
        except Empty:
            pass
        else:
            ok_(False)

    def test_nonmatching_can_message_received_on_unfiltered_bus(self):
        ok_(self.vi.set_acceptance_filter_bypass(1, True))
        # With the bypass back on, the unconfigured ID gets through again.
        self.message_id += 1
        ok_(self.vi.write(bus=self.bus, id=self.message_id, data=self.data) > 0)
        self._check_received_message(self.can_message_queue.get(timeout=.5))
class CanAcceptanceFilterChangeTestsJson(JsonBaseTests,
        CanAcceptanceFilterChangeTests):
    """Acceptance filter change tests with the JSON payload format."""
    pass
class CanAcceptanceFilterChangeTestsProtobuf(ProtobufBaseTests,
        CanAcceptanceFilterChangeTests):
    """Acceptance filter change tests with the protobuf payload format."""
    pass
class PayloadFormatTests(ViFunctionalTests):
    """Exercise switching the VI between the JSON and protobuf payload formats."""

    def tearDown(self):
        # Always restore JSON so subsequent test classes start in a known format.
        ok_(self.vi.set_payload_format("json"))

    def _check_received_message(self, message):
        # Loopback check: the raw CAN message received must match what was sent.
        eq_(self.message_id, message['id'])
        eq_(self.bus, message['bus'])
        eq_(self.data, message['data'])
        self.can_message_queue.task_done()

    def test_change_to_binary(self):
        if isinstance(self.vi, BluetoothVehicleInterface):
            raise unittest.SkipTest("Protobuf commands are not "
                    "supported on the Bluetooth interface")
        ok_(self.vi.set_payload_format("protobuf"))
        ok_(self.vi.write(bus=self.bus, id=self.message_id, data=self.data) > 0)
        self._check_received_message(self.can_message_queue.get(timeout=.5))

    def test_change_to_json(self):
        ok_(self.vi.set_payload_format("json"))
        ok_(self.vi.write(bus=self.bus, id=self.message_id, data=self.data) > 0)
        self._check_received_message(self.can_message_queue.get(timeout=.5))
class PredefinedObd2RequestsTests(object):
    """Mixin for the firmware's built-in recurring OBD-II request support."""

    def tearDown(self):
        # Make sure the recurring requests are off for subsequent tests.
        ok_(self.vi.set_predefined_obd2_requests(False))

    def test_enable_predefined_obd2_requests_sends_messages(self):
        ok_(self.vi.set_predefined_obd2_requests(True))
        # Expect a functional broadcast request (0x7df) within the polling window.
        message = self.can_message_queue.get(timeout=6)
        eq_(0x7df, message['id'])
        eq_(1, message['bus'])
        # ISO-TP single frame: length 0x02, mode 0x01, PID 0x0d.
        eq_(u"0x02010d0000000000", message['data'])
        self.can_message_queue.task_done()

    def test_disable_predefined_obd2_requests_stops_messages(self):
        ok_(self.vi.set_predefined_obd2_requests(True))
        message = self.can_message_queue.get(timeout=5)
        ok_(message is not None)
        ok_(self.vi.set_predefined_obd2_requests(False))
        # NOTE(review): Queue.get returns a message (never None) on success, so
        # this drain loop exits after at most one message -- confirm the intent
        # was to drain the whole queue.
        try:
            while self.can_message_queue.get(timeout=.5) is None:
                continue
        except Empty:
            pass
        # After disabling, no further requests should arrive.
        message = None
        try:
            message = self.can_message_queue.get(timeout=6)
        except Empty:
            pass
        eq_(None, message)

    def test_pid_request_set_after_support_query(self):
        # TODO test that proper PID requests are sent if we response to the
        # query properly. It's tough because we have to response in 100ms from
        # the request, which with the delays on USB is difficult.
        pass

    def test_simple_vehicle_message_response_for_pid(self):
        # TODO test that proper simple vehicle messages are sent if we reply to
        # the PID request. difficult because of the reasons mentioned above.
        pass
class PredefinedObd2RequestsTestsJson(JsonBaseTests,
        PredefinedObd2RequestsTests):
    def tearDown(self):
        # Chain both tearDowns: the mixin is not in the super() MRO chain here.
        super(PredefinedObd2RequestsTestsJson, self).tearDown()
        PredefinedObd2RequestsTests.tearDown(self)
class PredefinedObd2RequestsTestsProtobuf(ProtobufBaseTests,
        PredefinedObd2RequestsTests):
    def tearDown(self):
        # Chain both tearDowns: the mixin is not in the super() MRO chain here.
        super(PredefinedObd2RequestsTestsProtobuf, self).tearDown()
        PredefinedObd2RequestsTests.tearDown(self)
class SignalDecoderTests(object):
    """Mixin checking that a custom signal decoder emits its own message."""

    def test_decoder_publishes_its_own_message(self):
        bus = 1
        message_id = 0x49
        data = "0x00000000000000ff"
        ok_(self.vi.write(bus=bus, id=message_id, data=data))
        # The decoder should translate the raw frame into a named event message.
        message = self.simple_vehicle_message_queue.get(timeout=.5)
        eq_(message['name'], "tire_pressure")
        eq_(message['value'], "front_left")
        eq_(message['event'], 0xff)
        self.simple_vehicle_message_queue.task_done()
class SignalDecoderTestsJson(JsonBaseTests, SignalDecoderTests):
    """Signal decoder tests with the JSON payload format."""
    pass
class SignalDecoderTestsProtobuf(ProtobufBaseTests, SignalDecoderTests):
    """Signal decoder tests with the protobuf payload format."""
    pass
class ManySignalsPerMessageTests(JsonBaseTests):
    """See https://github.com/openxc/vi-firmware/issues/306
    """

    def setUp(self):
        super(ManySignalsPerMessageTests, self).setUp()
        # Two 8-byte frames carrying 8 packed signals each (16 total).
        self.message1_data = "0x3901A40351033204"
        self.message2_data = "0xB3033403CA01E001"

    def test_receive_all_16_signals(self):
        ok_(self.vi.write(bus=1, id=0x663, data=self.message1_data))
        ok_(self.vi.write(bus=1, id=0x664, data=self.message2_data))
        # Collect every translated signal name until the queue goes quiet.
        keys = set()
        while True:
            try:
                message = self.simple_vehicle_message_queue.get(timeout=.5)
                keys.add(message['name'])
                self.simple_vehicle_message_queue.task_done()
            except Empty:
                break
        # range(1, 17): the previous range(1, 16) stopped at signal15 and
        # silently never verified signal16, despite the test's name.
        for i in range(1, 17):
            ok_("signal%d" % i in keys, "Missing signal %d" % i)
|
import errno
import os
import sys
import time
import traceback
from eventlet.green import urllib
from eventlet.green import socket
from eventlet.green import BaseHTTPServer
from eventlet.pool import Pool
import greenio
# Tuning knobs for the WSGI server below.
DEFAULT_MAX_SIMULTANEOUS_REQUESTS = 1024
DEFAULT_MAX_HTTP_VERSION = 'HTTP/1.1'
# Hard cap on the request line; longer lines get a 414 response.
MAX_REQUEST_LINE = 8192
# Responses smaller than this are buffered before being written out.
MINIMUM_CHUNK_SIZE = 4096
DEFAULT_LOG_FORMAT='%(client_ip)s - - [%(date_time)s] "%(request_line)s" %(status_code)s %(body_length)s %(wall_seconds).6f'
__all__ = ['server', 'format_date_time']

# Weekday and month names for HTTP date/time formatting; always English!
_weekdayname = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
_monthname = [None, # Dummy so we can use 1-based month numbers
              "Jan", "Feb", "Mar", "Apr", "May", "Jun",
              "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
def format_date_time(timestamp):
    """Format a unix timestamp as an RFC 1123 HTTP date string."""
    # HTTP dates always use English names, regardless of locale.
    tm = time.gmtime(timestamp)
    weekday = ("Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun")[tm.tm_wday]
    month = (None,  # dummy entry so month numbers index 1-based
             "Jan", "Feb", "Mar", "Apr", "May", "Jun",
             "Jul", "Aug", "Sep", "Oct", "Nov", "Dec")[tm.tm_mon]
    return "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
        weekday, tm.tm_mday, month, tm.tm_year,
        tm.tm_hour, tm.tm_min, tm.tm_sec)
# Collections of error codes to compare against. Not all attributes are set
# on errno module on all platforms, so some are literals :(
# 10053 is the Windows WSAECONNABORTED code written as a literal.
BAD_SOCK = set((errno.EBADF, 10053))
BROKEN_SOCK = set((errno.EPIPE, errno.ECONNRESET))
def get_errno(err):
    """Return the error code carried by a socket.error, or None.

    Compensates for the cases where the code is not in the expected
    location (an empty exception args tuple).
    """
    try:
        code = err[0]
    except IndexError:
        return None
    return code
class Input(object):
    """File-like wrapper around the WSGI request body.

    Reads at most Content-Length bytes (or decodes chunked transfer
    encoding) from the raw rfile, and lazily emits a pending
    "100 Continue" line the first time the application reads the body.
    """

    def __init__(self,
                 rfile,
                 content_length,
                 wfile=None,
                 wfile_line=None,
                 chunked_input=False):
        self.rfile = rfile
        if content_length is not None:
            content_length = int(content_length)
        self.content_length = content_length
        # wfile/wfile_line hold a deferred "100 Continue" response; both are
        # cleared after the first read so it is only ever sent once.
        self.wfile = wfile
        self.wfile_line = wfile_line
        # Bytes of the body consumed so far.
        self.position = 0
        self.chunked_input = chunked_input
        # Remaining bytes in the current chunk; -1 means "no chunk started".
        self.chunk_length = -1

    def _do_read(self, reader, length=None):
        # Content-Length-bounded read using the supplied reader callable.
        if self.wfile is not None:
            ## 100 Continue
            self.wfile.write(self.wfile_line)
            self.wfile = None
            self.wfile_line = None
        if length is None and self.content_length is not None:
            length = self.content_length - self.position
        # Never read past the declared body length.
        # NOTE(review): if content_length is None and an explicit length is
        # given, this subtraction raises TypeError -- confirm callers always
        # have a Content-Length when passing a size.
        if length and length > self.content_length - self.position:
            length = self.content_length - self.position
        if not length:
            return ''
        try:
            read = reader(length)
        except greenio.SSL.ZeroReturnError:
            # Clean SSL shutdown mid-body: treat as EOF.
            read = ''
        self.position += len(read)
        return read

    def _chunked_read(self, rfile, length=None):
        # Decode "Transfer-Encoding: chunked" input: each chunk is a hex
        # length line, the payload, then a trailing CRLF.
        if self.wfile is not None:
            ## 100 Continue
            self.wfile.write(self.wfile_line)
            self.wfile = None
            self.wfile_line = None
        response = []
        try:
            if length is None:
                # Unbounded read: drain the current chunk, then every
                # remaining chunk until the zero-length terminator.
                if self.chunk_length > self.position:
                    response.append(rfile.read(self.chunk_length - self.position))
                while self.chunk_length != 0:
                    self.chunk_length = int(rfile.readline(), 16)
                    response.append(rfile.read(self.chunk_length))
                    rfile.readline()
            else:
                # Bounded read: take at most `length` bytes, advancing
                # through chunk headers as each chunk is exhausted.
                while length > 0 and self.chunk_length != 0:
                    if self.chunk_length > self.position:
                        response.append(rfile.read(
                            min(self.chunk_length - self.position, length)))
                        length -= len(response[-1])
                        self.position += len(response[-1])
                        if self.chunk_length == self.position:
                            rfile.readline()
                    else:
                        self.chunk_length = int(rfile.readline(), 16)
                        self.position = 0
        except greenio.SSL.ZeroReturnError:
            # Clean SSL shutdown: return whatever was decoded so far.
            pass
        return ''.join(response)

    def read(self, length=None):
        if self.chunked_input:
            return self._chunked_read(self.rfile, length)
        return self._do_read(self.rfile.read, length)

    def readline(self, size=None):
        # NOTE(review): `size` is accepted but not forwarded to the reader --
        # confirm no caller relies on a bounded readline.
        return self._do_read(self.rfile.readline)

    def readlines(self, hint=None):
        return self._do_read(self.rfile.readlines, hint)

    def __iter__(self):
        # NOTE(review): this iterates the characters of a single read(), not
        # lines, which is unusual for a file-like object -- confirm intent.
        return iter(self.read())
class HttpProtocol(BaseHTTPServer.BaseHTTPRequestHandler):
    """Per-connection HTTP handler that drives a WSGI application.

    Python 2 code: relies on old-style exception/raise syntax and byte
    strings throughout.
    """
    protocol_version = 'HTTP/1.1'
    minimum_chunk_size = MINIMUM_CHUNK_SIZE

    def setup(self):
        # overriding SocketServer.setup to correctly handle SSL.Connection objects
        conn = self.connection = self.request
        try:
            self.rfile = conn.makefile('rb', self.rbufsize)
            self.wfile = conn.makefile('wb', self.wbufsize)
        except (AttributeError, NotImplementedError):
            if hasattr(conn, 'send') and hasattr(conn, 'recv'):
                # it's an SSL.Connection
                self.rfile = socket._fileobject(conn, "rb", self.rbufsize)
                self.wfile = socket._fileobject(conn, "wb", self.wbufsize)
            else:
                # it's a SSLObject, or a martian
                raise NotImplementedError("wsgi.py doesn't support sockets "\
                                          "of type %s" % type(conn))

    def handle_one_request(self):
        """Read and validate one request line/headers, then dispatch it."""
        if self.server.max_http_version:
            self.protocol_version = self.server.max_http_version

        if self.rfile.closed:
            self.close_connection = 1
            return

        try:
            self.raw_requestline = self.rfile.readline(MAX_REQUEST_LINE)
            # readline() returning exactly the cap means the line was longer.
            if len(self.raw_requestline) == MAX_REQUEST_LINE:
                self.wfile.write(
                    "HTTP/1.0 414 Request URI Too Long\r\nConnection: close\r\nContent-length: 0\r\n\r\n")
                self.close_connection = 1
                return
        except greenio.SSL.ZeroReturnError:
            # Clean SSL shutdown: treat as client disconnect.
            self.raw_requestline = ''
        except socket.error, e:
            if get_errno(e) not in BAD_SOCK:
                raise
            self.raw_requestline = ''

        if not self.raw_requestline:
            self.close_connection = 1
            return

        if not self.parse_request():
            return

        # Reject syntactically invalid Content-Length before touching the body.
        content_length = self.headers.getheader('content-length')
        if content_length:
            try:
                int(content_length)
            except ValueError:
                self.wfile.write(
                    "HTTP/1.0 400 Bad Request\r\n"
                    "Connection: close\r\nContent-length: 0\r\n\r\n")
                self.close_connection = 1
                return

        self.environ = self.get_environ()
        self.application = self.server.app
        try:
            # Track in-flight requests for the server's accounting.
            self.server.outstanding_requests += 1
            try:
                self.handle_one_response()
            except socket.error, e:
                # Broken pipe, connection reset by peer
                if get_errno(e) not in BROKEN_SOCK:
                    raise
        finally:
            self.server.outstanding_requests -= 1

    def handle_one_response(self):
        """Invoke the WSGI app and stream its response to the client."""
        start = time.time()
        headers_set = []     # [status, headers] once start_response is called
        headers_sent = []    # non-empty once the status line has been written
        wfile = self.wfile
        result = None
        # Mutable one-element cells so the nested closures can rebind them.
        use_chunked = [False]
        length = [0]
        status_code = [200]

        def write(data, _writelines=wfile.writelines):
            # WSGI write callable; lazily emits the header block on first use.
            towrite = []
            if not headers_set:
                raise AssertionError("write() before start_response()")
            elif not headers_sent:
                status, response_headers = headers_set
                headers_sent.append(1)
                header_list = [header[0].lower() for header in response_headers]
                towrite.append('%s %s\r\n' % (self.protocol_version, status))
                for header in response_headers:
                    towrite.append('%s: %s\r\n' % header)

                # send Date header?
                if 'date' not in header_list:
                    towrite.append('Date: %s\r\n' % (format_date_time(time.time()),))
                client_conn = self.headers.get('Connection', '').lower()
                send_keep_alive = False
                if self.server.keepalive and (client_conn == 'keep-alive' or \
                    (self.request_version == 'HTTP/1.1' and
                     not client_conn == 'close')):
                    # only send keep-alives back to clients that sent them,
                    # it's redundant for 1.1 connections
                    send_keep_alive = (client_conn == 'keep-alive')
                    self.close_connection = 0
                else:
                    self.close_connection = 1

                # Without a Content-Length: chunk for 1.1, close-at-EOF for 1.0.
                if self.request_version == 'HTTP/1.1' and 'content-length' not in header_list :
                    use_chunked[0] = True
                    towrite.append('Transfer-Encoding: chunked\r\n')
                elif 'content-length' not in header_list:
                    # client is 1.0 and therefore must read to EOF
                    self.close_connection = 1

                if self.close_connection:
                    towrite.append('Connection: close\r\n')
                elif send_keep_alive:
                    towrite.append('Connection: keep-alive\r\n')
                towrite.append('\r\n')
                # end of header writing

            if use_chunked[0]:
                ## Write the chunked encoding
                towrite.append("%x\r\n%s\r\n" % (len(data), data))
            else:
                towrite.append(data)
            try:
                _writelines(towrite)
                length[0] = length[0] + sum(map(len, towrite))
            except UnicodeEncodeError:
                # WSGI requires byte strings; report the offending app output.
                print "Encountered unicode while attempting to write wsgi response: ", [x for x in towrite if isinstance(x, unicode)]
                traceback.print_exc()
                _writelines(
                    ["HTTP/1.0 500 Internal Server Error\r\n",
                     "Connection: close\r\n",
                     "Content-type: text/plain\r\n",
                     "Content-length: 98\r\n",
                     "\r\n",
                     "Internal Server Error: wsgi application passed a unicode object to the server instead of a string."])

        def start_response(status, response_headers, exc_info=None):
            # Standard WSGI start_response; remembers status/headers for write().
            status_code[0] = status.split()[0]
            if exc_info:
                try:
                    if headers_sent:
                        # Re-raise original exception if headers sent
                        raise exc_info[0], exc_info[1], exc_info[2]
                finally:
                    # Avoid dangling circular ref
                    exc_info = None

            # Normalize header names to Canonical-Dash-Case.
            capitalized_headers = [('-'.join([x.capitalize() for x in key.split('-')]), value)
                                   for key, value in response_headers]

            headers_set[:] = [status, capitalized_headers]
            return write

        try:
            try:
                result = self.application(self.environ, start_response)
                # If the app returned a sized iterable before any write, we can
                # compute Content-Length ourselves and avoid chunking.
                if not headers_sent and hasattr(result, '__len__') and \
                        'Content-Length' not in [h for h, v in headers_set[1]]:
                    headers_set[1].append(('Content-Length', str(sum(map(len, result)))))
                towrite = []
                towrite_size = 0
                for data in result:
                    towrite.append(data)
                    towrite_size += len(data)
                    # Coalesce small app writes into minimum_chunk_size batches.
                    if towrite_size >= self.minimum_chunk_size:
                        write(''.join(towrite))
                        towrite = []
                        towrite_size = 0
                if towrite:
                    write(''.join(towrite))
                # Force headers out for empty bodies / terminate the chunk stream.
                if not headers_sent or use_chunked[0]:
                    write('')
            except Exception, e:
                # App blew up: emit a plain-text traceback as a 500 if possible.
                self.close_connection = 1
                exc = traceback.format_exc()
                print exc
                if not headers_set:
                    start_response("500 Internal Server Error", [('Content-type', 'text/plain')])
                write(exc)
        finally:
            if hasattr(result, 'close'):
                result.close()
            # NOTE(review): CONTENT_LENGTH in environ is a *string*; in Python
            # 2 an int < str comparison is always True, so this drain check
            # fires whenever position is an int -- confirm intent.
            if self.environ['eventlet.input'].position < self.environ.get('CONTENT_LENGTH', 0):
                ## Read and discard body if there was no pending 100-continue
                if not self.environ['eventlet.input'].wfile:
                    while self.environ['eventlet.input'].read(MINIMUM_CHUNK_SIZE):
                        pass
            finish = time.time()

            self.server.log_message(self.server.log_format % dict(
                client_ip=self.get_client_ip(),
                date_time=self.log_date_time_string(),
                request_line=self.requestline,
                status_code=status_code[0],
                body_length=length[0],
                wall_seconds=finish - start))

    def get_client_ip(self):
        # Optionally prepend the X-Forwarded-For chain to the socket peer IP.
        client_ip = self.client_address[0]
        if self.server.log_x_forwarded_for:
            forward = self.headers.get('X-Forwarded-For', '').replace(' ', '')
            if forward:
                client_ip = "%s,%s" % (forward, client_ip)
        return client_ip

    def get_environ(self):
        """Build the WSGI environ dict for the current request."""
        env = self.server.get_environ()
        env['REQUEST_METHOD'] = self.command
        env['SCRIPT_NAME'] = ''

        if '?' in self.path:
            path, query = self.path.split('?', 1)
        else:
            path, query = self.path, ''
        env['PATH_INFO'] = urllib.unquote(path)
        env['QUERY_STRING'] = query

        if self.headers.typeheader is None:
            env['CONTENT_TYPE'] = self.headers.type
        else:
            env['CONTENT_TYPE'] = self.headers.typeheader

        length = self.headers.getheader('content-length')
        if length:
            env['CONTENT_LENGTH'] = length
        env['SERVER_PROTOCOL'] = 'HTTP/1.0'

        host, port = self.request.getsockname()
        env['SERVER_NAME'] = host
        env['SERVER_PORT'] = str(port)
        env['REMOTE_ADDR'] = self.client_address[0]
        env['GATEWAY_INTERFACE'] = 'CGI/1.1'

        # Copy remaining request headers into HTTP_* keys, merging duplicates.
        for h in self.headers.headers:
            k, v = h.split(':', 1)
            k = k.replace('-', '_').upper()
            v = v.strip()
            if k in env:
                continue
            envk = 'HTTP_' + k
            if envk in env:
                env[envk] += ',' + v
            else:
                env[envk] = v

        # Defer the "100 Continue" until the app actually reads the body.
        if env.get('HTTP_EXPECT') == '100-continue':
            wfile = self.wfile
            wfile_line = 'HTTP/1.1 100 Continue\r\n\r\n'
        else:
            wfile = None
            wfile_line = None
        chunked = env.get('HTTP_TRANSFER_ENCODING', '').lower() == 'chunked'
        env['wsgi.input'] = env['eventlet.input'] = Input(
            self.rfile, length, wfile=wfile, wfile_line=wfile_line,
            chunked_input=chunked)

        return env

    def finish(self):
        # Flush buffers, then shut the green socket down safely before closing.
        BaseHTTPServer.BaseHTTPRequestHandler.finish(self)
        greenio.shutdown_safe(self.connection)
        self.connection.close()
class Server(BaseHTTPServer.HTTPServer):
    """HTTPServer subclass that hands each accepted connection to HttpProtocol.

    Python 2 code (uses tuple parameter unpacking in process_request).
    """

    def __init__(self,
                 socket,
                 address,
                 app,
                 log=None,
                 environ=None,
                 max_http_version=None,
                 protocol=HttpProtocol,
                 minimum_chunk_size=None,
                 log_x_forwarded_for=True,
                 keepalive=True,
                 log_format=DEFAULT_LOG_FORMAT):

        self.outstanding_requests = 0
        self.socket = socket
        self.address = address
        if log:
            self.log = log
        else:
            self.log = sys.stderr
        self.app = app
        self.keepalive = keepalive
        self.environ = environ
        self.max_http_version = max_http_version
        self.protocol = protocol
        self.pid = os.getpid()
        # NOTE: this mutates the protocol *class*, affecting every Server
        # instance sharing that class.
        if minimum_chunk_size is not None:
            protocol.minimum_chunk_size = minimum_chunk_size
        self.log_x_forwarded_for = log_x_forwarded_for
        self.log_format = log_format

    def get_environ(self):
        # Base WSGI environ shared by every request; per-request keys are
        # added in HttpProtocol.get_environ.
        socket = self.socket
        d = {
            'wsgi.errors': sys.stderr,
            'wsgi.version': (1, 0),
            'wsgi.multithread': True,
            'wsgi.multiprocess': False,
            'wsgi.run_once': False,
            'wsgi.url_scheme': 'http',
        }
        # Allow user-supplied environ entries to override the defaults.
        if self.environ is not None:
            d.update(self.environ)
        return d

    def process_request(self, (socket, address)):
        # Runs in its own green thread per accepted connection.
        proto = self.protocol(socket, address, self)
        proto.handle()

    def log_message(self, message):
        self.log.write(message + '\n')
# Socket error codes during accept() that mean the listening socket is gone.
ACCEPT_SOCK = set((errno.EPIPE, errno.EBADF))
def server(sock, site,
log=None,
environ=None,
max_size=None,
max_http_version=DEFAULT_MAX_HTTP_VERSION,
protocol=HttpProtocol,
server_event=None,
minimum_chunk_size=None,
log_x_forwarded_for=True,
custom_pool=None,
keepalive=True,
log_format=DEFAULT_LOG_FORMAT):
""" Start up a wsgi server handling requests from the supplied server
socket. This function loops forever. The *sock* object will be closed after server exits,
but the underlying file descriptor will remain open, so if you have a dup() of *sock*,
it will remain usable.
:param sock: Server socket, must be already bound to a port and listening.
:param site: WSGI application function.
:param log: File-like object that logs should be written to. If not specified, sys.stderr is used.
:param environ: Additional parameters that go into the environ dictionary of every request.
:param max_size: Maximum number of client connections opened at any time by this server.
:param max_http_version: Set to "HTTP/1.0" to make the server pretend it only supports HTTP 1.0. The primary reason to do this is to prevent clients from keeping connections open with keepalives.
:param protocol: Protocol class. Deprecated.
:param server_event: Used to collect the Server object. Deprecated.
:param minimum_chunk_size: Minimum size in bytes for http chunks. This can be used to improve performance of applications which yield many small strings, though using it technically violates the WSGI spec.
:param log_x_forwarded_for: If True (the default), logs the contents of the x-forwarded-for header in addition to the actual client ip address in the 'client_ip' field of the log line.
:param custom_pool: A custom Pool instance which is used to spawn client green threads. If this is supplied, max_size is ignored.
:param log_format: A python format string that is used as the template to generate log lines. The following values can be formatted into it: client_ip, date_time, request_line, status_code, body_length, wall_seconds. Look the default for an example of how to use this.
"""
serv = Server(sock, sock.getsockname(),
site, log,
environ=None,
max_http_version=max_http_version,
protocol=protocol,
minimum_chunk_size=minimum_chunk_size,
log_x_forwarded_for=log_x_forwarded_for,
keepalive=keepalive,
log_format=log_format)
if server_event is not None:
server_event.send(serv)
if max_size is None:
max_size = DEFAULT_MAX_SIMULTANEOUS_REQUESTS
if custom_pool is not None:
pool = custom_pool
else:
pool = Pool(max_size=max_size)
try:
host, port = sock.getsockname()
port = ':%s' % (port, )
if hasattr(sock, 'do_handshake'):
scheme = 'https'
if port == ':443':
port = ''
else:
scheme = 'http'
if port == ':80':
port = ''
serv.log.write("(%s) wsgi starting up on %s://%s%s/\n" % (os.getpid(), scheme, host, port))
while True:
try:
try:
client_socket = sock.accept()
except socket.error, e:
if get_errno(e) not in ACCEPT_SOCK:
raise
pool.execute_async(serv.process_request, client_socket)
except (KeyboardInterrupt, SystemExit):
serv.log.write("wsgi exiting\n")
break
finally:
try:
# NOTE: It's not clear whether we want this to leave the
# socket open or close it. Use cases like Spawning want
# the underlying fd to remain open, but if we're going
# that far we might as well not bother closing sock at
# all.
sock.close()
except socket.error, e:
if get_errno(e) not in BROKEN_SOCK:
traceback.print_exc()
# Cleaned up some wsgi stuff.  (stray VCS commit message; commented out so the module parses)
import errno
import os
import sys
import time
import traceback
from eventlet.green import urllib
from eventlet.green import socket
from eventlet.green import BaseHTTPServer
from eventlet.pool import Pool
import greenio
# Upper bound on concurrently handled client connections (default Pool size).
DEFAULT_MAX_SIMULTANEOUS_REQUESTS = 1024
DEFAULT_MAX_HTTP_VERSION = 'HTTP/1.1'
# Longest request line read before answering 414 Request URI Too Long.
MAX_REQUEST_LINE = 8192
# Default minimum body size buffered before a chunk is flushed.
MINIMUM_CHUNK_SIZE = 4096
DEFAULT_LOG_FORMAT='%(client_ip)s - - [%(date_time)s] "%(request_line)s" %(status_code)s %(body_length)s %(wall_seconds).6f'
__all__ = ['server', 'format_date_time']
# Weekday and month names for HTTP date/time formatting; always English!
_weekdayname = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
_monthname = [None,  # dummy entry so month numbers can be used 1-based
              "Jan", "Feb", "Mar", "Apr", "May", "Jun",
              "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]

def format_date_time(timestamp):
    """Format a unix timestamp as an RFC 1123 HTTP date string (GMT)."""
    t = time.gmtime(timestamp)
    return "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
        _weekdayname[t.tm_wday], t.tm_mday, _monthname[t.tm_mon],
        t.tm_year, t.tm_hour, t.tm_min, t.tm_sec)
# Collections of error codes to compare against. Not all attributes are set
# on errno module on all platforms, so some are literals :(
BAD_SOCK = set((errno.EBADF, 10053))
BROKEN_SOCK = set((errno.EPIPE, errno.ECONNRESET))

def get_errno(err):
    """Return the numeric code carried in a socket.error-style exception
    (its first positional argument), or None when it carries none."""
    try:
        code = err[0]
    except IndexError:
        code = None
    return code
class Input(object):
    """File-like wrapper around the request body.

    Supports Content-Length-bounded reads and chunked transfer decoding.
    A pending "100 Continue" line (wfile/wfile_line) is emitted lazily, the
    first time the application actually reads from the body.
    """
    def __init__(self,
                 rfile,
                 content_length,
                 wfile=None,
                 wfile_line=None,
                 chunked_input=False):
        self.rfile = rfile
        if content_length is not None:
            # Header values arrive as strings; normalize to int.
            content_length = int(content_length)
        self.content_length = content_length
        # Pending "100 Continue" state; cleared after the first read.
        self.wfile = wfile
        self.wfile_line = wfile_line
        # Bytes of the body consumed so far (chunk-relative in chunked mode).
        self.position = 0
        self.chunked_input = chunked_input
        self.chunk_length = -1
    def _do_read(self, reader, length=None):
        """Read via *reader*, clamping to the remaining Content-Length."""
        if self.wfile is not None:
            ## 100 Continue
            self.wfile.write(self.wfile_line)
            self.wfile = None
            self.wfile_line = None
        if length is None and self.content_length is not None:
            length = self.content_length - self.position
        if length and length > self.content_length - self.position:
            length = self.content_length - self.position
        if not length:
            return ''
        try:
            read = reader(length)
        except greenio.SSL.ZeroReturnError:
            # Clean SSL shutdown by the peer: treat as EOF.
            read = ''
        self.position += len(read)
        return read
    def _chunked_read(self, rfile, length=None):
        """Decode a chunked-encoded body. With length=None, drain every
        remaining chunk; otherwise read up to *length* bytes, crossing
        chunk boundaries as needed."""
        if self.wfile is not None:
            ## 100 Continue
            self.wfile.write(self.wfile_line)
            self.wfile = None
            self.wfile_line = None
        response = []
        try:
            if length is None:
                if self.chunk_length > self.position:
                    # Finish the partially-consumed current chunk first.
                    response.append(rfile.read(self.chunk_length - self.position))
                while self.chunk_length != 0:
                    # Chunk size line is hexadecimal; 0 terminates the body.
                    self.chunk_length = int(rfile.readline(), 16)
                    response.append(rfile.read(self.chunk_length))
                    rfile.readline()
            else:
                while length > 0 and self.chunk_length != 0:
                    if self.chunk_length > self.position:
                        response.append(rfile.read(
                            min(self.chunk_length - self.position, length)))
                        length -= len(response[-1])
                        self.position += len(response[-1])
                        if self.chunk_length == self.position:
                            # Chunk fully consumed: discard trailing CRLF.
                            rfile.readline()
                    else:
                        self.chunk_length = int(rfile.readline(), 16)
                        self.position = 0
        except greenio.SSL.ZeroReturnError:
            # Clean SSL shutdown: return whatever was decoded so far.
            pass
        return ''.join(response)
    def read(self, length=None):
        """Read up to *length* bytes (all remaining when None)."""
        if self.chunked_input:
            return self._chunked_read(self.rfile, length)
        return self._do_read(self.rfile.read, length)
    def readline(self, size=None):
        # NOTE(review): `size` is accepted but not forwarded to the reader.
        return self._do_read(self.rfile.readline)
    def readlines(self, hint=None):
        return self._do_read(self.rfile.readlines, hint)
    def __iter__(self):
        return iter(self.read())
class HttpProtocol(BaseHTTPServer.BaseHTTPRequestHandler):
    """Per-connection request handler that adapts BaseHTTPRequestHandler
    to drive the WSGI application held by ``self.server.app``."""
    protocol_version = 'HTTP/1.1'
    # Responses smaller than this are buffered before being flushed.
    minimum_chunk_size = MINIMUM_CHUNK_SIZE
    def setup(self):
        # overriding SocketServer.setup to correctly handle SSL.Connection objects
        conn = self.connection = self.request
        try:
            self.rfile = conn.makefile('rb', self.rbufsize)
            self.wfile = conn.makefile('wb', self.wbufsize)
        except (AttributeError, NotImplementedError):
            if hasattr(conn, 'send') and hasattr(conn, 'recv'):
                # it's an SSL.Connection
                self.rfile = socket._fileobject(conn, "rb", self.rbufsize)
                self.wfile = socket._fileobject(conn, "wb", self.wbufsize)
            else:
                # it's a SSLObject, or a martian
                raise NotImplementedError("wsgi.py doesn't support sockets "\
                                          "of type %s" % type(conn))
    def handle_one_request(self):
        """Read, validate and dispatch a single HTTP request; sets
        close_connection to end the keep-alive loop on errors/EOF."""
        if self.server.max_http_version:
            self.protocol_version = self.server.max_http_version
        if self.rfile.closed:
            self.close_connection = 1
            return
        try:
            self.raw_requestline = self.rfile.readline(MAX_REQUEST_LINE)
            if len(self.raw_requestline) == MAX_REQUEST_LINE:
                # readline() hit the cap without a newline: line too long.
                self.wfile.write(
                    "HTTP/1.0 414 Request URI Too Long\r\nConnection: close\r\nContent-length: 0\r\n\r\n")
                self.close_connection = 1
                return
        except greenio.SSL.ZeroReturnError:
            # Clean SSL shutdown: treated as EOF below.
            self.raw_requestline = ''
        except socket.error, e:
            if get_errno(e) not in BAD_SOCK:
                raise
            self.raw_requestline = ''
        if not self.raw_requestline:
            self.close_connection = 1
            return
        if not self.parse_request():
            return
        content_length = self.headers.getheader('content-length')
        if content_length:
            # Reject non-numeric Content-Length up front with a 400.
            try:
                int(content_length)
            except ValueError:
                self.wfile.write(
                    "HTTP/1.0 400 Bad Request\r\n"
                    "Connection: close\r\nContent-length: 0\r\n\r\n")
                self.close_connection = 1
                return
        self.environ = self.get_environ()
        self.application = self.server.app
        try:
            self.server.outstanding_requests += 1
            try:
                self.handle_one_response()
            except socket.error, e:
                # Broken pipe, connection reset by peer
                if get_errno(e) not in BROKEN_SOCK:
                    raise
        finally:
            self.server.outstanding_requests -= 1
    def handle_one_response(self):
        """Invoke the WSGI app and stream its response: header emission,
        optional chunked encoding, keep-alive decision, access logging."""
        start = time.time()
        headers_set = []
        headers_sent = []
        wfile = self.wfile
        result = None
        # One-element lists let the nested closures rebind values
        # (pre-`nonlocal` idiom).
        use_chunked = [False]
        length = [0]
        status_code = [200]
        def write(data, _writelines=wfile.writelines):
            towrite = []
            if not headers_set:
                raise AssertionError("write() before start_response()")
            elif not headers_sent:
                # First write: emit the status line and response headers.
                status, response_headers = headers_set
                headers_sent.append(1)
                header_list = [header[0].lower() for header in response_headers]
                towrite.append('%s %s\r\n' % (self.protocol_version, status))
                for header in response_headers:
                    towrite.append('%s: %s\r\n' % header)
                # send Date header?
                if 'date' not in header_list:
                    towrite.append('Date: %s\r\n' % (format_date_time(time.time()),))
                client_conn = self.headers.get('Connection', '').lower()
                send_keep_alive = False
                if self.server.keepalive and (client_conn == 'keep-alive' or \
                        (self.request_version == 'HTTP/1.1' and
                         not client_conn == 'close')):
                    # only send keep-alives back to clients that sent them,
                    # it's redundant for 1.1 connections
                    send_keep_alive = (client_conn == 'keep-alive')
                    self.close_connection = 0
                else:
                    self.close_connection = 1
                if 'content-length' not in header_list:
                    if self.request_version == 'HTTP/1.1':
                        use_chunked[0] = True
                        towrite.append('Transfer-Encoding: chunked\r\n')
                    elif 'content-length' not in header_list:
                        # NOTE(review): this condition duplicates the outer
                        # `if`, so the branch behaves as a plain `else`.
                        # client is 1.0 and therefore must read to EOF
                        self.close_connection = 1
                if self.close_connection:
                    towrite.append('Connection: close\r\n')
                elif send_keep_alive:
                    towrite.append('Connection: keep-alive\r\n')
                towrite.append('\r\n')
            # end of header writing
            if use_chunked[0]:
                ## Write the chunked encoding
                towrite.append("%x\r\n%s\r\n" % (len(data), data))
            else:
                towrite.append(data)
            try:
                _writelines(towrite)
                length[0] = length[0] + sum(map(len, towrite))
            except UnicodeEncodeError:
                print "Encountered unicode while attempting to write wsgi response: ", [x for x in towrite if isinstance(x, unicode)]
                traceback.print_exc()
                _writelines(
                    ["HTTP/1.0 500 Internal Server Error\r\n",
                     "Connection: close\r\n",
                     "Content-type: text/plain\r\n",
                     "Content-length: 98\r\n",
                     "\r\n",
                     "Internal Server Error: wsgi application passed a unicode object to the server instead of a string."])
        def start_response(status, response_headers, exc_info=None):
            status_code[0] = status.split()[0]
            if exc_info:
                try:
                    if headers_sent:
                        # Re-raise original exception if headers sent
                        raise exc_info[0], exc_info[1], exc_info[2]
                finally:
                    # Avoid dangling circular ref
                    exc_info = None
            # Normalize header capitalization (content-length -> Content-Length).
            capitalized_headers = [('-'.join([x.capitalize() for x in key.split('-')]), value)
                                   for key, value in response_headers]
            headers_set[:] = [status, capitalized_headers]
            return write
        try:
            try:
                result = self.application(self.environ, start_response)
                # If the app returned a sized iterable with no Content-Length
                # header, compute one so chunking can be avoided.
                if not headers_sent and hasattr(result, '__len__') and \
                        'Content-Length' not in [h for h, v in headers_set[1]]:
                    headers_set[1].append(('Content-Length', str(sum(map(len, result)))))
                towrite = []
                towrite_size = 0
                # Buffer app output until minimum_chunk_size before flushing.
                for data in result:
                    towrite.append(data)
                    towrite_size += len(data)
                    if towrite_size >= self.minimum_chunk_size:
                        write(''.join(towrite))
                        towrite = []
                        towrite_size = 0
                if towrite:
                    write(''.join(towrite))
                if not headers_sent or use_chunked[0]:
                    # Flush headers for empty bodies / emit the 0-length
                    # terminating chunk.
                    write('')
            except Exception, e:
                self.close_connection = 1
                exc = traceback.format_exc()
                print exc
                if not headers_set:
                    start_response("500 Internal Server Error", [('Content-type', 'text/plain')])
                write(exc)
        finally:
            if hasattr(result, 'close'):
                result.close()
            if self.environ['eventlet.input'].position < self.environ.get('CONTENT_LENGTH', 0):
                # NOTE(review): CONTENT_LENGTH is stored as a *string*; this
                # int-vs-str comparison relies on Python 2 mixed-type
                # ordering -- confirm intent.
                ## Read and discard body if there was no pending 100-continue
                if not self.environ['eventlet.input'].wfile:
                    while self.environ['eventlet.input'].read(MINIMUM_CHUNK_SIZE):
                        pass
            finish = time.time()
            self.server.log_message(self.server.log_format % dict(
                client_ip=self.get_client_ip(),
                date_time=self.log_date_time_string(),
                request_line=self.requestline,
                status_code=status_code[0],
                body_length=length[0],
                wall_seconds=finish - start))
    def get_client_ip(self):
        """Client address for logging; prefixes X-Forwarded-For when the
        server has log_x_forwarded_for enabled."""
        client_ip = self.client_address[0]
        if self.server.log_x_forwarded_for:
            forward = self.headers.get('X-Forwarded-For', '').replace(' ', '')
            if forward:
                client_ip = "%s,%s" % (forward, client_ip)
        return client_ip
    def get_environ(self):
        """Build the per-request WSGI environ from the parsed request."""
        env = self.server.get_environ()
        env['REQUEST_METHOD'] = self.command
        env['SCRIPT_NAME'] = ''
        if '?' in self.path:
            path, query = self.path.split('?', 1)
        else:
            path, query = self.path, ''
        env['PATH_INFO'] = urllib.unquote(path)
        env['QUERY_STRING'] = query
        if self.headers.typeheader is None:
            env['CONTENT_TYPE'] = self.headers.type
        else:
            env['CONTENT_TYPE'] = self.headers.typeheader
        length = self.headers.getheader('content-length')
        if length:
            env['CONTENT_LENGTH'] = length
        # NOTE(review): hard-coded 1.0 although the handler speaks 1.1.
        env['SERVER_PROTOCOL'] = 'HTTP/1.0'
        host, port = self.request.getsockname()
        env['SERVER_NAME'] = host
        env['SERVER_PORT'] = str(port)
        env['REMOTE_ADDR'] = self.client_address[0]
        env['GATEWAY_INTERFACE'] = 'CGI/1.1'
        # Copy remaining request headers into HTTP_* keys, folding
        # duplicate headers together with commas.
        for h in self.headers.headers:
            k, v = h.split(':', 1)
            k = k.replace('-', '_').upper()
            v = v.strip()
            if k in env:
                continue
            envk = 'HTTP_' + k
            if envk in env:
                env[envk] += ',' + v
            else:
                env[envk] = v
        if env.get('HTTP_EXPECT') == '100-continue':
            # Defer the 100 Continue line until the app reads the body.
            wfile = self.wfile
            wfile_line = 'HTTP/1.1 100 Continue\r\n\r\n'
        else:
            wfile = None
            wfile_line = None
        chunked = env.get('HTTP_TRANSFER_ENCODING', '').lower() == 'chunked'
        env['wsgi.input'] = env['eventlet.input'] = Input(
            self.rfile, length, wfile=wfile, wfile_line=wfile_line,
            chunked_input=chunked)
        return env
    def finish(self):
        # Base-class cleanup, then safe shutdown of the eventlet socket.
        BaseHTTPServer.BaseHTTPRequestHandler.finish(self)
        greenio.shutdown_safe(self.connection)
        self.connection.close()
class Server(BaseHTTPServer.HTTPServer):
    """Per-server WSGI state holder.

    HTTPServer.__init__ is deliberately not invoked: the caller passes in a
    socket that is already bound and listening.
    """

    def __init__(self,
                 socket,
                 address,
                 app,
                 log=None,
                 environ=None,
                 max_http_version=None,
                 protocol=HttpProtocol,
                 minimum_chunk_size=None,
                 log_x_forwarded_for=True,
                 keepalive=True,
                 log_format=DEFAULT_LOG_FORMAT):
        self.outstanding_requests = 0
        self.socket = socket
        self.address = address
        self.app = app
        self.keepalive = keepalive
        self.environ = environ
        self.max_http_version = max_http_version
        self.protocol = protocol
        self.pid = os.getpid()
        self.log_x_forwarded_for = log_x_forwarded_for
        self.log_format = log_format
        self.log = log if log else sys.stderr
        if minimum_chunk_size is not None:
            # NOTE: this mutates the protocol *class*, affecting every
            # server that shares it.
            protocol.minimum_chunk_size = minimum_chunk_size

    def get_environ(self):
        """Return a fresh base WSGI environ dict, overlaid with any
        user-supplied environ entries."""
        base = {
            'wsgi.errors': sys.stderr,
            'wsgi.version': (1, 0),
            'wsgi.multithread': True,
            'wsgi.multiprocess': False,
            'wsgi.run_once': False,
            'wsgi.url_scheme': 'http',
        }
        if self.environ is not None:
            base.update(self.environ)
        return base

    def process_request(self, client_socket):
        """Handle one accepted (socket, address) pair on a green thread."""
        conn, addr = client_socket
        handler = self.protocol(conn, addr, self)
        handler.handle()

    def log_message(self, message):
        """Append a newline and write *message* to the configured log."""
        self.log.write(message + '\n')
ACCEPT_SOCK = set((errno.EPIPE, errno.EBADF))
def server(sock, site,
log=None,
environ=None,
max_size=None,
max_http_version=DEFAULT_MAX_HTTP_VERSION,
protocol=HttpProtocol,
server_event=None,
minimum_chunk_size=None,
log_x_forwarded_for=True,
custom_pool=None,
keepalive=True,
log_format=DEFAULT_LOG_FORMAT):
""" Start up a wsgi server handling requests from the supplied server
socket. This function loops forever. The *sock* object will be closed after server exits,
but the underlying file descriptor will remain open, so if you have a dup() of *sock*,
it will remain usable.
:param sock: Server socket, must be already bound to a port and listening.
:param site: WSGI application function.
:param log: File-like object that logs should be written to. If not specified, sys.stderr is used.
:param environ: Additional parameters that go into the environ dictionary of every request.
:param max_size: Maximum number of client connections opened at any time by this server.
:param max_http_version: Set to "HTTP/1.0" to make the server pretend it only supports HTTP 1.0. The primary reason to do this is to prevent clients from keeping connections open with keepalives.
:param protocol: Protocol class. Deprecated.
:param server_event: Used to collect the Server object. Deprecated.
:param minimum_chunk_size: Minimum size in bytes for http chunks. This can be used to improve performance of applications which yield many small strings, though using it technically violates the WSGI spec.
:param log_x_forwarded_for: If True (the default), logs the contents of the x-forwarded-for header in addition to the actual client ip address in the 'client_ip' field of the log line.
:param custom_pool: A custom Pool instance which is used to spawn client green threads. If this is supplied, max_size is ignored.
:param keepalive: If set to False, disables keepalives on the server; all connections will be closed after serving one request.
:param log_format: A python format string that is used as the template to generate log lines. The following values can be formatted into it: client_ip, date_time, request_line, status_code, body_length, wall_seconds. Look the default for an example of how to use this.
"""
serv = Server(sock, sock.getsockname(),
site, log,
environ=None,
max_http_version=max_http_version,
protocol=protocol,
minimum_chunk_size=minimum_chunk_size,
log_x_forwarded_for=log_x_forwarded_for,
keepalive=keepalive,
log_format=log_format)
if server_event is not None:
server_event.send(serv)
if max_size is None:
max_size = DEFAULT_MAX_SIMULTANEOUS_REQUESTS
if custom_pool is not None:
pool = custom_pool
else:
pool = Pool(max_size=max_size)
try:
host, port = sock.getsockname()
port = ':%s' % (port, )
if hasattr(sock, 'do_handshake'):
scheme = 'https'
if port == ':443':
port = ''
else:
scheme = 'http'
if port == ':80':
port = ''
serv.log.write("(%s) wsgi starting up on %s://%s%s/\n" % (os.getpid(), scheme, host, port))
while True:
try:
try:
client_socket = sock.accept()
except socket.error, e:
if get_errno(e) not in ACCEPT_SOCK:
raise
pool.execute_async(serv.process_request, client_socket)
except (KeyboardInterrupt, SystemExit):
serv.log.write("wsgi exiting\n")
break
finally:
try:
# NOTE: It's not clear whether we want this to leave the
# socket open or close it. Use cases like Spawning want
# the underlying fd to remain open, but if we're going
# that far we might as well not bother closing sock at
# all.
sock.close()
except socket.error, e:
if get_errno(e) not in BROKEN_SOCK:
traceback.print_exc()
# --- (stray file-boundary artifact; commented out so the file parses) ---
#!/usr/bin/env python
# coding=utf8
import json, sys, os, datetime
from setting import useport, CACHE_TIMEOUT
from flask import Flask, g, request, Response, session, redirect, render_template
from flask.templating import DispatchingJinjaLoader
from flask.globals import _request_ctx_stack
from flask.ext.sqlalchemy import SQLAlchemy
from werkzeug.contrib.cache import SimpleCache
from werkzeug.routing import BaseConverter
from util.session import Session
from blueprint.task.views import task
from blueprint.script.views import script
from blueprint.user.views import user
cache = SimpleCache()
def cached(func):
def decorator(*args, **kwargs):
key = request.path + '&'.join(['%s=%s'%(k,v.encode('utf8')) for k,v in
request.args.items()])
response = cache.get(key)
if response is None:
print 'call func:', key
response = func(*args, **kwargs)
cache.set(key, response, CACHE_TIMEOUT)
return response
return decorator
class LeafinlineLoader(DispatchingJinjaLoader):
    """Jinja loader that searches the current blueprint's template folder
    first, then falls back to the application-wide loader."""

    def _iter_loaders(self, template):
        blueprint = _request_ctx_stack.top.request.blueprint
        if blueprint is not None and blueprint in self.app.blueprints:
            bp_loader = self.app.blueprints[blueprint].jinja_loader
            if bp_loader is not None:
                yield bp_loader, template
        app_loader = self.app.jinja_loader
        if app_loader is not None:
            yield app_loader, template
app = Flask(__name__, static_folder='static', static_path='/static', template_folder='template')
app.config.from_object(__name__)
# SECURITY NOTE: hard-coded secret key; load it from config/environment in
# production instead of shipping it in source.
app.secret_key = 'super secret key'
app.config['SESSION_TYPE'] = 'redis'
app.config['SESSION_PERMANENT'] = False
app.permanent_session_lifetime = datetime.timedelta(days=1)
Session(app)
# app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://hotel2:hotel0115@58.83.130.112:3306/hotel20'
# db = SQLAlchemy(app)
# g['db'] = db
app.jinja_options = Flask.jinja_options.copy()
app.jinja_options['loader'] = LeafinlineLoader(app)
# Bug fix: the task blueprint was registered under the undefined name
# `admin` (NameError at import time); `task` is the name imported above
# and matches the '/api/task' prefix.
app.register_blueprint(task, url_prefix='/api/task')
app.register_blueprint(script, url_prefix='/api/script')
app.register_blueprint(user, url_prefix='/api/user')
@app.route('/', methods=['GET'])
def index():
    """Render the landing page; `g.appname` is set by the before_request hook."""
    return render_template('index.html', appname=g.appname)
# @app.context_processor
# def override_url_for():
# return dict(url_for=static_url_for)
# def static_url_for(endpoint, **values):
# if endpoint == 'static':
# filename = values.get('filename', None)
# if filename:
# file_path = STATIC_URL_ROOT + filename
# return file_path
# else:
# return url_for(endpoint, **values)
class RegexConverter(BaseConverter):
    """URL converter taking its pattern inline from the route, e.g.
    /<regex("[a-z]+"):name>."""

    def __init__(self, map, *args):
        # Run BaseConverter's own initialization (instead of assigning
        # self.map directly), then override the match pattern with the
        # regex supplied in the route definition.
        super(RegexConverter, self).__init__(map)
        self.regex = args[0]

app.url_map.converters['regex'] = RegexConverter
# def after_this_request(f):
# if not hasattr(g, 'after_request_callbacks'):
# g.after_request_callbacks = []
# g.after_request_callbacks.append(f)
# return f
@app.before_request
def is_login():
    """Pre-request hook: stores the app name on `g` and attaches a
    hard-coded developer user to the request. The session-based login
    check below is currently disabled (commented out)."""
    # sid = request.cookies.get('sid')
    # user = session.get(sid, None)
    g.appname = 'pholcus'
    # flag = request.url == request.url_root or '/task/data/' in request.url or '/static/' in request.url or '/login' in request.url or '/register' in request.url
    # if '/api/' in request.url and user is None:
    #     user = {'status': 1, '_id': '7', 'group': 'developer', 'name': 'root'}
    # request.sid = sid
    # request.user = user
    # if flag:
    #     pass
    # elif user is None:
    #     return redirect('/api/a/login')
    # elif not user.get('status') == 1:
    #     return redirect('/api/a/info')
    # else:
    #     pass
    request.user = {'status': 1, '_id': '7', 'group': 'developer', 'name': 'root'}
# @app.after_request
# def call_after_request_callbacks(response):
# pass
if __name__ == "__main__":
    from werkzeug.serving import run_simple
    print("Launching server at port %d" % useport)
    # use_reloader restarts on code changes; passthrough_errors disables
    # the debugger's error trapping so exceptions surface directly.
    run_simple('0.0.0.0', useport, app, use_reloader=True,
               passthrough_errors=True, threaded=True)
    print("Server successfully terminated")  # typo fix: was "sucessfully"
# fix bug  (stray VCS commit message; commented out so the module parses)
#!/usr/bin/env python
# coding=utf8
import json, sys, os, datetime
from setting import useport, CACHE_TIMEOUT
from flask import Flask, g, request, Response, session, redirect, render_template
from flask.templating import DispatchingJinjaLoader
from flask.globals import _request_ctx_stack
from flask.ext.sqlalchemy import SQLAlchemy
from werkzeug.contrib.cache import SimpleCache
from werkzeug.routing import BaseConverter
from util.session import Session
from blueprint.task.views import task
from blueprint.script.views import script
from blueprint.user.views import user
cache = SimpleCache()
def cached(func):
def decorator(*args, **kwargs):
key = request.path + '&'.join(['%s=%s'%(k,v.encode('utf8')) for k,v in
request.args.items()])
response = cache.get(key)
if response is None:
print 'call func:', key
response = func(*args, **kwargs)
cache.set(key, response, CACHE_TIMEOUT)
return response
return decorator
class LeafinlineLoader(DispatchingJinjaLoader):
    """Jinja loader that searches the current blueprint's template folder
    first, then falls back to the application-wide loader."""

    def _iter_loaders(self, template):
        blueprint = _request_ctx_stack.top.request.blueprint
        if blueprint is not None and blueprint in self.app.blueprints:
            bp_loader = self.app.blueprints[blueprint].jinja_loader
            if bp_loader is not None:
                yield bp_loader, template
        app_loader = self.app.jinja_loader
        if app_loader is not None:
            yield app_loader, template
app = Flask(__name__, static_folder='static', static_path='/static', template_folder='template')
app.config.from_object(__name__)
# SECURITY NOTE: hard-coded secret key; load it from config/environment in
# production instead of shipping it in source.
app.secret_key = 'super secret key'
app.config['SESSION_TYPE'] = 'redis'
app.config['SESSION_PERMANENT'] = False
app.permanent_session_lifetime = datetime.timedelta(days=1)
Session(app)
# app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://hotel2:hotel0115@58.83.130.112:3306/hotel20'
# db = SQLAlchemy(app)
# g['db'] = db
app.jinja_options = Flask.jinja_options.copy()
app.jinja_options['loader'] = LeafinlineLoader(app)
# Bug fix: the task blueprint was registered under the undefined name
# `admin` (NameError at import time); `task` is the name imported above
# and matches the '/api/task' prefix.
app.register_blueprint(task, url_prefix='/api/task')
app.register_blueprint(script, url_prefix='/api/script')
app.register_blueprint(user, url_prefix='/api/user')
@app.route('/', methods=['GET'])
def index():
    """Render the landing page; `g.appname` is set by the before_request hook."""
    return render_template('index.html', appname=g.appname)
def allow_cross_domain(fun):
    """Decorator adding `Access-Control-Allow-Origin: *` to a view's response.

    Plain-string return values are first wrapped in a response object so the
    header can be attached.
    """
    # Bug fix: `wraps` was never imported at module scope (NameError).
    from functools import wraps

    @wraps(fun)
    def wrapper_fun(*args, **kwargs):
        rst = fun(*args, **kwargs)
        if type(rst) == str:
            # Bug fix: `make_response` was never imported, and the original
            # called fun() a *second* time here, executing the view (and any
            # side effects) twice per request. Wrap the existing result.
            from flask import make_response
            rst = make_response(rst)
        rst.headers['Access-Control-Allow-Origin'] = '*'
        return rst
    return wrapper_fun
class CJsonEncoder(json.JSONEncoder):
    """JSON encoder that serializes datetime/date values as formatted strings."""

    def default(self, obj):
        # Bug fix: isinstance() was tested against the `datetime` *module*
        # (TypeError) and the unimported name `date` (NameError). Check
        # datetime.datetime before datetime.date: datetime is a date subclass.
        if isinstance(obj, datetime.datetime):
            return obj.strftime('%Y-%m-%d %H:%M:%S')
        elif isinstance(obj, datetime.date):
            return obj.strftime('%Y-%m-%d')
        else:
            # Defer to the base class, which raises TypeError for
            # unsupported types.
            return json.JSONEncoder.default(self, obj)
# @app.context_processor
# def override_url_for():
# return dict(url_for=static_url_for)
# def static_url_for(endpoint, **values):
# if endpoint == 'static':
# filename = values.get('filename', None)
# if filename:
# file_path = STATIC_URL_ROOT + filename
# return file_path
# else:
# return url_for(endpoint, **values)
class RegexConverter(BaseConverter):
    """URL converter taking its pattern inline from the route, e.g.
    /<regex("[a-z]+"):name>."""

    def __init__(self, map, *args):
        # Run BaseConverter's own initialization (instead of assigning
        # self.map directly), then override the match pattern with the
        # regex supplied in the route definition.
        super(RegexConverter, self).__init__(map)
        self.regex = args[0]

app.url_map.converters['regex'] = RegexConverter
# def after_this_request(f):
# if not hasattr(g, 'after_request_callbacks'):
# g.after_request_callbacks = []
# g.after_request_callbacks.append(f)
# return f
@app.before_request
def is_login():
    """Pre-request hook: stores the app name on `g` and attaches a
    hard-coded developer user to the request. The session-based login
    check below is currently disabled (commented out)."""
    # sid = request.cookies.get('sid')
    # user = session.get(sid, None)
    g.appname = 'pholcus'
    # flag = request.url == request.url_root or '/task/data/' in request.url or '/static/' in request.url or '/login' in request.url or '/register' in request.url
    # if '/api/' in request.url and user is None:
    #     user = {'status': 1, '_id': '7', 'group': 'developer', 'name': 'root'}
    # request.sid = sid
    # request.user = user
    # if flag:
    #     pass
    # elif user is None:
    #     return redirect('/api/a/login')
    # elif not user.get('status') == 1:
    #     return redirect('/api/a/info')
    # else:
    #     pass
    request.user = {'status': 1, '_id': '7', 'group': 'developer', 'name': 'root'}
# @app.after_request
# def call_after_request_callbacks(response):
# pass
if __name__ == "__main__":
    from werkzeug.serving import run_simple
    print("Launching server at port %d" % useport)
    # use_reloader restarts on code changes; passthrough_errors disables
    # the debugger's error trapping so exceptions surface directly.
    run_simple('0.0.0.0', useport, app, use_reloader=True,
               passthrough_errors=True, threaded=True)
    print("Server successfully terminated")  # typo fix: was "sucessfully"
# --- (stray file-boundary artifact; commented out so the file parses) ---
'''
This module provides utilities to get the absolute filenames so that we can be sure that:
- The case of a file will match the actual file in the filesystem (otherwise breakpoints won't be hit).
- Providing means for the user to make path conversions when doing a remote debugging session in
one machine and debugging in another.
To do that, the PATHS_FROM_CLIENT_TO_SERVER constant must be filled with the appropriate paths.
E.g.:
If the server has the structure
/user/projects/my_project/src/package/module1.py
and the client has:
c:\my_project\src\package\module1.py
the PATHS_FROM_CLIENT_TO_SERVER would have to be:
PATHS_FROM_CLIENT_TO_SERVER = [(r'c:\my_project\src', r'/user/projects/my_project/src')]
@note: DEBUG_CLIENT_SERVER_TRANSLATION can be set to True to debug the result of those translations
@note: the case of the paths is important! Note that this can be tricky to get right when one machine
uses a case-independent filesystem and the other uses a case-dependent filesystem (if the system being
debugged is case-independent, 'normcase()' should be used on the paths defined in PATHS_FROM_CLIENT_TO_SERVER).
@note: all the paths with breakpoints must be translated (otherwise they won't be found in the server)
@note: to enable remote debugging in the target machine (pydev extensions in the eclipse installation)
import pydevd;pydevd.settrace(host, stdoutToServer, stderrToServer, port, suspend)
see parameter docs on pydevd.py
@note: for doing a remote debugging session, all the pydevd_ files must be on the server accessible
through the PYTHONPATH (and the PATHS_FROM_CLIENT_TO_SERVER only needs to be set on the target
machine for the paths that'll actually have breakpoints).
'''
from pydevd_constants import * #@UnusedWildImport
import os.path
import sys
import traceback
# Local aliases for the os.path helpers used throughout this module.
normcase = os.path.normcase
basename = os.path.basename
exists = os.path.exists
join = os.path.join
try:
    rPath = os.path.realpath #@UndefinedVariable
except:
    # jython does not support os.path.realpath
    # realpath is a no-op on systems without islink support
    rPath = os.path.abspath
#defined as a list of tuples where the 1st element of the tuple is the path in the client machine
#and the 2nd element is the path in the server machine.
#see module docstring for more details.
PATHS_FROM_CLIENT_TO_SERVER = []
#example:
#PATHS_FROM_CLIENT_TO_SERVER = [
#(normcase(r'd:\temp\temp_workspace_2\test_python\src\yyy\yyy'),
# normcase(r'd:\temp\temp_workspace_2\test_python\src\hhh\xxx'))]
# When True, each client<->server path translation is logged to stderr.
DEBUG_CLIENT_SERVER_TRANSLATION = False
#caches filled as requested during the debug session
NORM_FILENAME_CONTAINER = {}
NORM_FILENAME_AND_BASE_CONTAINER = {}
NORM_FILENAME_TO_SERVER_CONTAINER = {}
NORM_FILENAME_TO_CLIENT_CONTAINER = {}
def _NormFile(filename):
    """Return the normalized (realpath + normcase) form of *filename*,
    memoized in NORM_FILENAME_CONTAINER for fast repeated lookups."""
    normalized = NORM_FILENAME_CONTAINER.get(filename)
    if normalized is None:
        normalized = normcase(rPath(filename))
        NORM_FILENAME_CONTAINER[filename] = normalized
    return normalized
#Now, let's do a quick test to see if we're working with a version of python that has no problems
#related to the names generated...
try:
    try:
        # CPython 2 exposes a function's code object as func_code.
        code = rPath.func_code
    except AttributeError:
        code = rPath.__code__
    if not exists(_NormFile(code.co_filename)):
        sys.stderr.write('-------------------------------------------------------------------------------\n')
        sys.stderr.write('pydev debugger: CRITICAL WARNING: This version of python seems to be incorrectly compiled (internal generated filenames are not absolute)\n')
        sys.stderr.write('pydev debugger: The debugger may still function, but it will work slower and may miss breakpoints.\n')
        sys.stderr.write('pydev debugger: Related bug: http://bugs.python.org/issue1666807\n')
        sys.stderr.write('-------------------------------------------------------------------------------\n')
        initial_norm_file = _NormFile
        def _NormFile(filename): #Let's redefine _NormFile to work with paths that may be incorrect
            ret = initial_norm_file(filename)
            if not exists(ret):
                #We must actually go on and check if we can find it as if it was a relative path for some of the paths in the pythonpath
                for path in sys.path:
                    ret = initial_norm_file(join(path, filename))
                    if exists(ret):
                        break
                else:
                    # for/else: no sys.path entry resolved the file.
                    sys.stderr.write('pydev debugger: Unable to find real location for: %s\n' % (filename,))
                    ret = filename
            return ret
except:
    #Don't fail if there's something not correct here -- but at least print it to the user so that we can correct that
    traceback.print_exc()
if PATHS_FROM_CLIENT_TO_SERVER:
    #only setup translation functions if absolutely needed!
    def NormFileToServer(filename):
        """Translate a client-side path to the equivalent normalized
        server-side path, memoizing the result."""
        try:
            return NORM_FILENAME_TO_SERVER_CONTAINER[filename]
        except KeyError:
            #used to translate a path from the client to the debug server
            translated = normcase(filename)
            for client_prefix, server_prefix in PATHS_FROM_CLIENT_TO_SERVER:
                if translated.startswith(client_prefix):
                    if DEBUG_CLIENT_SERVER_TRANSLATION:
                        sys.stderr.write('pydev debugger: replacing to server: %s\n' % (translated,))
                    translated = translated.replace(client_prefix, server_prefix)
                    if DEBUG_CLIENT_SERVER_TRANSLATION:
                        sys.stderr.write('pydev debugger: sent to server: %s\n' % (translated,))
                    break
            else:
                if DEBUG_CLIENT_SERVER_TRANSLATION:
                    sys.stderr.write('pydev debugger: unable to find matching prefix for: %s in %s\n' % \
                        (translated, [x[0] for x in PATHS_FROM_CLIENT_TO_SERVER]))
            ret = _NormFile(translated)
            # Bug fix: the cache previously stored `translated` while this
            # call returned `ret` (the _NormFile'd path), so repeated calls
            # returned a different value than the first one. Store what we
            # return, matching NormFileToClient below.
            NORM_FILENAME_TO_SERVER_CONTAINER[filename] = ret
            return ret
    def NormFileToClient(filename):
        """Translate a server-side path back to the normalized client-side
        path, memoizing the result."""
        try:
            return NORM_FILENAME_TO_CLIENT_CONTAINER[filename]
        except KeyError:
            #used to translate a path from the debug server to the client
            translated = normcase(filename)
            for client_prefix, server_prefix in PATHS_FROM_CLIENT_TO_SERVER:
                if translated.startswith(server_prefix):
                    if DEBUG_CLIENT_SERVER_TRANSLATION:
                        sys.stderr.write('pydev debugger: replacing to client: %s\n' % (translated,))
                    translated = translated.replace(server_prefix, client_prefix)
                    if DEBUG_CLIENT_SERVER_TRANSLATION:
                        sys.stderr.write('pydev debugger: sent to client: %s\n' % (translated,))
                    break
            else:
                if DEBUG_CLIENT_SERVER_TRANSLATION:
                    sys.stderr.write('pydev debugger: unable to find matching prefix for: %s in %s\n' % \
                        (translated, [x[1] for x in PATHS_FROM_CLIENT_TO_SERVER]))
            ret = _NormFile(translated)
            NORM_FILENAME_TO_CLIENT_CONTAINER[filename] = ret
            return ret
else:
    #no translation step needed (just inline the calls)
    NormFileToClient = _NormFile
    NormFileToServer = _NormFile
def GetFilenameAndBase(frame):
    '''Return (normalized_filename, basename) for the frame's code object.

    Memoized per raw co_filename.
    '''
    #This one is just internal (so, does not need any kind of client-server translation)
    raw = frame.f_code.co_filename
    cached = NORM_FILENAME_AND_BASE_CONTAINER.get(raw)
    if cached is None:
        normalized = _NormFile(raw)
        cached = normalized, basename(normalized)
        NORM_FILENAME_AND_BASE_CONTAINER[raw] = cached
    return cached
Properly translating slashes on client/server debug. https://sourceforge.net/tracker/?func=detail&aid=2900544&group_id=85796&atid=577329
'''
This module provides utilities to get the absolute filenames so that we can be sure that:
- The case of a file will match the actual file in the filesystem (otherwise breakpoints won't be hit).
- Providing means for the user to make path conversions when doing a remote debugging session in
one machine and debugging in another.
To do that, the PATHS_FROM_CLIENT_TO_SERVER constant must be filled with the appropriate paths.
E.g.:
If the server has the structure
/user/projects/my_project/src/package/module1.py
and the client has:
c:\my_project\src\package\module1.py
the PATHS_FROM_CLIENT_TO_SERVER would have to be:
PATHS_FROM_CLIENT_TO_SERVER = [(r'c:\my_project\src', r'/user/projects/my_project/src')]
@note: DEBUG_CLIENT_SERVER_TRANSLATION can be set to True to debug the result of those translations
@note: the case of the paths is important! Note that this can be tricky to get right when one machine
uses a case-independent filesystem and the other uses a case-dependent filesystem (if the system being
debugged is case-independent, 'normcase()' should be used on the paths defined in PATHS_FROM_CLIENT_TO_SERVER).
@note: all the paths with breakpoints must be translated (otherwise they won't be found in the server)
@note: to enable remote debugging in the target machine (pydev extensions in the eclipse installation)
import pydevd;pydevd.settrace(host, stdoutToServer, stderrToServer, port, suspend)
see parameter docs on pydevd.py
@note: for doing a remote debugging session, all the pydevd_ files must be on the server accessible
through the PYTHONPATH (and the PATHS_FROM_CLIENT_TO_SERVER only needs to be set on the target
machine for the paths that'll actually have breakpoints).
'''
from pydevd_constants import * #@UnusedWildImport
import os.path
import sys
import traceback
# Local aliases so hot-path lookups avoid repeated os.path attribute access.
normcase = os.path.normcase
basename = os.path.basename
exists = os.path.exists
join = os.path.join
try:
    rPath = os.path.realpath #@UndefinedVariable
except:
    # jython does not support os.path.realpath
    # realpath is a no-op on systems without islink support
    rPath = os.path.abspath
#defined as a list of tuples where the 1st element of the tuple is the path in the client machine
#and the 2nd element is the path in the server machine.
#see module docstring for more details.
PATHS_FROM_CLIENT_TO_SERVER = []
#example:
#PATHS_FROM_CLIENT_TO_SERVER = [
#(normcase(r'd:\temp\temp_workspace_2\test_python\src\yyy\yyy'),
# normcase(r'd:\temp\temp_workspace_2\test_python\src\hhh\xxx'))]
# When True, each translation writes a diagnostic line to stderr.
DEBUG_CLIENT_SERVER_TRANSLATION = False
#caches filled as requested during the debug session
# Memoization caches (raw filename -> computed result), filled lazily as the
# debug session requests translations.
NORM_FILENAME_CONTAINER = {}
NORM_FILENAME_AND_BASE_CONTAINER = {}
NORM_FILENAME_TO_SERVER_CONTAINER = {}
NORM_FILENAME_TO_CLIENT_CONTAINER = {}
def _NormFile(filename):
    '''Normalize filename to an absolute, case-normalized path, with caching.'''
    if filename not in NORM_FILENAME_CONTAINER:
        #cache it for fast access later
        NORM_FILENAME_CONTAINER[filename] = normcase(rPath(filename))
    return NORM_FILENAME_CONTAINER[filename]
#Now, let's do a quick test to see if we're working with a version of python that has no problems
#related to the names generated...
# Detects interpreters whose internally generated co_filename values are not
# absolute (python bug 1666807) and installs a slower fallback _NormFile.
try:
    try:
        # Python 2 attribute name for the code object.
        code = rPath.func_code
    except AttributeError:
        # Python 3 attribute name.
        code = rPath.__code__
    if not exists(_NormFile(code.co_filename)):
        sys.stderr.write('-------------------------------------------------------------------------------\n')
        sys.stderr.write('pydev debugger: CRITICAL WARNING: This version of python seems to be incorrectly compiled (internal generated filenames are not absolute)\n')
        sys.stderr.write('pydev debugger: The debugger may still function, but it will work slower and may miss breakpoints.\n')
        sys.stderr.write('pydev debugger: Related bug: http://bugs.python.org/issue1666807\n')
        sys.stderr.write('-------------------------------------------------------------------------------\n')
        # Keep the original implementation; the replacement delegates to it.
        initial_norm_file = _NormFile
        def _NormFile(filename): #Let's redefine _NormFile to work with paths that may be incorrect
            ret = initial_norm_file(filename)
            if not exists(ret):
                #We must actually go on and check if we can find it as if it was a relative path for some of the paths in the pythonpath
                for path in sys.path:
                    ret = initial_norm_file(join(path, filename))
                    if exists(ret):
                        break
                else:
                    # for/else: exhausted sys.path without finding the file.
                    sys.stderr.write('pydev debugger: Unable to find real location for: %s\n' % (filename,))
                    ret = filename
            return ret
except:
    #Don't fail if there's something not correct here -- but at least print it to the user so that we can correct that
    traceback.print_exc()
if PATHS_FROM_CLIENT_TO_SERVER:
    #Work on the client and server slashes.
    # Detect which path separator each side uses by inspecting the first
    # configured prefix that contains one.
    client_sep = None
    server_sep = None
    for client_prefix, server_prefix in PATHS_FROM_CLIENT_TO_SERVER:
        if client_sep is not None and server_sep is not None:
            break
        if client_sep is None:
            for c in client_prefix:
                if c in ('/', '\\'):
                    client_sep = c
                    break
        if server_sep is None:
            for c in server_prefix:
                if c in ('/', '\\'):
                    server_sep = c
                    break

    #only setup translation functions if absolutely needed!
    def NormFileToServer(filename):
        '''Translate a client-side path into its server-side equivalent.

        Replaces the first matching client prefix, normalizes, then swaps
        path separators to the client convention.  Results are memoized.
        '''
        try:
            return NORM_FILENAME_TO_SERVER_CONTAINER[filename]
        except KeyError:
            #used to translate a path from the client to the debug server
            translated = normcase(filename)
            for client_prefix, server_prefix in PATHS_FROM_CLIENT_TO_SERVER:
                if translated.startswith(client_prefix):
                    if DEBUG_CLIENT_SERVER_TRANSLATION:
                        sys.stderr.write('pydev debugger: replacing to server: %s\n' % (translated,))
                    translated = translated.replace(client_prefix, server_prefix)
                    if DEBUG_CLIENT_SERVER_TRANSLATION:
                        sys.stderr.write('pydev debugger: sent to server: %s\n' % (translated,))
                    break
            else:
                if DEBUG_CLIENT_SERVER_TRANSLATION:
                    sys.stderr.write('pydev debugger: to server: unable to find matching prefix for: %s in %s\n' % \
                        (translated, [x[0] for x in PATHS_FROM_CLIENT_TO_SERVER]))
            ret = _NormFile(translated)
            if client_sep is not None and server_sep is not None and client_sep != server_sep:
                ret = ret.replace(server_sep, client_sep)
            # Bug fix: cache the fully processed result (ret).  Previously the
            # intermediate 'translated' string was cached, so a cache hit
            # skipped both _NormFile and the separator replacement and
            # returned a different value than the first call.
            NORM_FILENAME_TO_SERVER_CONTAINER[filename] = ret
            return ret

    def NormFileToClient(filename):
        '''Translate a server-side path into its client-side equivalent (memoized).'''
        try:
            return NORM_FILENAME_TO_CLIENT_CONTAINER[filename]
        except KeyError:
            #used to translate a path from the debug server to the client
            translated = normcase(filename)
            for client_prefix, server_prefix in PATHS_FROM_CLIENT_TO_SERVER:
                if translated.startswith(server_prefix):
                    if DEBUG_CLIENT_SERVER_TRANSLATION:
                        sys.stderr.write('pydev debugger: replacing to client: %s\n' % (translated,))
                    translated = translated.replace(server_prefix, client_prefix)
                    if DEBUG_CLIENT_SERVER_TRANSLATION:
                        sys.stderr.write('pydev debugger: sent to client: %s\n' % (translated,))
                    break
            else:
                if DEBUG_CLIENT_SERVER_TRANSLATION:
                    sys.stderr.write('pydev debugger: to client: unable to find matching prefix for: %s in %s\n' % \
                        (translated, [x[1] for x in PATHS_FROM_CLIENT_TO_SERVER]))
            ret = _NormFile(translated)
            if client_sep is not None and server_sep is not None and client_sep != server_sep:
                ret = ret.replace(client_sep, server_sep)
            NORM_FILENAME_TO_CLIENT_CONTAINER[filename] = ret
            return ret
else:
    #no translation step needed (just inline the calls)
    NormFileToClient = _NormFile
    NormFileToServer = _NormFile
def GetFilenameAndBase(frame):
    """Return (normalized filename, basename) for frame's code object, memoized."""
    #This one is just internal (so, does not need any kind of client-server translation)
    f = frame.f_code.co_filename
    try:
        return NORM_FILENAME_AND_BASE_CONTAINER[f]
    except KeyError:
        filename = _NormFile(f)
        base = basename(filename)
        # Cache the pair so later frames from the same file are O(1).
        NORM_FILENAME_AND_BASE_CONTAINER[f] = filename, base
        return filename, base
|
import cufflinks as cf
import pydash as _
from plotly import (
graph_objs as go,
offline as py,
)
from unity_lab.lib import util
cf.set_config_file(offline=True, world_readable=False)
py.init_notebook_mode(connected=True)
def create_layout(
        title, y_title, x_title, x_type=None,
        width=500, height=350):
    '''simplified method to generate Layout'''
    # Build the axis dicts up front, then hand everything to go.Layout.
    y_axis = dict(rangemode='tozero', title=y_title)
    x_axis = dict(type=x_type, title=x_title)
    legend = dict(x=0.0, y=-0.2, orientation='h')
    return go.Layout(
        title=title,
        legend=legend,
        yaxis=y_axis,
        xaxis=x_axis,
        width=width, height=height,
        margin=go.Margin(l=70, r=70, t=70, b=70),
    )
def create_label(
        y_col, x_col,
        title=None, y_title=None, x_title=None, legend_name=None):
    '''Create label dict for go.Layout with smart resolution'''
    # Fall back to the column names when explicit titles are not given.
    y_title = y_title or y_col
    x_title = x_title or x_col
    title = title or f'{y_title} vs {x_title}'
    legend_name = legend_name or y_col
    # Normalize scalars into lists so callers may pass either form.
    wrapped = _.map_([y_col, x_col, legend_name], util.wrap_list)
    y_col_list, x_col_list, legend_name_list = wrapped
    return {
        'y_title': y_title,
        'x_title': x_title,
        'title': title,
        'y_col_list': y_col_list,
        'x_col_list': x_col_list,
        'legend_name_list': legend_name_list,
    }
def plot_scatter(
        df, y_col, x_col=None,
        title=None, y_title=None, x_title=None, x_type=None,
        legend_name=None, width=500, height=350, draw=True):
    '''Draw scatter plot from df'''
    # Copy so the synthetic 'index' column below never mutates the caller's frame.
    df = df.copy()
    if x_col is None:
        x_col = 'index'
        df['index'] = df.index.tolist()
    label = create_label(
        y_col, x_col, title, y_title, x_title, legend_name)
    layout = create_layout(
        title=label['title'], y_title=label['y_title'],
        x_title=label['x_title'], x_type=x_type,
        width=width, height=height)
    data = []
    for idx, y_c in enumerate(label['y_col_list']):
        # When fewer x columns than y columns were given, reuse the shared x_col.
        x_c = _.get(label['x_col_list'], idx, default=x_col)
        trace = go.Scatter(
            y=df[y_c], x=df[x_c],
            name=label['legend_name_list'][idx],
        )
        data.append(trace)
    figure = go.Figure(data=data, layout=layout)
    if draw:
        # Render inline (notebook); the figure is returned either way.
        py.iplot(figure)
    return figure
Sort methods alphabetically.
import cufflinks as cf
import pydash as _
from plotly import (
graph_objs as go,
offline as py,
)
from unity_lab.lib import util
cf.set_config_file(offline=True, world_readable=False)
py.init_notebook_mode(connected=True)
def create_label(
        y_col, x_col,
        title=None, y_title=None, x_title=None, legend_name=None):
    '''Create label dict for go.Layout with smart resolution'''
    # Titles default to the corresponding column names when not supplied.
    y_title = y_title or y_col
    x_title = x_title or x_col
    title = title or f'{y_title} vs {x_title}'
    legend_name = legend_name or y_col
    # Wrap scalars into lists so downstream code can always iterate.
    y_col_list, x_col_list, legend_name_list = _.map_(
        [y_col, x_col, legend_name], util.wrap_list)
    label = {
        'y_title': y_title,
        'x_title': x_title,
        'title': title,
        'y_col_list': y_col_list,
        'x_col_list': x_col_list,
        'legend_name_list': legend_name_list,
    }
    return label
def create_layout(
        title, y_title, x_title, x_type=None,
        width=500, height=350):
    '''simplified method to generate Layout'''
    # Legend below the plot; y axis anchored at zero.
    layout = go.Layout(
        title=title,
        legend=dict(x=0.0, y=-0.2, orientation='h'),
        yaxis=dict(rangemode='tozero', title=y_title),
        xaxis=dict(type=x_type, title=x_title),
        width=width, height=height,
        margin=go.Margin(l=70, r=70, t=70, b=70),
    )
    return layout
def plot_scatter(
        df, y_col, x_col=None,
        title=None, y_title=None, x_title=None, x_type=None,
        legend_name=None, width=500, height=350, draw=True):
    '''Draw scatter plot from df'''
    # Never mutate the caller's DataFrame.
    df = df.copy()
    if x_col is None:
        x_col = 'index'
        df['index'] = df.index.tolist()
    label = create_label(
        y_col, x_col, title, y_title, x_title, legend_name)
    layout = create_layout(
        title=label['title'], y_title=label['y_title'],
        x_title=label['x_title'], x_type=x_type,
        width=width, height=height)
    # One trace per y column; reuse the shared x column when there is no
    # per-trace x column at that index.
    traces = []
    for idx, y_name in enumerate(label['y_col_list']):
        x_name = _.get(label['x_col_list'], idx, default=x_col)
        traces.append(go.Scatter(
            y=df[y_name], x=df[x_name],
            name=label['legend_name_list'][idx],
        ))
    figure = go.Figure(data=traces, layout=layout)
    if draw:
        py.iplot(figure)
    return figure
|
import uuid
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import View
from rest_framework import status
from rest_framework import viewsets
from rest_framework.response import Response
from rest_framework.parsers import JSONParser
from rest_framework.permissions import IsAuthenticated
from rest_framework.authentication import TokenAuthentication, SessionAuthentication
from .models import Entry, Journal
from .serializers import EntrySerializer, JournalSerializer
class BaseViewSet(viewsets.ModelViewSet):
    """Shared authentication/permission setup for the journal API viewsets."""
    authentication_classes = (TokenAuthentication, SessionAuthentication)
    permission_classes = (IsAuthenticated,)

    def get_user_queryset(self, queryset, user):
        """Restrict queryset to rows whose journal is owned by *user*.

        Bug fix: honour the explicit ``user`` argument instead of silently
        re-reading ``self.request.user``.  Existing callers pass
        ``self.request.user``, so behavior is unchanged for them.
        """
        return queryset.filter(journal__owner=user)
class JournalViewSet(BaseViewSet):
    """CRUD endpoints for the requesting user's journals (soft delete)."""
    allowed_methods = ['GET', 'PUT', 'DELETE']
    queryset = Journal.objects.all()
    serializer_class = JournalSerializer
    lookup_field = 'uuid'

    def get_queryset(self):
        # Only journals owned by the requester that are not soft-deleted.
        queryset = type(self).queryset
        return queryset.filter(owner=self.request.user, deleted=False)

    def destroy(self, request, uuid=None):
        """Soft-delete the journal (flag it, keep the row)."""
        journal = self.get_object()
        journal.deleted = True
        journal.save()
        # Bug fix: a successful DELETE with no response body should be
        # 204 No Content, not 200 with an empty JSON object.
        return Response(status=status.HTTP_204_NO_CONTENT)

    def put(self, request):
        """Create a journal owned by the requesting user."""
        serializer = self.serializer_class(data=request.data)
        if serializer.is_valid():
            serializer.save(owner=self.request.user)
            return Response({}, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class EntryViewSet(BaseViewSet):
    """Read/append endpoints for entries within one journal."""
    allowed_methods = ['GET', 'PUT']
    queryset = Entry.objects.all()
    serializer_class = EntrySerializer
    lookup_field = 'uuid'

    def get_queryset(self):
        queryset = type(self).queryset
        queryset = self.get_user_queryset(queryset, self.request.user)
        journal = uuid.UUID(self.kwargs['journal'])
        return queryset.filter(journal__uuid=journal)

    def list(self, request, journal):
        """List entries; with ``?last=<uuid>`` only entries after that one."""
        last = request.query_params.get('last', None)
        if last is not None:
            queryset = self.get_queryset()
            last_entry = queryset.get(uuid=last)
            queryset = queryset.filter(id__gt=last_entry.id)
            serializer = self.serializer_class(queryset, many=True)
            return Response(serializer.data)
        # Bug fix: super().list is already bound to self; passing self again
        # shifted the arguments (request ended up in *args).
        return super().list(request)

    def put(self, request, journal):
        """Bulk-append entries into the given journal."""
        journal = uuid.UUID(journal)
        journal_object = Journal.objects.get(uuid=journal, owner=self.request.user)
        serializer = self.serializer_class(data=request.data, many=True)
        if serializer.is_valid():
            serializer.save(journal=journal_object)
            return Response({}, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
Journal: return 204 when deleting.
import uuid
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import View
from rest_framework import status
from rest_framework import viewsets
from rest_framework.response import Response
from rest_framework.parsers import JSONParser
from rest_framework.permissions import IsAuthenticated
from rest_framework.authentication import TokenAuthentication, SessionAuthentication
from .models import Entry, Journal
from .serializers import EntrySerializer, JournalSerializer
class BaseViewSet(viewsets.ModelViewSet):
    """Shared authentication/permission setup for the journal API viewsets."""
    authentication_classes = (TokenAuthentication, SessionAuthentication)
    permission_classes = (IsAuthenticated,)

    def get_user_queryset(self, queryset, user):
        """Restrict queryset to rows whose journal is owned by *user*.

        Bug fix: use the ``user`` argument rather than re-reading
        ``self.request.user`` (callers already pass ``request.user``, so the
        result is unchanged for existing call sites).
        """
        return queryset.filter(journal__owner=user)
class JournalViewSet(BaseViewSet):
    """CRUD endpoints for the requesting user's journals (soft delete)."""
    allowed_methods = ['GET', 'PUT', 'DELETE']
    queryset = Journal.objects.all()
    serializer_class = JournalSerializer
    lookup_field = 'uuid'

    def get_queryset(self):
        # Only the requester's journals that are not soft-deleted.
        queryset = type(self).queryset
        return queryset.filter(owner=self.request.user, deleted=False)

    def destroy(self, request, uuid=None):
        """Soft-delete: flag the journal rather than removing the row."""
        journal = self.get_object()
        journal.deleted = True
        journal.save()
        return Response(status=status.HTTP_204_NO_CONTENT)

    def put(self, request):
        """Create a journal owned by the requesting user."""
        serializer = self.serializer_class(data=request.data)
        if serializer.is_valid():
            serializer.save(owner=self.request.user)
            return Response({}, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class EntryViewSet(BaseViewSet):
    """Read/append endpoints for entries within one journal."""
    allowed_methods = ['GET', 'PUT']
    queryset = Entry.objects.all()
    serializer_class = EntrySerializer
    lookup_field = 'uuid'

    def get_queryset(self):
        queryset = type(self).queryset
        queryset = self.get_user_queryset(queryset, self.request.user)
        journal = uuid.UUID(self.kwargs['journal'])
        return queryset.filter(journal__uuid=journal)

    def list(self, request, journal):
        """List entries; with ``?last=<uuid>`` only entries after that one."""
        last = request.query_params.get('last', None)
        if last is not None:
            queryset = self.get_queryset()
            last_entry = queryset.get(uuid=last)
            queryset = queryset.filter(id__gt=last_entry.id)
            serializer = self.serializer_class(queryset, many=True)
            return Response(serializer.data)
        # Bug fix: super().list is already bound; passing self again shifted
        # the arguments (request became part of *args).
        return super().list(request)

    def put(self, request, journal):
        """Bulk-append entries into the given journal."""
        journal = uuid.UUID(journal)
        journal_object = Journal.objects.get(uuid=journal, owner=self.request.user)
        serializer = self.serializer_class(data=request.data, many=True)
        if serializer.is_valid():
            serializer.save(journal=journal_object)
            return Response({}, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
|
from django.shortcuts import render
from django.http import JsonResponse
from django.core.management import call_command
from rest_framework.response import Response
from rest_framework import status
from rest_framework.decorators import api_view
from ipware.ip import get_ip
from .models import CaptchaToken, CaptchaSession, TextCaptchaSession, ImageCaptchaSession, ImageCaptchaToken, TextCaptchaToken
from random import randint
from PIL import Image
import uuid
import zipfile
import shutil
import os
@api_view(['GET'])
def request(request):
    """Open a new text-captcha session for the caller's IP and return its response.

    NOTE(review): the view name shadows its own ``request`` parameter; it
    matches the URLconf but is worth renaming eventually.
    """
    remote_ip = get_ip(request)
    session = TextCaptchaSession()
    # create() returns both the (possibly replaced) session and the HTTP response.
    session, response = session.create(remote_ip)
    session.save()
    return response
@api_view(['POST'])
def validate(request):
    """Validate a captcha answer against its session.

    NOTE(review): _retrieve_corresponding_session can return a DRF Response
    (unknown session / IP mismatch) instead of a session object; calling
    .validate() on it would raise AttributeError — confirm and handle.
    """
    params = request.POST
    session_key = params.get('session_key', None)
    session = _retrieve_corresponding_session(session_key, request)
    response = session.validate(params)
    return response
@api_view(['POST'])
def renew(request):
    """Re-issue the captcha for an existing session."""
    params = request.POST
    session_key = params.get('session_key', None)
    # Reject requests that did not supply a session key.
    if _any_parameter_unset(session_key):
        return Response(status=status.HTTP_400_BAD_REQUEST)
    session = _retrieve_corresponding_session(session_key, request)
    return session.renew()
@api_view(['POST'])
def upload(request):
    """Bulk-import captcha images from an uploaded zip archive (Python 2 view).

    Expects POST fields 'captchatype' ('imagecaptcha'/'textcaptcha') and
    'textsolution' ('solved'/'unsolved'), plus a zip under FILES['files'].
    Solved uploads read per-file answers from temp/captchas.txt.
    """
    params = request.POST
    captchatype = params.get('captchatype', None)
    solved = params.get('textsolution', None)
    captchafile = request.FILES
    data_folder = captchafile.get('files', None)
    #TODO task
    #TODO test if its zipfile
    zf = zipfile.ZipFile(data_folder, 'r')
    try:
        zf.extractall('temp')
    except KeyError:
        print 'Error: Could not extract Zip'
    path = 'temp/captchas/'
    listing = os.listdir(path)
    if (solved == "unsolved"):
        for file in listing:
            # Read the raw image bytes for this captcha.
            im = open(path + file, 'rb')
            image_data = im.read()
            im.close()
            if (captchatype == 'imagecaptcha'):
                token = ImageCaptchaToken()
                token.create(file, image_data, 0, "testtask7") #TODO task
            elif (captchatype == 'textcaptcha'):
                token = TextCaptchaToken()
                token.create(file, image_data, 0, 'testtext')
            # NOTE(review): if captchatype matches neither branch, 'token' is
            # unbound (or stale from a prior iteration) here — confirm inputs
            # are always one of the two expected types.
            token.save()
    elif (solved == "solved"):
        for file_name, solution in _yield_captcha_solutions():
            im = open(path + file_name, 'rb')
            image_data = im.read()
            im.close()
            if (captchatype == 'imagecaptcha'):
                token = ImageCaptchaToken()
                token.create(file_name, image_data, 1, "testtask8", solution=='1') #TODO task, solution=='1' evaluates to bool True
                print solution
            elif (captchatype == 'textcaptcha'):
                token = TextCaptchaToken()
                token.create(file_name, image_data, 1, solution)
            token.save()
    # Republish static assets so the new captcha images are served.
    call_command('collectstatic', verbosity=0, interactive=False)
    shutil.rmtree('temp')
    return Response("hdoiasjd")
def _retrieve_corresponding_session(session_key, request):
    """Fetch the CaptchaSession for session_key, enforcing same-origin IP.

    NOTE(review): on failure this returns a DRF Response instead of raising;
    callers currently treat the return value as a session object — confirm.
    """
    try:
        session = CaptchaSession.objects.get(pk=session_key)
    except:
        return Response("Session does not exist.", status=status.HTTP_404_NOT_FOUND)
    # assert the remote ip when opening the session and validating them are identical
    # perhaps, remote ip can't be resolved in this case they are evaluated to None - it would still work
    if not get_ip(request) == session.origin:
        return Response("ip when opening the session and ip when validating it are not in agreement.",
                        status=status.HTTP_403_FORBIDDEN)
    return session
def _yield_captcha_solutions():
    """Yield (file_name, solution) pairs from temp/captchas.txt.

    Each line is '<file_name>;<solution>'; the solution is stripped of
    surrounding whitespace (including the newline).
    """
    with open('temp/captchas.txt', 'r') as solutions_file:
        for raw_line in solutions_file:
            file_name, solution = raw_line.split(';')
            yield file_name, solution.strip()
def _any_parameter_unset(*keys):
    """Return True if any given value is falsy (missing/empty), else False."""
    return any(not key for key in keys)
Added HttpResponseRedirect after upload completes.
from django.shortcuts import render
from django.http import HttpResponseRedirect
from django.http import JsonResponse
from django.core.management import call_command
from rest_framework.response import Response
from rest_framework import status
from rest_framework.decorators import api_view
from ipware.ip import get_ip
from .models import CaptchaToken, CaptchaSession, TextCaptchaSession, ImageCaptchaSession, ImageCaptchaToken, TextCaptchaToken
from random import randint
from PIL import Image
import uuid
import zipfile
import shutil
import os
@api_view(['GET'])
def request(request):
    """Create a text-captcha session bound to the caller's IP; return its response.

    NOTE(review): the view name shadows its ``request`` parameter.
    """
    remote_ip = get_ip(request)
    session = TextCaptchaSession()
    # create() hands back both the session instance and the HTTP response.
    session, response = session.create(remote_ip)
    session.save()
    return response
@api_view(['POST'])
def validate(request):
    """Check a submitted captcha answer for the given session.

    NOTE(review): _retrieve_corresponding_session may return a Response on
    error; .validate() would then fail with AttributeError — confirm.
    """
    params = request.POST
    session_key = params.get('session_key', None)
    session = _retrieve_corresponding_session(session_key, request)
    response = session.validate(params)
    return response
@api_view(['POST'])
def renew(request):
    """Re-issue the captcha for an existing session."""
    params = request.POST
    session_key = params.get('session_key', None)
    # Bail out early when no session key was supplied.
    if _any_parameter_unset(session_key):
        return Response(status=status.HTTP_400_BAD_REQUEST)
    session = _retrieve_corresponding_session(session_key, request)
    return session.renew()
@api_view(['POST'])
def upload(request):
    """Bulk-import captcha images from an uploaded zip, then redirect to '/'.

    POST fields: 'captchatype' ('imagecaptcha'/'textcaptcha'), 'textsolution'
    ('solved'/'unsolved'), 'task', and the zip archive in FILES['files'].
    """
    params = request.POST
    captchatype = params.get('captchatype', None)
    solved = params.get('textsolution', None)
    # NOTE(review): 'task' is read but never used below (tokens still get
    # hard-coded task names) — presumably meant to replace them; confirm.
    task = params.get('task', None)
    captchafile = request.FILES
    data_folder = captchafile.get('files', None)
    #TODO task
    #TODO test if its zipfile
    zf = zipfile.ZipFile(data_folder, 'r')
    try:
        zf.extractall('temp')
    except KeyError:
        print 'Error: Could not extract Zip'
    path = 'temp/captchas/'
    listing = os.listdir(path)
    if (solved == "unsolved"):
        for file in listing:
            im = open(path + file, 'rb')
            image_data = im.read()
            im.close()
            if (captchatype == 'imagecaptcha'):
                token = ImageCaptchaToken()
                token.create(file, image_data, 0, "testtask7") #TODO task
            elif (captchatype == 'textcaptcha'):
                token = TextCaptchaToken()
                token.create(file, image_data, 0, 'testtext')
            # NOTE(review): with an unexpected captchatype, 'token' may be
            # unbound here — confirm inputs are validated upstream.
            token.save()
    elif (solved == "solved"):
        for file_name, solution in _yield_captcha_solutions():
            im = open(path + file_name, 'rb')
            image_data = im.read()
            im.close()
            if (captchatype == 'imagecaptcha'):
                token = ImageCaptchaToken()
                token.create(file_name, image_data, 1, "testtask8", solution=='1') #TODO task, solution=='1' evaluates to bool True
                print solution
            elif (captchatype == 'textcaptcha'):
                token = TextCaptchaToken()
                token.create(file_name, image_data, 1, solution)
            token.save()
    # Republish static assets so the imported images are served.
    call_command('collectstatic', verbosity=0, interactive=False)
    shutil.rmtree('temp')
    return HttpResponseRedirect('/')
def _retrieve_corresponding_session(session_key, request):
    """Look up the CaptchaSession for session_key and enforce same-origin IP.

    NOTE(review): error paths return a DRF Response rather than raising;
    callers use the result as a session object — confirm this is intended.
    """
    try:
        session = CaptchaSession.objects.get(pk=session_key)
    except:
        return Response("Session does not exist.", status=status.HTTP_404_NOT_FOUND)
    # assert the remote ip when opening the session and validating them are identical
    # perhaps, remote ip can't be resolved in this case they are evaluated to None - it would still work
    if not get_ip(request) == session.origin:
        return Response("ip when opening the session and ip when validating it are not in agreement.",
                        status=status.HTTP_403_FORBIDDEN)
    return session
def _yield_captcha_solutions():
    """Generate (file_name, solution) pairs from temp/captchas.txt lines."""
    #TODO try/catch if txt exists
    with open('temp/captchas.txt', 'r') as handle:
        for entry in handle:
            # Lines look like '<file_name>;<solution>\n'.
            name, answer = entry.split(';')
            yield name, answer.strip()
def _any_parameter_unset(*keys):
    """True when at least one value is falsy; False otherwise (also for no args)."""
    return not all(keys)
|
import os
BUILD_DIRECTORY = "build/"
HEADER_TEMPLATE_DIRECTORY = "templates"
BACKBONE_TEMPLATE_DIRECTORY = "templates/underscore-templates/"
def format_underscore_template(name, content):
    """
    Format the template as an Underscore.js template.

    Wraps *content* in a <script type="text/template"> element whose id is *name*.
    """
    opening_tag = '<script type="text/template" id="{0}">'.format(name)
    return opening_tag + '\n' + content + '\n</script>'
def build_underscore_templates():
    """Assemble all Underscore templates and write them into build/index.html."""
    # Bug fix: the original did file.write(assemble_templates) — writing the
    # function object itself (TypeError).  Assemble the templates with the
    # Underscore formatter and write the resulting text; use a context
    # manager so the file is closed even on error.
    with open(BUILD_DIRECTORY + "index.html", "w+") as out_file:
        out_file.write(assemble_templates(format_underscore_template))
# Execute
# Module entry point: build the combined template file when this script runs.
build_underscore_templates();
Fixed: Don't bother passing header as param
import os
BUILD_DIRECTORY = "build/"
HEADER_TEMPLATE_DIRECTORY = "templates"
BACKBONE_TEMPLATE_DIRECTORY = "templates/underscore-templates/"
def format_underscore_template(name, content):
    """
    Format the template as an Underscore.js template.

    Wraps *content* in a <script type="text/template"> tag with id *name*.
    """
    return '<script type="text/template" id="{0}">\n{1}\n</script>'.format(name, content)
def assemble_templates(backbone_template_formatter):
def build_underscore_templates():
    """Assemble every Underscore template and write build/index.html."""
    # Open the file to build
    # NOTE(review): 'file' shadows the builtin; prefer a different name.
    file = open(BUILD_DIRECTORY + "index.html", "w+")
    # Get and write it's content
    file.write(assemble_templates(format_underscore_template))
    file.close()
# Execute
# Module entry point: run the build when this script executes.
build_underscore_templates();
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from db import Database
from configFileOps import configFileOps
from serviceConfig import serviceCfgBase
from cloudException import CloudRuntimeException, CloudInternalException
from utilities import bash
import os
class cloudManagementConfig(serviceCfgBase):
    """Service configurator for the CloudStack Management Server (Python 2).

    config() wires up DB defaults (mycloud mode), iptables redirects, the SSL
    keystore, Tomcat server.xml symlinks, process limits, and finally
    restarts/enables the cloudstack-management service.
    """
    def __init__(self, syscfg):
        super(cloudManagementConfig, self).__init__(syscfg)
        self.serviceName = "CloudStack Management Server"
    def config(self):
        def checkHostName():
            # Fail fast if the FQDN cannot be resolved; tomcat startup needs it.
            ret = bash("hostname --fqdn")
            if not ret.isSuccess():
                raise CloudInternalException("Cannot get hostname, 'hostname --fqdn failed'")
        if self.syscfg.env.svrMode == "mycloud":
            # mycloud mode: point the server at the cloudzones component spec.
            cfo = configFileOps("/usr/share/cloudstack-management/conf/environment.properties", self)
            cfo.addEntry("cloud-stack-components-specification", "components-cloudzones.xml")
            cfo.save()
            # Read DB connection settings written by the packaging step.
            cfo = configFileOps("/usr/share/cloudstack-management/conf/db.properties", self)
            dbHost = cfo.getEntry("db.cloud.host")
            dbPort = cfo.getEntry("db.cloud.port")
            dbUser = cfo.getEntry("db.cloud.username")
            dbPass = cfo.getEntry("db.cloud.password")
            if dbPass.strip() == "":
                dbPass = None
            dbName = cfo.getEntry("db.cloud.name")
            db = Database(dbUser, dbPass, dbHost, dbPort, dbName)
            try:
                db.testConnection()
            except CloudRuntimeException, e:
                raise e
            except:
                raise CloudInternalException("Failed to connect to Mysql server")
            try:
                # Seed mycloud defaults: local storage on, bigger template cap,
                # and the RightScale CentOS template URL/checksum.
                statement = """ UPDATE configuration SET value='%s' WHERE name='%s'"""
                db.execute(statement%('true','use.local.storage'))
                db.execute(statement%('20','max.template.iso.size'))
                statement = """ UPDATE vm_template SET url='%s',checksum='%s' WHERE id='%s' """
                db.execute(statement%('https://rightscale-cloudstack.s3.amazonaws.com/kvm/RightImage_CentOS_5.4_x64_v5.6.28.qcow2.bz2', '90fcd2fa4d3177e31ff296cecb9933b7', '4'))
                statement="""UPDATE disk_offering set use_local_storage=1"""
                db.execute(statement)
            except:
                # NOTE(review): 'e' is not bound in this handler, so this
                # 'raise e' would itself raise NameError — presumably meant to
                # re-raise the caught exception; confirm and fix upstream.
                raise e
            #add DNAT 443 to 8250
            if not bash("iptables-save |grep PREROUTING | grep 8250").isSuccess():
                bash("iptables -A PREROUTING -t nat -p tcp --dport 443 -j REDIRECT --to-port 8250 ")
            #generate keystore
            keyPath = "/var/cloudstack/management/web.keystore"
            if not os.path.exists(keyPath):
                cmd = bash("keytool -genkey -keystore %s -storepass \"cloud.com\" -keypass \"cloud.com\" -validity 3650 -dname cn=\"Cloudstack User\",ou=\"mycloud.cloud.com\",o=\"mycloud.cloud.com\",c=\"Unknown\""%keyPath)
                if not cmd.isSuccess():
                    raise CloudInternalException(cmd.getErrMsg())
            if not self.syscfg.env.svrConf == "Tomcat7":
                # Tomcat6 only: make the JVM trust the generated keystore.
                cfo = configFileOps("/etc/cloudstack/management/tomcat6.conf", self)
                cfo.add_lines("JAVA_OPTS+=\" -Djavax.net.ssl.trustStore=%s \""%keyPath)
        elif self.syscfg.env.svrMode == "HttpsServer":
            # HTTPS mode: symlink the ssl server.xml variant for the tomcat
            # version in use and redirect 443 -> 6443.
            if self.syscfg.env.svrConf == "Tomcat7":
                if not os.path.exists("/etc/cloudstack/management/server7-ssl.xml"):
                    raise CloudRuntimeException("Cannot find /etc/cloudstack/management/server7-ssl.xml, https enable failed")
                if os.path.exists("/etc/cloudstack/management/server.xml"):
                    bash("rm -f /etc/cloudstack/management/server.xml")
                bash("ln -s /etc/cloudstack/management/server7-ssl.xml /etc/cloudstack/management/server.xml")
            else:
                if not os.path.exists("/etc/cloudstack/management/server-ssl.xml") or not os.path.exists("/etc/cloudstack/management/tomcat6-ssl.conf"):
                    raise CloudRuntimeException("Cannot find /etc/cloudstack/management/server-ssl.xml or /etc/cloudstack/management/tomcat6-ssl.conf, https enable failed")
                if os.path.exists("/etc/cloudstack/management/server.xml"):
                    bash("rm -f /etc/cloudstack/management/server.xml")
                if os.path.exists("/etc/cloudstack/management/tomcat6.conf"):
                    bash("rm -f /etc/cloudstack/management/tomcat6.conf")
                bash("ln -s /etc/cloudstack/management/server-ssl.xml /etc/cloudstack/management/server.xml")
                bash("ln -s /etc/cloudstack/management/tomcat6-ssl.conf /etc/cloudstack/management/tomcat6.conf")
            if not bash("iptables-save |grep PREROUTING | grep 6443").isSuccess():
                bash("iptables -A PREROUTING -t nat -p tcp --dport 443 -j REDIRECT --to-port 6443")
        else:
            # Plain HTTP mode: symlink the non-ssl variants.
            if self.syscfg.env.svrConf == "Tomcat7":
                if not os.path.exists("/etc/cloudstack/management/server7-nonssl.xml"):
                    raise CloudRuntimeException("Cannot find /etc/cloudstack/management/server7-nonssl.xml, https enable failed")
                if os.path.exists("/etc/cloudstack/management/server.xml"):
                    bash("rm -f /etc/cloudstack/management/server.xml")
                bash("ln -s /etc/cloudstack/management/server7-nonssl.xml /etc/cloudstack/management/server.xml")
            else:
                if not os.path.exists("/etc/cloudstack/management/server-nonssl.xml") or not os.path.exists("/etc/cloudstack/management/tomcat6-nonssl.conf"):
                    raise CloudRuntimeException("Cannot find /etc/cloudstack/management/server-nonssl.xml or /etc/cloudstack/management/tomcat6-nonssl.conf, https enable failed")
                if os.path.exists("/etc/cloudstack/management/server.xml"):
                    bash("rm -f /etc/cloudstack/management/server.xml")
                if os.path.exists("/etc/cloudstack/management/tomcat6.conf"):
                    bash("rm -f /etc/cloudstack/management/tomcat6.conf")
                bash("ln -s /etc/cloudstack/management/server-nonssl.xml /etc/cloudstack/management/server.xml")
                bash("ln -s /etc/cloudstack/management/tomcat6-nonssl.conf /etc/cloudstack/management/tomcat6.conf")
        # Pre-create the pid file with the service user as owner.
        bash("touch /var/run/cloudstack-management.pid")
        bash("chown cloud.cloud /var/run/cloudstack-management.pid")
        #distro like sl 6.1 needs this folder, or tomcat6 failed to start
        checkHostName()
        bash("mkdir /var/log/cloudstack-management/")
        #set max process per account is unlimited
        if os.path.exists("/etc/security/limits.conf"):
            cfo = configFileOps("/etc/security/limits.conf")
            cfo.add_lines("cloud soft nproc -1\n")
            cfo.add_lines("cloud hard nproc -1\n")
            cfo.save()
        try:
            # Best effort: the stock tomcat service must not race with ours.
            if self.syscfg.env.svrConf == "Tomcat7":
                self.syscfg.svo.disableService("tomcat")
            else:
                self.syscfg.svo.disableService("tomcat6")
        except:
            pass
        self.syscfg.svo.stopService("cloudstack-management")
        if self.syscfg.svo.enableService("cloudstack-management"):
            return True
        else:
            raise CloudRuntimeException("Failed to configure %s, please see the /var/log/cloudstack/management/setupManagement.log for detail"%self.serviceName)
CLOUDSTACK-9076: Changed ownership of directory /var/lib/cloudstack to cloud.
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from db import Database
from configFileOps import configFileOps
from serviceConfig import serviceCfgBase
from cloudException import CloudRuntimeException, CloudInternalException
from utilities import bash
import os
class cloudManagementConfig(serviceCfgBase):
def __init__(self, syscfg):
super(cloudManagementConfig, self).__init__(syscfg)
self.serviceName = "CloudStack Management Server"
def config(self):
def checkHostName():
ret = bash("hostname --fqdn")
if not ret.isSuccess():
raise CloudInternalException("Cannot get hostname, 'hostname --fqdn failed'")
if self.syscfg.env.svrMode == "mycloud":
cfo = configFileOps("/usr/share/cloudstack-management/conf/environment.properties", self)
cfo.addEntry("cloud-stack-components-specification", "components-cloudzones.xml")
cfo.save()
cfo = configFileOps("/usr/share/cloudstack-management/conf/db.properties", self)
dbHost = cfo.getEntry("db.cloud.host")
dbPort = cfo.getEntry("db.cloud.port")
dbUser = cfo.getEntry("db.cloud.username")
dbPass = cfo.getEntry("db.cloud.password")
if dbPass.strip() == "":
dbPass = None
dbName = cfo.getEntry("db.cloud.name")
db = Database(dbUser, dbPass, dbHost, dbPort, dbName)
try:
db.testConnection()
except CloudRuntimeException, e:
raise e
except:
raise CloudInternalException("Failed to connect to Mysql server")
try:
statement = """ UPDATE configuration SET value='%s' WHERE name='%s'"""
db.execute(statement%('true','use.local.storage'))
db.execute(statement%('20','max.template.iso.size'))
statement = """ UPDATE vm_template SET url='%s',checksum='%s' WHERE id='%s' """
db.execute(statement%('https://rightscale-cloudstack.s3.amazonaws.com/kvm/RightImage_CentOS_5.4_x64_v5.6.28.qcow2.bz2', '90fcd2fa4d3177e31ff296cecb9933b7', '4'))
statement="""UPDATE disk_offering set use_local_storage=1"""
db.execute(statement)
except:
raise e
#add DNAT 443 to 8250
if not bash("iptables-save |grep PREROUTING | grep 8250").isSuccess():
bash("iptables -A PREROUTING -t nat -p tcp --dport 443 -j REDIRECT --to-port 8250 ")
#generate keystore
keyPath = "/var/cloudstack/management/web.keystore"
if not os.path.exists(keyPath):
cmd = bash("keytool -genkey -keystore %s -storepass \"cloud.com\" -keypass \"cloud.com\" -validity 3650 -dname cn=\"Cloudstack User\",ou=\"mycloud.cloud.com\",o=\"mycloud.cloud.com\",c=\"Unknown\""%keyPath)
if not cmd.isSuccess():
raise CloudInternalException(cmd.getErrMsg())
if not self.syscfg.env.svrConf == "Tomcat7":
cfo = configFileOps("/etc/cloudstack/management/tomcat6.conf", self)
cfo.add_lines("JAVA_OPTS+=\" -Djavax.net.ssl.trustStore=%s \""%keyPath)
elif self.syscfg.env.svrMode == "HttpsServer":
if self.syscfg.env.svrConf == "Tomcat7":
if not os.path.exists("/etc/cloudstack/management/server7-ssl.xml"):
raise CloudRuntimeException("Cannot find /etc/cloudstack/management/server7-ssl.xml, https enable failed")
if os.path.exists("/etc/cloudstack/management/server.xml"):
bash("rm -f /etc/cloudstack/management/server.xml")
bash("ln -s /etc/cloudstack/management/server7-ssl.xml /etc/cloudstack/management/server.xml")
else:
if not os.path.exists("/etc/cloudstack/management/server-ssl.xml") or not os.path.exists("/etc/cloudstack/management/tomcat6-ssl.conf"):
raise CloudRuntimeException("Cannot find /etc/cloudstack/management/server-ssl.xml or /etc/cloudstack/management/tomcat6-ssl.conf, https enable failed")
if os.path.exists("/etc/cloudstack/management/server.xml"):
bash("rm -f /etc/cloudstack/management/server.xml")
if os.path.exists("/etc/cloudstack/management/tomcat6.conf"):
bash("rm -f /etc/cloudstack/management/tomcat6.conf")
bash("ln -s /etc/cloudstack/management/server-ssl.xml /etc/cloudstack/management/server.xml")
bash("ln -s /etc/cloudstack/management/tomcat6-ssl.conf /etc/cloudstack/management/tomcat6.conf")
if not bash("iptables-save |grep PREROUTING | grep 6443").isSuccess():
bash("iptables -A PREROUTING -t nat -p tcp --dport 443 -j REDIRECT --to-port 6443")
else:
if self.syscfg.env.svrConf == "Tomcat7":
if not os.path.exists("/etc/cloudstack/management/server7-nonssl.xml"):
raise CloudRuntimeException("Cannot find /etc/cloudstack/management/server7-nonssl.xml, https enable failed")
if os.path.exists("/etc/cloudstack/management/server.xml"):
bash("rm -f /etc/cloudstack/management/server.xml")
bash("ln -s /etc/cloudstack/management/server7-nonssl.xml /etc/cloudstack/management/server.xml")
else:
if not os.path.exists("/etc/cloudstack/management/server-nonssl.xml") or not os.path.exists("/etc/cloudstack/management/tomcat6-nonssl.conf"):
raise CloudRuntimeException("Cannot find /etc/cloudstack/management/server-nonssl.xml or /etc/cloudstack/management/tomcat6-nonssl.conf, https enable failed")
if os.path.exists("/etc/cloudstack/management/server.xml"):
bash("rm -f /etc/cloudstack/management/server.xml")
if os.path.exists("/etc/cloudstack/management/tomcat6.conf"):
bash("rm -f /etc/cloudstack/management/tomcat6.conf")
bash("ln -s /etc/cloudstack/management/server-nonssl.xml /etc/cloudstack/management/server.xml")
bash("ln -s /etc/cloudstack/management/tomcat6-nonssl.conf /etc/cloudstack/management/tomcat6.conf")
bash("touch /var/run/cloudstack-management.pid")
bash("chown cloud.cloud /var/run/cloudstack-management.pid")
#distro like sl 6.1 needs this folder, or tomcat6 failed to start
checkHostName()
bash("mkdir /var/log/cloudstack-management/")
bash("chown cloud:cloud -R /var/lib/cloudstack")
#set max process per account is unlimited
if os.path.exists("/etc/security/limits.conf"):
cfo = configFileOps("/etc/security/limits.conf")
cfo.add_lines("cloud soft nproc -1\n")
cfo.add_lines("cloud hard nproc -1\n")
cfo.save()
try:
if self.syscfg.env.svrConf == "Tomcat7":
self.syscfg.svo.disableService("tomcat")
else:
self.syscfg.svo.disableService("tomcat6")
except:
pass
self.syscfg.svo.stopService("cloudstack-management")
if self.syscfg.svo.enableService("cloudstack-management"):
return True
else:
raise CloudRuntimeException("Failed to configure %s, please see the /var/log/cloudstack/management/setupManagement.log for detail"%self.serviceName)
|
#!/usr/bin/env python
import sys
import os
import argparse
import subprocess
import json
# from time import time
# import fileinput
# import re
from scripts.process_dc_env import pythonGetEnv
# ==============================================================================
"""
After an update and push to the application repository the code on the
instance needs to have its code updated from the repository. This script will
perform the necessary actions to get the destination component up to date.
"""
__version__ = "0.1"
__copyright__ = "Copyright 2016, devops.center"
__credits__ = ["Bob Lozano", "Gregg Jensen"]
__license__ = "GPL"
__status__ = "Development"
# ==============================================================================
# NEED TO READ IN appArchitecture that was used to build the instance
# we are about to update. There are too many variables that are already
# defined that we wouldn't have to define again. Also, this would imply
# that the file would be put in the customers appUtils (say in config/$ENV)
# and have it read only to make it give language that says hey while I know
# you can change this but it wasn't meant to be modified. devops.center
# owns the master and this is for reference only
class UpdateInstance:
def __init__(self, theAppName, theEnv, Destination, theConfigFile,
accessKey, pathToKeys, test):
"""UpdateComponent constructor"""
self.appName = theAppName
self.env = theEnv
self.dest = Destination
self.targetUser = "ubuntu"
self.accessKey = accessKey
self.pathToKeys = pathToKeys
self.test = test
self.configList = self.readConfigFile(theConfigFile)
def run(self):
# first remove the log file from a previous run if it exists
logFile = "create-instance.log"
if not self.test:
logFileAndPath = "ec2/" + logFile
if os.path.isfile(logFileAndPath):
os.remove(logFileAndPath)
# go through each element found in the configList
for component in self.configList:
componentInfo = component[0]
componentItem = component[1]
numberToCreate = 1
# go through the environemnt variables and print them out
print "Environment Variables for the component:\n"
for item in componentInfo.keys():
print "{}={}\n".format(item, componentInfo[item]),
# now print out the elements for the component
if componentItem["type"] == 'db':
print ("Name=" + componentItem["name"] + " "
"Number=" + str(numberToCreate) + " "
"Version=" + self.appName + " "
"Environment=" + componentInfo["ENVIRONMENT"] + " "
"Type=" + componentItem["type"] + " "
"Role=" + componentItem["ROLE"] + " ")
else:
print ("Name=" + componentItem["name"] + " "
"Number=" + str(numberToCreate) + " "
"Version=" + self.appName + " "
"Environment=" + componentInfo["ENVIRONMENT"] + " "
"Type=" + componentItem["type"] + " ")
# and now print out the separate components and their count
if componentItem["type"] == 'db':
print ("\nThere will be " + str(numberToCreate) +
" instances of type: " + componentItem["type"] + " "
"and role: " + componentItem["ROLE"])
else:
print ("\nThere will be " + str(numberToCreate) +
" instances of type: " + componentItem["type"] + " ")
# set up the cmd to run
cmdToRun = self.buildCmdToRun(componentInfo, "updateAppInstance")
# and now execute it
if self.test:
print "Command to Run: \n" + cmdToRun
else:
print "Running this command: \n" + cmdToRun
subprocess.call(cmdToRun, shell=True)
def buildCmdToRun(self, componentInfo, optionToAdd):
cmdToRun = "cd scripts; ./updateApp.sh"
cmdToRun += " --accessKey " + self.accessKey
cmdToRun += " --destination " + self.dest
cmdToRun = self.addToCommand(cmdToRun, componentInfo,
"APPNAME", "appName")
cmdToRun = self.addToCommand(cmdToRun, componentInfo,
"PROFILE", "profile")
cmdToRun = self.addToCommand(cmdToRun, componentInfo, "REGION",
"region")
cmdToRun = self.addToCommand(cmdToRun, componentInfo, "ROLE",
"role")
# cmdToRun = self.addToCommand(cmdToRun, componentInfo,
# "BACKUP_S3_REGION",
# "backupS3Region")
# cmdToRun = self.addToCommand(cmdToRun, componentInfo,
# "AVAILABILITYZONE", "az")
# cmdToRun = self.addToCommand(cmdToRun, componentInfo, "VPCID",
# "vpcid")
# cmdToRun = self.addToCommand(cmdToRun, componentInfo,
# "SUBNETID", "subnetid")
# cmdToRun = self.addToCommand(cmdToRun, componentInfo,
# "CIDR_BLOCK", "cidrBlock")
# cmdToRun = self.addToCommand(cmdToRun, componentInfo,
# "CUSTWEBGITBRANCH",
# "custwebgitbranch")
# cmdToRun = self.addToCommand(cmdToRun, componentInfo,
# "DCSTACK_GITBRANCH",
# "dcstackgitbranch")
# cmdToRun = self.addToCommand(cmdToRun, componentInfo,
# "UTILSGITBRANCH",
# "utilsgitbranch")
# cmdToRun = self.addToCommand(cmdToRun, componentInfo,
# "DBNAME", "dbname")
# cmdToRun = self.addToCommand(cmdToRun, componentInfo,
# "BACKUPFILE", "backupfile")
# cmdToRun = self.addToCommand(cmdToRun, componentInfo,
# "PGVERSION", "pgversion")
# cmdToRun = self.addToCommand(cmdToRun, componentInfo,
# "STACK", "stack")
# cmdToRun = self.addToCommand(cmdToRun, componentInfo,
# "UTILS", "utils")
# CUSTOMER_UTILS has the git string
cmdToRun = self.addToCommand(cmdToRun, componentInfo,
"CUSTOMER_UTILS", "gitString")
cmdToRun = self.addToCommand(cmdToRun, componentInfo,
"CUSTUTILSGITBRANCH",
"custutilsgitbranch")
cmdToRun = self.addToCommand(cmdToRun, componentInfo,
"DEPLOYMENT_KEYPAIR", "deploykey")
# cmdToRun = self.addToCommand(cmdToRun, componentInfo,
# "DNS_METHOD", "dns")
# cmdToRun = self.addToCommand(cmdToRun, componentInfo,
# "REDIS", "redis")
# cmdToRun = self.addToCommand(cmdToRun, componentInfo,
# "REDISFOLLOWER", "redisfollower")
# cmdToRun = self.addToCommand(cmdToRun, componentInfo,
# "ROUTE53ZONE", "route53zone")
cmdToRun = self.addToCommand(cmdToRun, componentInfo,
"ENVIRONMENT", "env")
cmdToRun = self.addToCommand(cmdToRun, componentInfo,
"LOCAL_KEYPAIR_DIR",
"localKeyPairDir")
# cmdToRun = self.addToCommand(cmdToRun, componentInfo,
# "LOCAL_KEYPAIR",
# "localKeyPair")
# cmdToRun = self.addToCommand(cmdToRun, componentInfo,
# "COMBINED_WEB_WORKER",
# "combinedWebWorker")
#
return cmdToRun
def addToCommand(self, string, componentInfo, aKey, cmdOption):
returnStr = string
if componentInfo[aKey]:
keyValue = ""
if isinstance(componentInfo[aKey], (int, long)):
keyValue = str(componentInfo[aKey])
else:
keyValue = componentInfo[aKey]
returnStr += " --" + cmdOption + " " + keyValue
return returnStr
def readConfigFile(self, aConfigFile):
"""read the json config file and return a List of the elements found
defined in the file"""
# first read in the file
data = []
with open(aConfigFile) as data_file:
data = json.load(data_file)
# now parse the data and make the list of components
returnList = []
# going through the components need to check if there is an
# variable that needs to overwrite one of the default ones
for item in data["components"]:
# a single element in the list will consist of the component-info
# and the component details, all of which are just key value pairs
componentInfo = dict(data["component-info"])
# overwrite the appropriate variables with the appropriate command
# line version of the options
# componentInfo["PROFILE"] = self.appName
if self.env:
componentInfo["ENVIRONMENT"] = self.env
if "type" in item:
componentInfo["SUFFIX"] = item["type"]
if "ROLE" in item:
componentInfo["ROLE"] = item["ROLE"]
if "DEPLOYMENT_KEYPAIR" in componentInfo:
# need to get the "keys' path and create new
pathAndDeployKey = self.pathToKeys + "/" + \
componentInfo["DEPLOYMENT_KEYPAIR"]
componentInfo["DEPLOYMENT_KEYPAIR"] = pathAndDeployKey
if "LOCAL_KEYPAIR" in componentInfo:
componentInfo["LOCAL_KEYPAIR_DIR"] = self.pathToKeys
for aKey in item.keys():
if aKey in componentInfo:
componentInfo[aKey] = item[aKey]
else:
componentInfo[aKey] = item[aKey]
returnList.append((componentInfo, item))
return returnList
def checkArgs():
parser = argparse.ArgumentParser(
description='This script provides an administrative interface to a ' +
'customers application to perform an update on a target instance. ' +
'Once the configuration has been ' +
'changed and committed to the respository, call this script to ' +
'have the code be updated on the instance. ')
parser.add_argument('-c', '--configFile', help='The json config file ' +
'that defines the architecture for the appName',
required=True)
parser.add_argument('-d', '--destination', help='The target instance to ' +
'have the update performed on.',
required=True)
parser.add_argument('-x', '--accessKey', help='the access key to get to ' +
'the instance.',
required=True)
parser.add_argument('-t', '--test', help='Will run the script but ' +
'will not actually execute the shell commands.' +
'Think of this as a dry run or a run to be used' +
' with a testing suite',
action="store_true",
required=False)
try:
args, unknown = parser.parse_known_args()
except SystemExit:
pythonGetEnv()
sys.exit(1)
retEnvList = pythonGetEnv()
retTest = ""
if args.test:
retTest = args.test
print "testing: True"
retConfigFile = args.configFile
if retEnvList["CUSTOMER_APP_NAME"]:
retAppName = retEnvList["CUSTOMER_APP_NAME"]
if retEnvList["ENV"]:
retEnv = retEnvList["ENV"]
if not os.path.isfile(retConfigFile):
print 'ERROR: Unable to find config file: ' + retConfigFile + "\n"
sys.exit(1)
# get the key path in case we need it
keyPath = retEnvList["BASE_CUSTOMER_DIR"] + '/' + \
retEnvList["dcDEFAULT_APP_NAME"] + '/' + \
retEnvList["CUSTOMER_APP_UTILS"] + "/keys/"
retAccessKey = keyPath + retEnvList["CUSTOMER_APP_ENV"] + '/' + \
retEnvList["dcDEFAULT_APP_NAME"] + '-' + \
retEnvList["CUSTOMER_APP_ENV"] + "-access.pem"
for file in os.listdir(keyPath):
if file.endswith(".pub"):
retDeployKey = file.replace(r".pub", '')
break
if not retDeployKey:
print "ERROR: The deploy key can not be determined " \
"automatically you will need to pass the name " \
"with the option --deployKey(-k)."
retDest = args.destination
# if we get here then the
return (retAppName, retEnv, retDest, retConfigFile, retAccessKey, keyPath,
retTest)
def main(argv):
(appName, env, dest, configFile, accessKey, keyPath, test) = checkArgs()
if test:
print 'appName is: {}'.format(appName)
print 'configFile is: {}'.format(configFile)
print 'the env is: {}'.format(env)
print 'running in testing mode'
print "destination is: {}".format(dest)
print "path for keys: {}".format(keyPath)
customerApp = UpdateInstance(appName, env, dest, configFile, accessKey,
keyPath, test)
customerApp.run()
if __name__ == "__main__":
main(sys.argv[1:])
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
minor documentation change to the usage. This needs to be moved out of here and into the instance management repo.
#!/usr/bin/env python
import sys
import os
import argparse
from argparse import RawDescriptionHelpFormatter
import subprocess
import json
# from time import time
# import fileinput
# import re
from scripts.process_dc_env import pythonGetEnv
# ==============================================================================
"""
After an update and push to the application repository the code on the
instance needs to have its code updated from the repository. This script will
perform the necessary actions to get the destination component up to date.
"""
__version__ = "0.1"
__copyright__ = "Copyright 2016, devops.center"
__credits__ = ["Bob Lozano", "Gregg Jensen"]
__license__ = "GPL"
__status__ = "Development"
# ==============================================================================
# NEED TO READ IN appArchitecture that was used to build the instance
# we are about to update. There are too many variables that are already
# defined that we wouldn't have to define again. Also, this would imply
# that the file would be put in the customers appUtils (say in config/$ENV)
# and have it read only to make it give language that says hey while I know
# you can change this but it wasn't meant to be modified. devops.center
# owns the master and this is for reference only
class UpdateInstance:
def __init__(self, theAppName, theEnv, Destination, theConfigFile,
accessKey, pathToKeys, test):
"""UpdateComponent constructor"""
self.appName = theAppName
self.env = theEnv
self.dest = Destination
self.targetUser = "ubuntu"
self.accessKey = accessKey
self.pathToKeys = pathToKeys
self.test = test
self.configList = self.readConfigFile(theConfigFile)
def run(self):
# first remove the log file from a previous run if it exists
logFile = "create-instance.log"
if not self.test:
logFileAndPath = "ec2/" + logFile
if os.path.isfile(logFileAndPath):
os.remove(logFileAndPath)
# go through each element found in the configList
for component in self.configList:
componentInfo = component[0]
componentItem = component[1]
numberToCreate = 1
# go through the environemnt variables and print them out
print "Environment Variables for the component:\n"
for item in componentInfo.keys():
print "{}={}\n".format(item, componentInfo[item]),
# now print out the elements for the component
if componentItem["type"] == 'db':
print ("Name=" + componentItem["name"] + " "
"Number=" + str(numberToCreate) + " "
"Version=" + self.appName + " "
"Environment=" + componentInfo["ENVIRONMENT"] + " "
"Type=" + componentItem["type"] + " "
"Role=" + componentItem["ROLE"] + " ")
else:
print ("Name=" + componentItem["name"] + " "
"Number=" + str(numberToCreate) + " "
"Version=" + self.appName + " "
"Environment=" + componentInfo["ENVIRONMENT"] + " "
"Type=" + componentItem["type"] + " ")
# and now print out the separate components and their count
if componentItem["type"] == 'db':
print ("\nThere will be " + str(numberToCreate) +
" instances of type: " + componentItem["type"] + " "
"and role: " + componentItem["ROLE"])
else:
print ("\nThere will be " + str(numberToCreate) +
" instances of type: " + componentItem["type"] + " ")
# set up the cmd to run
cmdToRun = self.buildCmdToRun(componentInfo, "updateAppInstance")
# and now execute it
if self.test:
print "Command to Run: \n" + cmdToRun
else:
print "Running this command: \n" + cmdToRun
subprocess.call(cmdToRun, shell=True)
def buildCmdToRun(self, componentInfo, optionToAdd):
cmdToRun = "cd scripts; ./updateApp.sh"
cmdToRun += " --accessKey " + self.accessKey
cmdToRun += " --destination " + self.dest
cmdToRun = self.addToCommand(cmdToRun, componentInfo,
"APPNAME", "appName")
cmdToRun = self.addToCommand(cmdToRun, componentInfo,
"PROFILE", "profile")
cmdToRun = self.addToCommand(cmdToRun, componentInfo, "REGION",
"region")
cmdToRun = self.addToCommand(cmdToRun, componentInfo, "ROLE",
"role")
# cmdToRun = self.addToCommand(cmdToRun, componentInfo,
# "BACKUP_S3_REGION",
# "backupS3Region")
# cmdToRun = self.addToCommand(cmdToRun, componentInfo,
# "AVAILABILITYZONE", "az")
# cmdToRun = self.addToCommand(cmdToRun, componentInfo, "VPCID",
# "vpcid")
# cmdToRun = self.addToCommand(cmdToRun, componentInfo,
# "SUBNETID", "subnetid")
# cmdToRun = self.addToCommand(cmdToRun, componentInfo,
# "CIDR_BLOCK", "cidrBlock")
# cmdToRun = self.addToCommand(cmdToRun, componentInfo,
# "CUSTWEBGITBRANCH",
# "custwebgitbranch")
# cmdToRun = self.addToCommand(cmdToRun, componentInfo,
# "DCSTACK_GITBRANCH",
# "dcstackgitbranch")
# cmdToRun = self.addToCommand(cmdToRun, componentInfo,
# "UTILSGITBRANCH",
# "utilsgitbranch")
# cmdToRun = self.addToCommand(cmdToRun, componentInfo,
# "DBNAME", "dbname")
# cmdToRun = self.addToCommand(cmdToRun, componentInfo,
# "BACKUPFILE", "backupfile")
# cmdToRun = self.addToCommand(cmdToRun, componentInfo,
# "PGVERSION", "pgversion")
# cmdToRun = self.addToCommand(cmdToRun, componentInfo,
# "STACK", "stack")
# cmdToRun = self.addToCommand(cmdToRun, componentInfo,
# "UTILS", "utils")
# CUSTOMER_UTILS has the git string
cmdToRun = self.addToCommand(cmdToRun, componentInfo,
"CUSTOMER_UTILS", "gitString")
cmdToRun = self.addToCommand(cmdToRun, componentInfo,
"CUSTUTILSGITBRANCH",
"custutilsgitbranch")
cmdToRun = self.addToCommand(cmdToRun, componentInfo,
"DEPLOYMENT_KEYPAIR", "deploykey")
# cmdToRun = self.addToCommand(cmdToRun, componentInfo,
# "DNS_METHOD", "dns")
# cmdToRun = self.addToCommand(cmdToRun, componentInfo,
# "REDIS", "redis")
# cmdToRun = self.addToCommand(cmdToRun, componentInfo,
# "REDISFOLLOWER", "redisfollower")
# cmdToRun = self.addToCommand(cmdToRun, componentInfo,
# "ROUTE53ZONE", "route53zone")
cmdToRun = self.addToCommand(cmdToRun, componentInfo,
"ENVIRONMENT", "env")
cmdToRun = self.addToCommand(cmdToRun, componentInfo,
"LOCAL_KEYPAIR_DIR",
"localKeyPairDir")
# cmdToRun = self.addToCommand(cmdToRun, componentInfo,
# "LOCAL_KEYPAIR",
# "localKeyPair")
# cmdToRun = self.addToCommand(cmdToRun, componentInfo,
# "COMBINED_WEB_WORKER",
# "combinedWebWorker")
#
return cmdToRun
def addToCommand(self, string, componentInfo, aKey, cmdOption):
returnStr = string
if componentInfo[aKey]:
keyValue = ""
if isinstance(componentInfo[aKey], (int, long)):
keyValue = str(componentInfo[aKey])
else:
keyValue = componentInfo[aKey]
returnStr += " --" + cmdOption + " " + keyValue
return returnStr
def readConfigFile(self, aConfigFile):
"""read the json config file and return a List of the elements found
defined in the file"""
# first read in the file
data = []
with open(aConfigFile) as data_file:
data = json.load(data_file)
# now parse the data and make the list of components
returnList = []
# going through the components need to check if there is an
# variable that needs to overwrite one of the default ones
for item in data["components"]:
# a single element in the list will consist of the component-info
# and the component details, all of which are just key value pairs
componentInfo = dict(data["component-info"])
# overwrite the appropriate variables with the appropriate command
# line version of the options
# componentInfo["PROFILE"] = self.appName
if self.env:
componentInfo["ENVIRONMENT"] = self.env
if "type" in item:
componentInfo["SUFFIX"] = item["type"]
if "ROLE" in item:
componentInfo["ROLE"] = item["ROLE"]
if "DEPLOYMENT_KEYPAIR" in componentInfo:
# need to get the "keys' path and create new
pathAndDeployKey = self.pathToKeys + "/" + \
componentInfo["DEPLOYMENT_KEYPAIR"]
componentInfo["DEPLOYMENT_KEYPAIR"] = pathAndDeployKey
if "LOCAL_KEYPAIR" in componentInfo:
componentInfo["LOCAL_KEYPAIR_DIR"] = self.pathToKeys
for aKey in item.keys():
if aKey in componentInfo:
componentInfo[aKey] = item[aKey]
else:
componentInfo[aKey] = item[aKey]
returnList.append((componentInfo, item))
return returnList
def checkArgs():
parser = argparse.ArgumentParser(
description='This script provides an administrative interface to a ' +
'customers application to perform an update on a target instance. ' +
'Once the configuration has been ' +
'changed and committed to the respository, call this script to ' +
'have the code be updated on the instance. \n\n'
'Example cmd line to update an application with a new environment:\n'
'./updateAppInstance.py --baseDirectory ~/someDir/YourAppDir\n'
' --appName YourApp\n'
' --command update\n'
' --option "newEnv = UAT"\n',
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument('-c', '--configFile', help='The json config file ' +
'that defines the architecture for the appName',
required=True)
parser.add_argument('-d', '--destination', help='The target instance to ' +
'have the update performed on.',
required=True)
parser.add_argument('-x', '--accessKey', help='the access key to get to ' +
'the instance.',
required=True)
parser.add_argument('-t', '--test', help='Will run the script but ' +
'will not actually execute the shell commands.' +
'Think of this as a dry run or a run to be used' +
' with a testing suite',
action="store_true",
required=False)
try:
args, unknown = parser.parse_known_args()
except SystemExit:
pythonGetEnv()
sys.exit(1)
retEnvList = pythonGetEnv()
retTest = ""
if args.test:
retTest = args.test
print "testing: True"
retConfigFile = args.configFile
if retEnvList["CUSTOMER_APP_NAME"]:
retAppName = retEnvList["CUSTOMER_APP_NAME"]
if retEnvList["ENV"]:
retEnv = retEnvList["ENV"]
if not os.path.isfile(retConfigFile):
print 'ERROR: Unable to find config file: ' + retConfigFile + "\n"
sys.exit(1)
# get the key path in case we need it
keyPath = retEnvList["BASE_CUSTOMER_DIR"] + '/' + \
retEnvList["dcDEFAULT_APP_NAME"] + '/' + \
retEnvList["CUSTOMER_APP_UTILS"] + "/keys/"
retAccessKey = keyPath + retEnvList["CUSTOMER_APP_ENV"] + '/' + \
retEnvList["dcDEFAULT_APP_NAME"] + '-' + \
retEnvList["CUSTOMER_APP_ENV"] + "-access.pem"
for file in os.listdir(keyPath):
if file.endswith(".pub"):
retDeployKey = file.replace(r".pub", '')
break
if not retDeployKey:
print "ERROR: The deploy key can not be determined " \
"automatically you will need to pass the name " \
"with the option --deployKey(-k)."
retDest = args.destination
# if we get here then the
return (retAppName, retEnv, retDest, retConfigFile, retAccessKey, keyPath,
retTest)
def main(argv):
(appName, env, dest, configFile, accessKey, keyPath, test) = checkArgs()
if test:
print 'appName is: {}'.format(appName)
print 'configFile is: {}'.format(configFile)
print 'the env is: {}'.format(env)
print 'running in testing mode'
print "destination is: {}".format(dest)
print "path for keys: {}".format(keyPath)
customerApp = UpdateInstance(appName, env, dest, configFile, accessKey,
keyPath, test)
customerApp.run()
if __name__ == "__main__":
main(sys.argv[1:])
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
|
from __future__ import unicode_literals
from distutils.command.upload import upload
import logging
import os
from datetime import timedelta
from ckeditor.fields import RichTextField
from django import forms
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.db.models import Max, JSONField
from django.template.defaulttags import register
from django.utils.timezone import now
from django.utils.text import slugify
from django.utils.translation import ugettext_lazy as _
from django.core.exceptions import ValidationError
from archive.fields import PublicFileField
logger = logging.getLogger('date')
POST_SLUG_MAX_LENGTH = 50
def upload_to(instance, filename):
    """Build the storage path for an uploaded event image.

    The base name is slugified and the extension lower-cased so the
    resulting key is URL-safe, e.g. "My Pic.JPG" -> "events/my-pic.jpg".
    The `instance` argument is required by Django's upload_to contract
    but is not used here.
    """
    filename_base, filename_ext = os.path.splitext(filename)
    # FIX: the format string did not contain the {filename} placeholder,
    # so the slugified base name passed below was silently dropped
    file_location = "events/{filename}{extension}".format(
        filename=slugify(filename_base),
        extension=filename_ext.lower(),
    )
    return file_location
class Event(models.Model):
title = models.CharField(_('Titel'), max_length=255, blank=False)
content = RichTextField(_('Innehåll'), blank=True)
event_date_start = models.DateTimeField(_('Startdatum'), default=now)
event_date_end = models.DateTimeField(_('Slutdatum'), default=now)
sign_up_max_participants = models.IntegerField(_('Maximal antal deltagare'),
choices=[(0, u"Ingen begränsning")] + list(
zip(range(1, 200), range(1, 200))), default=0)
sign_up = models.BooleanField(_('Anmälning'), default=True)
sign_up_members = models.DateTimeField(_('Anmälan öppnas (medlemmar)'), null=True, blank=True, default=now)
sign_up_others = models.DateTimeField(_('Anmälan öppnas (övriga)'), null=True, blank=True, default=now)
sign_up_deadline = models.DateTimeField(_('Anmälningen stängs'), null=True, blank=True, default=now)
sign_up_cancelling = models.BooleanField(_('Avanmälning'), default=True)
sign_up_cancelling_deadline = models.DateTimeField(_('Avanmälningen stängs'), null=True, blank=True, default=now)
author = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
created_time = models.DateTimeField(_('Skapad'), default=now)
published_time = models.DateTimeField(_('Publicerad'), editable=False, null=True, blank=True)
modified_time = models.DateTimeField(_('Modifierad'), editable=False, null=True, blank=True)
published = models.BooleanField(_('Publicera'), default=True)
slug = models.SlugField(_('Slug'), unique=True, allow_unicode=False, max_length=POST_SLUG_MAX_LENGTH, blank=True)
sign_up_avec = models.BooleanField(_('Avec'), default=False)
members_only = models.BooleanField(_('Kräv inloggning för innehåll'), default=False)
passcode = models.CharField(_('Passcode'), max_length=255, blank=True)
image = models.ImageField(_('Bakgrundsbild'), null=True, blank=True, upload_to=upload_to)
s3_image = PublicFileField(verbose_name=_('Bakgrundsbild'), null=True, blank=True, upload_to=upload_to)
class Meta:
verbose_name = _('evenemang')
verbose_name_plural = _('evenemang')
ordering = ('id',)
def __str__(self):
return self.title
def event_date_start_pretty(self):
return self.event_date_start.strftime("%-d %B")
def publish(self):
self.published_time = now()
self.published = True
self.save()
def unpublish(self):
self.published = False
self.save()
def update(self):
    """Stamp the modification time and persist the instance."""
    self.modified_time = now()
    self.save()
def get_registrations(self):
    """All sign-ups for this event, ordered by their running number."""
    registrations = EventAttendees.objects.filter(event=self)
    return registrations.order_by('attendee_nr')
def get_highest_attendee_nr(self):
    """Return ``{'attendee_nr__max': <int or None>}`` for this event."""
    event_signups = EventAttendees.objects.filter(event=self)
    return event_signups.aggregate(Max('attendee_nr'))
def add_event_attendance(self, user, email, anonymous, preferences, avec_for=None):
    """Register an attendee for this event unless the email is already taken.

    Returns the new (or pre-existing) EventAttendees row; returns None
    implicitly when sign-up is disabled for this event.

    :param user: attendee display name (stored as a plain string)
    :param email: contact address; per-event uniqueness is enforced here
    :param anonymous: hide the name on public attendee lists
    :param preferences: mapping of registration-form answers
    :param avec_for: EventAttendees row this sign-up is an avec of, if any
    """
    if self.sign_up:
        try:
            # An existing registration with this email wins: it is returned
            # unchanged instead of creating a duplicate or raising.
            registration = EventAttendees.objects.get(email=email, event=self)
        except ObjectDoesNotExist:
            user_pref = {}
            if self.get_registration_form():
                # Keep only answers to questions that actually exist on
                # this event's registration form.
                for item in self.get_registration_form():
                    user_pref[str(item)] = preferences.get(str(item))
            registration = EventAttendees.objects.create(user=user,
                                                         event=self, email=email,
                                                         time_registered=now(), preferences=user_pref,
                                                         anonymous=anonymous, avec_for=avec_for)
        return registration
def cancel_event_attendance(self, user):
    """Delete *user*'s registration for this event (only when sign-up is on).

    NOTE(review): raises DoesNotExist when no matching registration exists
    and MultipleObjectsReturned when the same name registered twice —
    confirm that callers handle both.
    """
    if self.sign_up:
        registration = EventAttendees.objects.get(user=user, event=self)
        registration.delete()
def registration_is_open_members(self):
    """True when member sign-up has opened and the deadline has not passed.

    ``sign_up_members`` is declared ``null=True``; comparing ``now()``
    against None raised TypeError, so a missing opening time is treated
    as "not open" (basic handling for null events, #254).
    """
    if self.sign_up_members is None:
        return False
    return now() >= self.sign_up_members and not self.registation_past_due()
def registration_is_open_others(self):
    """True when sign-up for non-members has opened and the deadline has not passed.

    ``sign_up_others`` is declared ``null=True``; comparing ``now()``
    against None raised TypeError, so a missing opening time is treated
    as "not open" (basic handling for null events, #254).
    """
    if self.sign_up_others is None:
        return False
    return now() >= self.sign_up_others and not self.registation_past_due()
def registation_past_due(self):
    """True when a sign-up deadline is set and has passed.

    ``sign_up_deadline`` is declared ``null=True``; comparing ``now()``
    against None raised TypeError, so no deadline means sign-up never
    closes.  (The misspelled name is kept: sibling methods call it.)
    """
    if self.sign_up_deadline is None:
        return False
    return now() > self.sign_up_deadline
def event_is_full(self):
    """True when the participant cap is set (non-zero) and has been reached."""
    cap = self.sign_up_max_participants
    if cap == 0:
        # 0 encodes "Ingen begränsning" (no participant limit).
        return False
    taken = EventAttendees.objects.filter(event=self).count()
    return taken >= cap
def get_registration_form(self):
    """Return this event's custom questions (newest first), or None when empty.

    Returning None instead of an empty queryset lets callers use the result
    directly in truth tests (as make_registration_form does).  Uses a single
    ``exists()`` probe on the final queryset instead of the previous separate
    ``count() == 0`` query.
    """
    questions = EventRegistrationForm.objects.filter(event=self).order_by('-id')
    if not questions.exists():
        return None
    return questions
def get_registration_form_public_info(self):
    """Custom questions whose answers may be displayed publicly."""
    public_questions = EventRegistrationForm.objects.filter(
        event=self, public_info=True)
    return public_questions
def make_registration_form(self, data=None):
    """Build and return a Form *class* for signing up to this event.

    Base fields: name, email (validated for per-event uniqueness) and an
    anonymity flag.  Custom questions from EventRegistrationForm are added
    per question type.  When avec sign-up is enabled, every field is
    mirrored under an ``avec_`` prefix — all optional and tagged with the
    ``avec-field`` CSS class so the frontend can show/hide them together.

    Returns None implicitly when sign-up is disabled.
    """
    if self.sign_up:
        fields = {'user': forms.CharField(label='Namn', max_length=255),
                  'email': forms.EmailField(label='Email', validators=[self.validate_unique_email]),
                  'anonymous': forms.BooleanField(label='Anonymt', required=False)}
        if self.get_registration_form():
            # reversed(): questions are stored newest-first (order_by('-id')),
            # but the form should present them in creation order.
            for question in reversed(self.get_registration_form()):
                if question.type == "select":
                    choices = question.choice_list.split(',')
                    fields[question.name] = forms.ChoiceField(label=question.name,
                                                              choices=list(map(list, zip(choices, choices))),
                                                              required=question.required)
                elif question.type == "checkbox":
                    fields[question.name] = forms.BooleanField(label=question.name, required=question.required)
                elif question.type == "text":
                    fields[question.name] = forms.CharField(label=question.name, required=question.required)
        if self.sign_up_avec:
            fields['avec'] = forms.BooleanField(label='Avec', required=False)
            fields['avec_user'] = forms.CharField(label='Namn', max_length=255, required=False, widget=forms.TextInput(attrs={'class': "avec-field"}))
            fields['avec_email'] = forms.EmailField(label='Email', validators=[self.validate_unique_email], required=False, widget=forms.TextInput(attrs={'class': "avec-field"}))
            fields['avec_anonymous'] = forms.BooleanField(label='Anonymt', required=False, widget=forms.CheckboxInput(attrs={'class': "avec-field"}))
            if self.get_registration_form():
                for question in reversed(self.get_registration_form()):
                    # Questions can opt out of being asked for the avec.
                    if not question.hide_for_avec:
                        if question.type == "select":
                            choices = question.choice_list.split(',')
                            fields['avec_'+question.name] = forms.ChoiceField(label=question.name,
                                                                              choices=list(map(list, zip(choices, choices))),
                                                                              required=False, widget=forms.Select(attrs={'class': "avec-field"}))
                        elif question.type == "checkbox":
                            fields['avec_'+question.name] = forms.BooleanField(label=question.name, required=False, widget=forms.CheckboxInput(attrs={'class': "avec-field"}))
                        elif question.type == "text":
                            fields['avec_'+question.name] = forms.CharField(label=question.name, required=False, widget=forms.TextInput(attrs={'class': "avec-field"}))
        # Dynamically assembled Form class; the caller instantiates it.
        return type('EventAttendeeForm', (forms.BaseForm,), {'base_fields': fields, 'data': data}, )
@register.filter
def show_attendee_list(self):
    """Template filter: keep the attendee list visible until 24h after the event ends.

    timedelta(-1) shifts the comparison point one day into the past, so the
    list is shown while event_date_end > (now - 1 day).
    """
    return self.event_date_end > now() + timedelta(-1)
def validate_unique_email(self, email):
    """Form-field validator: reject *email* if it is already registered
    for this event (comparison done in Python, exact match)."""
    registered_emails = (attendee.email for attendee in self.get_registrations())
    if email in registered_emails:
        logger.debug("SAME EMAIL")
        raise ValidationError(_("Det finns redan någon anmäld med denna email"))
class EventRegistrationForm(models.Model):
    """A single custom question attached to an Event's sign-up form."""
    event = models.ForeignKey(Event, verbose_name='Event', on_delete=models.CASCADE)
    name = models.CharField(_('Namn'), max_length=255, blank=True)
    # Widget type of the question; choice_list is only meaningful for "select".
    type = models.CharField(_('Typ'),
                            choices=(("text", "Text"), ("select", "Multiple choice"), ("checkbox", "Kryssryta")),
                            blank=True, max_length=255, null=True)
    required = models.BooleanField(_('Krävd'), default=False)
    # Whether the answers to this question may be shown publicly.
    public_info = models.BooleanField(_('Öppen info'), default=False)
    # Comma-separated options for "select" questions.
    choice_list = models.CharField(_('Alternativ'), max_length=255, blank=True)
    hide_for_avec = models.BooleanField(_('Göm för avec'), default=False)

    class Meta:
        verbose_name = _('Anmälningsfält')
        verbose_name_plural = _('Anmälningsfält')

    def __str__(self):
        return str(self.name)

    def get_choices(self):
        """Split the comma-separated choice list into a list of strings."""
        return str(self.choice_list).split(',')
class EventAttendees(models.Model):
    """One registration (possibly an avec registration) for an Event."""
    event = models.ForeignKey(Event, verbose_name='Event', on_delete=models.CASCADE)
    # Running number in steps of 10 (see save()); used by the admin sorting library.
    attendee_nr = models.PositiveSmallIntegerField(_('#'))
    user = models.CharField(_('Namn'), blank=False, max_length=255)
    email = models.EmailField(_('E-postadress'), blank=False, null=True, unique=False)
    # Answers to the event's registration-form questions; legacy default is a list.
    preferences = JSONField(_('Svar'), default=list, blank=True)
    anonymous = models.BooleanField(_('Anonymt'), default=False)
    time_registered = models.DateTimeField(_('Registrerad'))
    avec_for = models.ForeignKey("self", verbose_name=_('Avec till'), null=True, blank=True, on_delete=models.SET_NULL)

    class Meta:
        verbose_name = _('deltagare')
        verbose_name_plural = _('deltagare')
        ordering = ['time_registered', ]
        # One registration per email per event (enforced alongside
        # Event.validate_unique_email / add_event_attendance).
        unique_together = ('event', 'email')

    def __str__(self):
        return str(self.user)

    @register.filter
    def get_preference(self, key):
        """Template filter: this attendee's answer for *key*, or '' if unanswered."""
        return self.preferences.get(str(key), "")

    def save(self, *args, **kwargs):
        """Fill in attendee_nr / time_registered and normalise preferences."""
        if self.attendee_nr is None:
            # attendee_nr increments by 10, e.g 10,20,30,40...
            # this is needed so the admin sorting library will work.
            self.attendee_nr = (self.event.get_registrations().count()+1) * 10
            # Add ten from highest attendee_nr so signups dont get in weird order after deletions.
            if self.event.get_highest_attendee_nr().get('attendee_nr__max'):
                self.attendee_nr = self.event.get_highest_attendee_nr().get('attendee_nr__max') + 10
        if self.time_registered is None:
            self.time_registered = now()
        if isinstance(self.preferences, list):
            # Coerce the legacy list default into the dict shape that
            # get_preference() expects.
            self.preferences = {}
        super(EventAttendees, self).save(*args, **kwargs)
# Commit note: Add basic handling for null events (#254)
from __future__ import unicode_literals
from distutils.command.upload import upload
import logging
import os
from datetime import timedelta
from ckeditor.fields import RichTextField
from django import forms
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.db.models import Max, JSONField
from django.template.defaulttags import register
from django.utils.timezone import now
from django.utils.text import slugify
from django.utils.translation import ugettext_lazy as _
from django.core.exceptions import ValidationError
from archive.fields import PublicFileField
# Module logger (channel name 'date') and the maximum slug length.
logger = logging.getLogger('date')
POST_SLUG_MAX_LENGTH = 50
def upload_to(instance, filename):
    """Storage path for an event image: ``events/<slugified-name><ext>``.

    The format string had lost its ``{filename}`` placeholder (it read
    ``events/(unknown){extension}``): the slugified name was computed but
    never used, so every upload mapped to the same key and overwrote the
    previous image.  Restore the placeholder.
    """
    filename_base, filename_ext = os.path.splitext(filename)
    file_location = "events/{filename}{extension}".format(
        filename=slugify(filename_base),
        extension=filename_ext.lower(),
    )
    return file_location
class Event(models.Model):
    """An event with optional sign-up, avec sign-up and a per-event custom form."""
    title = models.CharField(_('Titel'), max_length=255, blank=False)
    content = RichTextField(_('Innehåll'), blank=True)
    event_date_start = models.DateTimeField(_('Startdatum'), default=now)
    event_date_end = models.DateTimeField(_('Slutdatum'), default=now)
    # 0 means "no limit"; otherwise a cap of 1..199 participants.
    sign_up_max_participants = models.IntegerField(_('Maximal antal deltagare'),
                                                   choices=[(0, u"Ingen begränsning")] + list(
                                                       zip(range(1, 200), range(1, 200))), default=0)
    sign_up = models.BooleanField(_('Anmälning'), default=True)
    # The three timestamps below are nullable: None means "never opens" /
    # "no deadline" (see the is-open helpers).
    sign_up_members = models.DateTimeField(_('Anmälan öppnas (medlemmar)'), null=True, blank=True, default=now)
    sign_up_others = models.DateTimeField(_('Anmälan öppnas (övriga)'), null=True, blank=True, default=now)
    sign_up_deadline = models.DateTimeField(_('Anmälningen stängs'), null=True, blank=True, default=now)
    sign_up_cancelling = models.BooleanField(_('Avanmälning'), default=True)
    sign_up_cancelling_deadline = models.DateTimeField(_('Avanmälningen stängs'), null=True, blank=True, default=now)
    author = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    created_time = models.DateTimeField(_('Skapad'), default=now)
    published_time = models.DateTimeField(_('Publicerad'), editable=False, null=True, blank=True)
    modified_time = models.DateTimeField(_('Modifierad'), editable=False, null=True, blank=True)
    published = models.BooleanField(_('Publicera'), default=True)
    slug = models.SlugField(_('Slug'), unique=True, allow_unicode=False, max_length=POST_SLUG_MAX_LENGTH, blank=True)
    sign_up_avec = models.BooleanField(_('Avec'), default=False)
    members_only = models.BooleanField(_('Kräv inloggning för innehåll'), default=False)
    passcode = models.CharField(_('Passcode'), max_length=255, blank=True)
    image = models.ImageField(_('Bakgrundsbild'), null=True, blank=True, upload_to=upload_to)
    s3_image = PublicFileField(verbose_name=_('Bakgrundsbild'), null=True, blank=True, upload_to=upload_to)

    class Meta:
        verbose_name = _('evenemang')
        verbose_name_plural = _('evenemang')
        ordering = ('id',)

    def __str__(self):
        return self.title

    def event_date_start_pretty(self):
        """Start date as e.g. "5 Mars".
        NOTE(review): "%-d" is a glibc extension; fails on Windows."""
        return self.event_date_start.strftime("%-d %B")

    def publish(self):
        """Mark the event as published and stamp the publication time."""
        self.published_time = now()
        self.published = True
        self.save()

    def unpublish(self):
        """Withdraw the event from publication (published_time kept)."""
        self.published = False
        self.save()

    def update(self):
        """Stamp the modification time and persist the instance."""
        self.modified_time = now()
        self.save()

    def get_registrations(self):
        """All sign-ups for this event, ordered by their running number."""
        return EventAttendees.objects.filter(event=self).order_by('attendee_nr')

    def get_highest_attendee_nr(self):
        """Return ``{'attendee_nr__max': <int or None>}`` for this event."""
        return EventAttendees.objects.filter(event=self).aggregate(Max('attendee_nr'))

    def add_event_attendance(self, user, email, anonymous, preferences, avec_for=None):
        """Register an attendee unless the email is already registered.

        Returns the new (or pre-existing) EventAttendees row, or None
        implicitly when sign-up is disabled.
        """
        if self.sign_up:
            try:
                registration = EventAttendees.objects.get(email=email, event=self)
            except ObjectDoesNotExist:
                # Keep only answers to questions that exist on this event's form.
                user_pref = {}
                if self.get_registration_form():
                    for item in self.get_registration_form():
                        user_pref[str(item)] = preferences.get(str(item))
                registration = EventAttendees.objects.create(user=user,
                                                             event=self, email=email,
                                                             time_registered=now(), preferences=user_pref,
                                                             anonymous=anonymous, avec_for=avec_for)
            return registration

    def cancel_event_attendance(self, user):
        """Delete *user*'s registration (raises DoesNotExist when absent)."""
        if self.sign_up:
            registration = EventAttendees.objects.get(user=user, event=self)
            registration.delete()

    def registration_is_open_members(self):
        """True when member sign-up has opened and the deadline has not passed."""
        # Nullable field: no opening time means sign-up is closed (#254).
        if self.sign_up_members is None:
            return False
        return now() >= self.sign_up_members and not self.registation_past_due()

    def registration_is_open_others(self):
        """True when non-member sign-up has opened and the deadline has not passed."""
        if self.sign_up_others is None:
            return False
        return now() >= self.sign_up_others and not self.registation_past_due()

    def registation_past_due(self):
        """True when a deadline is set and has passed (name misspelling is load-bearing)."""
        if self.sign_up_deadline is None:
            return False
        return now() > self.sign_up_deadline

    def event_is_full(self):
        """True when the participant cap is set (non-zero) and reached."""
        if self.sign_up_max_participants == 0:
            return False
        return EventAttendees.objects.filter(event=self).count() >= self.sign_up_max_participants

    def get_registration_form(self):
        """Custom questions newest-first, or None when there are none."""
        if EventRegistrationForm.objects.filter(event=self).count() == 0:
            return None
        return EventRegistrationForm.objects.filter(event=self).order_by('-id')

    def get_registration_form_public_info(self):
        """Custom questions whose answers may be displayed publicly."""
        return EventRegistrationForm.objects.filter(event=self, public_info=True)

    def make_registration_form(self, data=None):
        """Build a Form *class* for signing up to this event.

        Base fields (name/email/anonymous) plus custom questions; when avec
        sign-up is enabled, all fields are mirrored with an ``avec_`` prefix
        and tagged with the ``avec-field`` CSS class.  Returns None when
        sign-up is disabled.
        """
        if self.sign_up:
            fields = {'user': forms.CharField(label='Namn', max_length=255),
                      'email': forms.EmailField(label='Email', validators=[self.validate_unique_email]),
                      'anonymous': forms.BooleanField(label='Anonymt', required=False)}
            if self.get_registration_form():
                # reversed(): questions are stored newest-first.
                for question in reversed(self.get_registration_form()):
                    if question.type == "select":
                        choices = question.choice_list.split(',')
                        fields[question.name] = forms.ChoiceField(label=question.name,
                                                                  choices=list(map(list, zip(choices, choices))),
                                                                  required=question.required)
                    elif question.type == "checkbox":
                        fields[question.name] = forms.BooleanField(label=question.name, required=question.required)
                    elif question.type == "text":
                        fields[question.name] = forms.CharField(label=question.name, required=question.required)
            if self.sign_up_avec:
                fields['avec'] = forms.BooleanField(label='Avec', required=False)
                fields['avec_user'] = forms.CharField(label='Namn', max_length=255, required=False, widget=forms.TextInput(attrs={'class': "avec-field"}))
                fields['avec_email'] = forms.EmailField(label='Email', validators=[self.validate_unique_email], required=False, widget=forms.TextInput(attrs={'class': "avec-field"}))
                fields['avec_anonymous'] = forms.BooleanField(label='Anonymt', required=False, widget=forms.CheckboxInput(attrs={'class': "avec-field"}))
                if self.get_registration_form():
                    for question in reversed(self.get_registration_form()):
                        if not question.hide_for_avec:
                            if question.type == "select":
                                choices = question.choice_list.split(',')
                                fields['avec_'+question.name] = forms.ChoiceField(label=question.name,
                                                                                  choices=list(map(list, zip(choices, choices))),
                                                                                  required=False, widget=forms.Select(attrs={'class': "avec-field"}))
                            elif question.type == "checkbox":
                                fields['avec_'+question.name] = forms.BooleanField(label=question.name, required=False, widget=forms.CheckboxInput(attrs={'class': "avec-field"}))
                            elif question.type == "text":
                                fields['avec_'+question.name] = forms.CharField(label=question.name, required=False, widget=forms.TextInput(attrs={'class': "avec-field"}))
            return type('EventAttendeeForm', (forms.BaseForm,), {'base_fields': fields, 'data': data}, )

    @register.filter
    def show_attendee_list(self):
        """Template filter: visible until one day after the event ends."""
        return self.event_date_end > now() + timedelta(-1)

    def validate_unique_email(self, email):
        """Form validator: reject an email already registered for this event."""
        attendees = self.get_registrations()
        for attendee in attendees:
            if email == attendee.email:
                logger.debug("SAME EMAIL")
                raise ValidationError(_("Det finns redan någon anmäld med denna email"))
class EventRegistrationForm(models.Model):
    """One custom question on an Event's sign-up form."""
    event = models.ForeignKey(Event, verbose_name='Event', on_delete=models.CASCADE)
    name = models.CharField(_('Namn'), max_length=255, blank=True)
    # Question widget type; "select" questions read their options from choice_list.
    type = models.CharField(_('Typ'),
                            choices=(("text", "Text"), ("select", "Multiple choice"), ("checkbox", "Kryssryta")),
                            blank=True, max_length=255, null=True)
    required = models.BooleanField(_('Krävd'), default=False)
    public_info = models.BooleanField(_('Öppen info'), default=False)
    # Comma-separated option list, split by get_choices().
    choice_list = models.CharField(_('Alternativ'), max_length=255, blank=True)
    hide_for_avec = models.BooleanField(_('Göm för avec'), default=False)

    class Meta:
        verbose_name = _('Anmälningsfält')
        verbose_name_plural = _('Anmälningsfält')

    def __str__(self):
        return str(self.name)

    def get_choices(self):
        """Options of a "select" question as a list of strings."""
        return str(self.choice_list).split(',')
class EventAttendees(models.Model):
    """A single registration (or avec registration) for an Event."""
    event = models.ForeignKey(Event, verbose_name='Event', on_delete=models.CASCADE)
    # Assigned in save(): running number in steps of 10 for admin sorting.
    attendee_nr = models.PositiveSmallIntegerField(_('#'))
    user = models.CharField(_('Namn'), blank=False, max_length=255)
    email = models.EmailField(_('E-postadress'), blank=False, null=True, unique=False)
    # Registration-form answers; legacy default is a list, normalised in save().
    preferences = JSONField(_('Svar'), default=list, blank=True)
    anonymous = models.BooleanField(_('Anonymt'), default=False)
    time_registered = models.DateTimeField(_('Registrerad'))
    avec_for = models.ForeignKey("self", verbose_name=_('Avec till'), null=True, blank=True, on_delete=models.SET_NULL)

    class Meta:
        verbose_name = _('deltagare')
        verbose_name_plural = _('deltagare')
        ordering = ['time_registered', ]
        # At most one registration per email per event.
        unique_together = ('event', 'email')

    def __str__(self):
        return str(self.user)

    @register.filter
    def get_preference(self, key):
        """Template filter: the answer stored for *key*, or '' when missing."""
        return self.preferences.get(str(key), "")

    def save(self, *args, **kwargs):
        """Assign attendee_nr / time_registered defaults before saving."""
        if self.attendee_nr is None:
            # attendee_nr increments by 10, e.g 10,20,30,40...
            # this is needed so the admin sorting library will work.
            self.attendee_nr = (self.event.get_registrations().count()+1) * 10
            # Add ten from highest attendee_nr so signups dont get in weird order after deletions.
            if self.event.get_highest_attendee_nr().get('attendee_nr__max'):
                self.attendee_nr = self.event.get_highest_attendee_nr().get('attendee_nr__max') + 10
        if self.time_registered is None:
            self.time_registered = now()
        if isinstance(self.preferences, list):
            # Coerce legacy list default into the dict shape get_preference() expects.
            self.preferences = {}
        super(EventAttendees, self).save(*args, **kwargs)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Created on Feb, 27 2013
@author: heinz-peterlang
albert weichselbraun
Handles the new (http://www.weblyzard.com/wl/2013#) weblyzard
XML format.
Functions added:
- support for sentence tokens and pos iterators
Removed functions:
- compatibility fixes for namespaces, encodings etc.
- support for the old POS tags mapping.
'''
import logging
from lxml import etree
from pprint import pprint
import unittest
import hashlib
from weblyzard_api.xml_content.parsers.xml_2005 import XML2005
from weblyzard_api.xml_content.parsers.xml_2013 import XML2013
from weblyzard_api.xml_content.parsers.xml_deprecated import XMLDeprecated
# Sentence attributes that update_sentences() copies between Sentence
# objects when merging by md5sum.
SENTENCE_ATTRIBUTES = ('pos_tags', 'sem_orient', 'significance', 'md5sum',
                       'pos', 'token')
class Sentence(object):
    '''
    The sentence class used for accessing single sentences.

    Note: the class provides convenient properties for accessing pos tags
    and tokens:

    .sentence: sentence text
    .tokens : provides a list of tokens (e.g. ['A', 'new', 'day'])
    .pos_tags: provides a list of pos tags (e.g. ['DET', 'CC', 'NN'])
    '''
    # NOTE: this module is Python 2 (unicode builtin, old except syntax).
    def __init__(self, md5sum=None, pos=None, sem_orient=None, significance=None,
                 token=None, value=None, is_title=False, dependencies=None):
        # Derive the md5sum from the sentence text when not supplied.
        if not md5sum and value:
            try:
                m = hashlib.md5()
                m.update(value.encode('utf-8') if isinstance(value, unicode) else str(value))
                md5sum = m.hexdigest()
            except Exception, e:
                # Best-effort: a hashing failure only costs the checksum.
                # NOTE(review): printed rather than logged -- consider logging.
                print e
        self.md5sum = md5sum
        self.pos = pos
        self.sem_orient = sem_orient
        self.significance = significance
        self.token = token
        self.value = value
        self.is_title = is_title
        self.dependencies = dependencies

    def as_dict(self):
        # Public attributes only (names starting with '_' are skipped).
        return dict((k, v) for k, v in self.__dict__.iteritems() if not k.startswith('_'))
class XMLContent(object):
    '''Facade over the supported weblyzard XML formats (2005, 2013, deprecated):
    parses a document into attributes / Sentence objects and serialises back.'''

    # Format version -> parser class; the version is auto-detected from the
    # document itself (see get_xml_version).
    SUPPORTED_XML_VERSIONS = {XML2005.VERSION: XML2005,
                              XML2013.VERSION: XML2013,
                              XMLDeprecated.VERSION: XMLDeprecated}

    def __init__(self, xml_content):
        self.xml_version = None
        self.attributes = {}
        self.sentence_objects = []
        self.titles = []
        # Unsupported or empty content leaves the defaults above in place.
        result = self.parse_xml_content(xml_content)
        if result:
            self.xml_version, self.attributes, self.sentence_objects, self.titles = result

    @classmethod
    def parse_xml_content(cls, xml_content):
        '''Parse *xml_content*; return (xml_version, attributes,
        sentence_objects, titles) or None when empty/unsupported.'''
        xml_version = cls.get_xml_version(xml_content)
        if not xml_version or not xml_content:
            return None
        sentence_objects = []
        parser = cls.SUPPORTED_XML_VERSIONS[xml_version]
        attributes, sentences = parser.parse(xml_content)
        # A 'title' header attribute becomes a synthetic title Sentence.
        if 'title' in attributes:
            titles = [Sentence(value=attributes['title'], is_title=True)]
        else:
            titles = []
        # Split parsed sentences into titles and body sentences.
        for sentence in sentences:
            sent_obj = Sentence(**sentence)
            if sent_obj.is_title:
                titles.append(sent_obj)
            else:
                sentence_objects.append(sent_obj)
        return xml_version, attributes, sentence_objects, titles

    @classmethod
    def get_xml_version(cls, xml_content):
        '''Probe every known parser; implicitly returns None when none match.'''
        if not xml_content:
            return None
        for version, xml_parser in cls.SUPPORTED_XML_VERSIONS.iteritems():
            if xml_parser.is_supported(xml_content):
                return version

    def get_xml_document(self, header_fields='all',
                         sentence_attributes=SENTENCE_ATTRIBUTES,
                         xml_version=XML2013.VERSION):
        '''Serialise to the requested XML version (default: 2013 format).
        NOTE(review): header_fields and sentence_attributes are accepted but
        currently unused here -- confirm whether dump_xml should get them.'''
        if not xml_version:
            xml_version = self.xml_version
        return self.SUPPORTED_XML_VERSIONS[xml_version].dump_xml(titles=self.titles,
                                                                 attributes=self.attributes,
                                                                 sentences=self.sentences)

    def get_plain_text(self):
        ''' returns the plain text of the XML content '''
        if not len(self.sentences):
            return ''
        return '\n'.join([s.value for s in self.sentences if not s.is_title])

    @classmethod
    def get_text(cls, text):
        ''' encodes the text (Python 2: promote byte strings to unicode) '''
        if isinstance(text, str):
            text = text.decode('utf-8')
        return text

    def add_attribute(self, key, value):
        '''Set a single header attribute.'''
        if not self.attributes:
            self.attributes = {}
        self.attributes[key] = value

    def update_attributes(self, new_attributes):
        ''' updates the existing attributes with new ones '''
        # not using dict.update to allow advanced processing
        if not new_attributes or not isinstance(new_attributes, dict):
            return
        # Keys are coerced to str on the way in.
        for k, v in new_attributes.iteritems():
            self.attributes[str(k)] = v

    def as_dict(self, mapping=None, ignore_non_sentence=False):
        ''' convert the XML content to a dictionary.
        :param mapping: an optional mapping by which to restrict/rename
        the returned dictionary
        :param ignore_non_sentence: if true: sentences without POS tags
        are omitted from the result
        '''
        # try:?  (historic exception-swallowing fallback kept below,
        # disabled; a missing mapping now fails fast via the assert)
        if True:
            assert mapping, 'got no mapping'
            result = self.apply_dict_mapping(self.attributes, mapping)
            sentence_attr_name = mapping['sentences'] if 'sentences' in mapping else 'sentences'
            if 'sentences_map' in mapping:
                result[sentence_attr_name] = []
                sent_mapping = mapping['sentences_map']
                for sent in self.sentences:
                    if ignore_non_sentence and not sent.pos:
                        continue
                    sent_attributes = self.apply_dict_mapping(sent.as_dict(),
                                                              sent_mapping)
                    result[sentence_attr_name].append(sent_attributes)
        # except Exception, e:
        #     print e
        #     result = self.attributes
        #     result.update({'sentences': [sent.as_dict() for sent in self.sentences]})
        return result

    @classmethod
    def apply_dict_mapping(cls, attributes, mapping=None):
        '''Restrict *attributes* to the keys present in *mapping*, renaming
        them to the mapped names; without a mapping return them unchanged.'''
        result = attributes
        if mapping:
            result = {}
            for attr, value in attributes.iteritems():
                if attr in mapping:
                    result[mapping[attr]] = value
        return result

    def _get_attribute(self, attr_name):
        ''' @return: the attribute for the given name '''
        return self.attributes.get(attr_name, None)

    def get_nilsimsa(self):
        return self._get_attribute('nilsimsa')

    def get_content_type(self):
        return self._get_attribute('content_type')

    def get_title(self):
        return self._get_attribute('title')

    def get_lang(self):
        return self._get_attribute('lang')

    def get_content_id(self):
        # Header value is a string; expose it as int when present.
        content_id = self._get_attribute('content_id')
        return int(content_id) if content_id else content_id

    def get_sentences(self):
        return self.sentence_objects

    def update_sentences(self, sentences):
        ''' updates the values of the existing sentences. if the list of
        sentence object is empty, sentence_objects will be set to the new
        sentences. WARNING: this function will not add new sentences

        :param sentences: list of Sentence objects
        '''
        if not self.sentence_objects:
            self.sentence_objects = sentences
        else:
            # Merge by md5sum: copy each non-empty SENTENCE_ATTRIBUTES value
            # from the incoming sentence onto the existing one.
            sentence_dict = dict((sent.md5sum, sent) for sent in sentences)
            for sentence in self.sentence_objects:
                if sentence.md5sum in sentence_dict:
                    new_sentence = sentence_dict[sentence.md5sum]
                    for attrib in SENTENCE_ATTRIBUTES:
                        new_value = getattr(new_sentence, attrib)
                        if new_value:
                            setattr(sentence, attrib, new_value)

    # Attribute-style access.  Note: assigning to .sentences MERGES via
    # update_sentences() rather than replacing the list outright.
    sentences = property(get_sentences, update_sentences)
    plain_text = property(get_plain_text)
    nilsimsa = property(get_nilsimsa)
    content_type = property(get_content_type)
    title = property(get_title)
    lang = property(get_lang)
    content_id = property(get_content_id)
class TestXMLContent(unittest.TestCase):
def setUp(self):
'''
:
'''
self.xml_content = '''
<wl:page xmlns:wl="http://www.weblyzard.com/" content_id="228557824" content_type="text/html" lang="DE" title="Der ganze Wortlaut: Offener Brief an Niko Pelinka | Heute.at ">
<wl:sentence id="7e985ffb692bb6f617f25619ecca39a9"><![CDATA[Ich hasse scheiß encodings .... ]]></wl:sentence>
<wl:sentence id="7e985ffb692bb6f617f25619ecca3910"><![CDATA[Pöses ärbeiten am Wochenende ... scheiß encodings ]]></wl:sentence>
</wl:page> '''
# def test_update_sentences(self):
# xml_content = self.xml_content
# sentences = [Sentence('7e985ffb692bb6f617f25619ecca39a9'),
# Sentence('7e985ffb692bb6f617f25619ecca3910')]
#
# for s in sentences:
# s.pos_tags = 'nn nn'
# s.significance = 3
# s.sem_orient = 1
#
# xml = XMLContent(xml_content)
#
# print xml.get_xml_document()
#
# for sentence in xml.sentences:
# print sentence.md5sum, sentence.value, sentence.significance
#
# xml.sentences = sentences
#
# xml_out = xml.get_xml_document()
#
# for sentence in xml.sentences:
# assert sentence.significance == 3
# assert sentence.sem_orient == 1
#
# assert 'CDATA' in xml_out
#
# def test_double_sentences(self):
# xml_content = '''
# <wl:page xmlns:wl="http://www.weblyzard.com/" content_id="228557824" content_type="text/html" lang="DE" title="Der ganze Wortlaut: Offener Brief an Niko Pelinka | Heute.at ">
# <wl:sentence id="7e985ffb692bb6f617f25619ecca39a9"><![CDATA[Der ganze Wortlaut]]></wl:sentence>
# <wl:sentence id="7e985ffb692bb6f617f25619ecca39a9"><![CDATA[Der ganze Wortlaut]]></wl:sentence>
# </wl:page> '''
#
# xml = XMLContent(xml_content)
# assert len(xml.sentences) == 1, 'got %s sentences' % len(xml.sentences)
# xml_out = xml.get_xml_document()
# assert 'CDATA' in xml_out
#
# def test_empty_content(self):
# xml = XMLContent(None)
# assert '' == xml.get_plain_text()
# assert [] == xml.get_sentences()
#
# # def test_pos_tags(self):
# # xml = XMLContent(self.xml_content)
# # for sentence in xml.sentences:
# # sentence.pos_tags = 'NN NN AA'
# #
# # rdf = xml.get_pos_tags()
# # assert '<rdf:RDF xmlns' in rdf
#
# def test_attributes(self):
# ''' '''
# xml = XMLContent(self.xml_content)
#
# assert 'Der ganze Wortlaut' in xml.title
# assert xml.lang == 'DE'
# assert xml.content_type == 'text/html'
# assert xml.nilsimsa == None
# assert xml.content_id == 228557824
#
#
# def test_supported_version(self):
#
# new_xml = '''
# <wl:page xmlns:wl="http://www.weblyzard.com/wl/2013#"
# xmlns:dc="http://purl.org/dc/elements/1.1/"
# wl:id="578351358"
# dc:format="text/html"
# xml:lang="de"
# wl:nilsimsa="37345e380610614cc7696ac08ed098e05fa64211755da1d4f525ef4cd762726e">
# <wl:sentence
# wl:pos="NN $( NN APPR ADJA NN VVPP" wl:id="b42bb3f2cb7ed667ba311811823f37cf"
# wl:token="0,20 21,22 23,38 39,42 43,49 50,56 57,64"
# wl:sem_orient="0.0"
# wl:significance="0.0"
# wl:is_title="true">
# <![CDATA[Freihandelsgespr??che - Erleichterungen f??r kleine Firmen geplant]]>
# </wl:sentence>
# <wl:sentence
# wl:pos="NE $( NE ( NE ) $( ADJA KON ADJA NN VMFIN APPR NN APPRART NN APPR ART NE VVFIN $."
# wl:id="05e9a90a82d67702cc19af457222c5b6"
# wl:token="0,7 7,8 8,18 19,20 20,27 27,28 29,30 31,37 38,41 42,50 51,62 63,69 70,73 74,89 90,94 95,101 102,105 106,109 110,113 114,120 120,121"
# wl:sem_orient="0.0" wl:significance="0.0">
# <![CDATA[Br??ssel/Washington (APA/dpa) - Kleine und mittlere Unternehmen k??nnen auf Erleichterungen beim Handel mit den USA hoffen.]]>
# </wl:sentence>
# <wl:sentence
# wl:pos="APPR ART NN APPRART NN NN $( KON NN ( NE ) VVINF ART NN KON ART NE APPR ART ADJA NN $, PRELS PRF ADJA NN ( NE ) VVINF VMFIN $."
# wl:id="9469bb40cbcbb8fba2e31567e135d43d"
# wl:token="0,3 4,7 8,21 22,25 26,43 44,51 51,52 53,56 57,77 78,79 79,83 83,84 85,96 97,100 101,103 104,107 108,111 112,115 116,120 121,124 125,132 133,140 140,141 142,145 146,150 151,168 169,180 181,182 182,185 185,186 187,193 194,198 198,199"
# wl:sem_orient="0.0"
# wl:significance="0.0">
# <![CDATA[Bei den Verhandlungen zum Transatlantischen Handels- und Investitionsabkommen (TTIP) diskutieren die EU und die USA ??ber ein eigenes Kapitel, das sich mittelst??ndischen Unternehmen (KMU) widmen soll.]]>
# </wl:sentence>
# <wl:sentence
# wl:pos="PDS VVFIN PIDAT NN APPR ART APPRART NN ADJA ADJA NN $."
# wl:id="a3e062af32c8b12f4c42ffd57063f531"
# wl:token="0,3 4,13 14,19 20,26 27,29 30,35 36,38 39,46 47,63 64,75 76,84 84,85"
# wl:sem_orient="0.0"
# wl:significance="0.0">
# <![CDATA[Das schreiben beide Seiten in einem am Freitag ver??ffentlichten gemeinsamen Dokument.]]>
# </wl:sentence>
# <wl:sentence
# wl:pos="ART NN VAFIN ART ADJA ADJA NN $."
# wl:id="90d951f171fa74fbda2177341906cb77"
# wl:token="0,3 4,8 9,12 13,16 17,27 28,41 42,53 53,54"
# wl:sem_orient="0.0"
# wl:significance="0.0">
# <![CDATA[Das Ziel sei ein leichterer gegenseitiger Marktzugang.]]>
# </wl:sentence>
# <wl:sentence
# wl:pos="NN NE NE VVFIN APPRART NN ART ADJA NN APPR NE $. &quot; APPRART NN VVFIN PPER ADJD ADJD PTKVZ $. &quot; ART ADJA NN VMFIN ADV APPR ART NN APPR NE VVFIN $, ART ADJD NN VVFIN APPR NN ADV PTKNEG PTKVZ $."
# wl:id="a4beee1292a24bfa73c7675a03f2d115"
# wl:token="0,21 22,25 26,34 35,40 41,44 45,54 55,58 59,66 67,84 85,87 88,95 95,96 97,98 98,100 101,107 108,114 115,118 119,127 128,131 132,137 137,138 138,139 140,143 144,152 153,162 163,169 170,174 175,178 179,182 183,189 190,192 193,203 204,215 215,216 217,220 221,228 229,235 236,241 242,246 247,257 258,262 263,268 269,273 273,274"
# wl:sem_orient="0.0"
# wl:significance="0.0">
# <![CDATA[US-Verhandlungsf??hrer Dan Mullaney sagte zum Abschluss der vierten Verhandlungsrunde in Br??ssel: ???Im Moment kommen wir wirklich gut voran.??? Die n??chsten Gespr??che sollen noch vor dem Sommer in Washington stattfinden, ein genauer Termin steht nach EU-Angaben noch nicht fest.]]>
# </wl:sentence>
# </wl:page>'''
# old_xml = '''
# <wl:page xmlns:wl="http://www.weblyzard.com/wl/2005"
# lang="de"
# title="Freihandelsgespr??che - Erleichterungen f??r kleine Firmen geplant"
# content_type="text/html"
# content_id="578351358"
# nilsimsa="73345e38061061454f686ac08fd498e05fa6421175d5a1d5f525ef48d77a322e">
# <wl:sentence
# pos_tags="None"
# sem_orient="0.721687836487"
# significance="839.529561215"
# md5sum="b6ec48367959b201fb07f421d0743e50"
# pos="NE $( NE ( NE ) $( ADJA KON ADJA NN VMFIN APPR NN APPRART NN APPR ART NE VVFIN $."
# token="0,7 7,8 8,18 19,20 20,27 27,28 29,30 31,37 38,41 42,50 51,62 63,69 70,73 74,89 90,94 95,101 102,105 106,109 110,113 114,120 120,121">
# <![CDATA[Br??ssel/Washington (APA/dpa) - Kleine und mittlere Unternehmen k??nnen auf Erleichterungen beim Handel mit den USA hoffen.]]>
# </wl:sentence>
# <wl:sentence
# pos_tags="None"
# sem_orient="0.68041381744"
# significance="298.191028195"
# md5sum="c1940778e578e6748046fe6f5eb06a9b"
# pos="APPR ART NN APPRART NN NN $( KON NN ( NE ) VVINF ART NN KON ART NE APPR ART ADJA NN $, PRELS PRF ADJA NN ( NE ) VVINF VMFIN $."
# token="0,3 4,7 8,21 22,25 26,43 44,51 51,52 53,56 57,77 78,79 79,83 83,84 85,96 97,100 101,103 104,107 108,111 112,115 116,120 121,124 125,132 133,140 140,141 142,145 146,150 151,168 169,180 181,182 182,185 185,186 187,193 194,198 198,199">
# <![CDATA[Bei den Verhandlungen zum Transatlantischen Handels- und Investitionsabkommen (TTIP) diskutieren die EU und die USA ??ber ein eigenes Kapitel, das sich mittelst??ndischen Unternehmen (KMU) widmen soll.]]>
# </wl:sentence>
# <wl:sentence
# pos_tags="None"
# sem_orient="1.0"
# significance="197.953352851"
# md5sum="e865ac842126627352d778df347a16db"
# pos="PDS VVFIN PIDAT NN APPR ART APPRART NN ADJA ADJA NN $."
# token="0,3 4,13 14,19 20,26 27,29 30,35 36,38 39,46 47,63 64,75 76,84 84,85">
# <![CDATA[Das schreiben beide Seiten in einem am Freitag ver??ffentlichten gemeinsamen Dokument.]]>
# </wl:sentence>
# <wl:sentence
# pos_tags="None"
# sem_orient="1.0"
# significance="0.0"
# md5sum="90d951f171fa74fbda2177341906cb77"
# pos="ART NN VAFIN ART ADJA ADJA NN $."
# token="0,3 4,8 9,12 13,16 17,27 28,41 42,53 53,54">
# <![CDATA[Das Ziel sei ein leichterer gegenseitiger Marktzugang.]]>
# </wl:sentence>
# <wl:sentence
# pos_tags="None"
# sem_orient="0.785674201318"
# significance="1370.67991114"
# md5sum="27045cb5143ba9726e767d6df80afafd"
# pos="NN NE NE VVFIN APPRART NN ART ADJA NN APPR NE $. XY APPRART NN VVFIN PPER ADJD ADJD PTKVZ $. XY ART ADJA NN VMFIN ADV APPR ART NN APPR NE VVFIN $, ART ADJD NN VVFIN APPR NN ADV PTKNEG PTKVZ $."
# token="0,21 22,25 26,34 35,40 41,44 45,54 55,58 59,66 67,84 85,87 88,95 95,96 97,98 98,100 101,107 108,114 115,118 119,127 128,131 132,137 137,138 138,139 140,143 144,152 153,162 163,169 170,174 175,178 179,182 183,189 190,192 193,203 204,215 215,216 217,220 221,228 229,235 236,241 242,246 247,257 258,262 263,268 269,273 273,274">
# <![CDATA[US-Verhandlungsf??hrer Dan Mullaney sagte zum Abschluss der vierten Verhandlungsrunde in Br??ssel: ???Im Moment kommen wir wirklich gut voran.??? Die n??chsten Gespr??che sollen noch vor dem Sommer in Washington stattfinden, ein genauer Termin steht nach EU-Angaben noch nicht fest.]]>
# </wl:sentence>
# </wl:page>'''
#
# old_xml_obj = XMLContent(xml_content=old_xml)
# old_xml_str = old_xml_obj.get_xml_document(xml_version=2005)
# assert old_xml_obj.xml_version == 2005
# assert 'content_id="578351358"' in old_xml_str
# assert len(old_xml_obj.titles) == 1
#
# new_xml_obj = XMLContent(xml_content=new_xml)
# new_xml_str = new_xml_obj.get_xml_document()
# assert new_xml_obj.xml_version == 2013
# assert len(new_xml_obj.titles) == 1
#
# assert 'wl:id="578351358"' in new_xml_str
#
# assert len(old_xml_obj.sentences) == len(new_xml_obj.sentences)
#
# xml_test_obj = XMLContent(xml_content=new_xml_obj.get_xml_document())
# assert xml_test_obj.xml_version == 2013
#
# print new_xml_obj.get_xml_document()
# print new_xml_obj.get_xml_document(xml_version=2005)
#
# xml_converted = xml_test_obj.get_xml_document(xml_version=2005)
#
# old_xml_obj2 = XMLContent(xml_content=xml_converted)
#
# assert old_xml_obj2.xml_version == 2005
# assert len(old_xml_obj2.sentences) == 5
# assert len(old_xml_obj2.titles) == 1
def test_as_dict(self):
    ''' tests exporting the document as dict '''
    # weblyzard 2005 XML fixture: a single page with two POS-tagged
    # sentences (note: the fixture lines must stay byte-identical,
    # the md5sums in expected_result depend on the sentence values)
    xml_content = '''<wl:page xmlns:wl="http://www.weblyzard.com/wl/2005" content_id="495692737" lang="en" nilsimsa="5bb001c8a610a105b1120bb9c4889d33c62b19e1493245cc2f252a83e270646b" title="Keystone report leaves environmental, energy, safety debates far from settled" source_id="12830" jonas_type="http" description="WASHINGTON &mdash; The State Department minimized the climate change impact of building the Keystone XL pipeline in its final environmental review issued on Friday, a key finding as President Barack Obama decides whether to approve the controversial project. Olivier Douliery | Abaca Press/MCT Activists engage in civil disobedience Wednesday, February 13, 2013 at the White House in Washington, D.C., in hopes of pressuring President Barack Obama to reject the Keystone XL oil sands pipeline. http://media.mcclatchydc.com/smedia/2014/01/31/17/06/SoIRM.La.91.jpg " style="border-left:2px solid #dddddd; padding-left:5px;max-width:100%;"> More News Read more Politics However, the review leaves the..." feed_url="http://rss.wn.com/english/keyword/" original_request_url="http://article.wn.com/view/2014/02/01/Keystone_report_leaves_environmental_energy_safety_debates_f_1/" content_type="text/html">
<wl:sentence pos_tags="None" sem_orient="0.0" significance="12951.7567942" md5sum="0c8cb136073a20a932f2d6748204ce9b" pos="NNP CD ( NN ) : DT NNP NNP POS JJ JJ NN IN DT NN NN IN DT JJ NN NNS TO DT NNP NNP NNP VBZ VBN PRP VBP IN DT JJ CC JJ NN IN NNP NNP VBZ DT NN IN DT NN ." token="0,4 5,7 8,9 9,18 18,19 20,22 23,26 27,32 33,43 43,45 46,51 52,65 66,76 77,79 80,83 84,92 93,101 102,106 107,110 111,119 120,123 124,129 130,132 133,136 137,141 142,146 147,152 153,155 156,158 159,161 162,166 167,169 170,173 174,187 188,191 192,201 202,208 209,211 212,221 222,227 228,239 240,243 244,256 257,259 260,263 264,272 272,273"><![CDATA[Dec. 23 (Bloomberg) -- The State Department's final environmental assessment of the Keystone pipeline from the Canadian tar sands to the U.S. Gulf Coast is c. We look at the environmental and political impact if President Obama greenlights the construction of the pipeline.]]></wl:sentence>
<wl:sentence pos_tags="None" sem_orient="0.0" significance="0.0" md5sum="cdc2b1edeec27081819ca4f50e067240" pos="NNP NNP VBZ VBN IN NNS : NNS ." token="0,6 7,15 16,18 19,25 26,28 29,35 35,36 37,42 42,43"><![CDATA[Shihab Rattansi is joined by guests: clima.]]></wl:sentence>
</wl:page>'''
    # the result expected after applying attr_mapping below:
    # content_id -> id, sentences -> sentence, md5sum -> id
    expected_result = {'id': 495692737, 'lang': 'en',
                       'sentence': [{'id': '0c8cb136073a20a932f2d6748204ce9b',
                                     'token': '0,4 5,7 8,9 9,18 18,19 20,22 23,26 27,32 33,43 43,45 46,51 52,65 66,76 77,79 80,83 84,92 93,101 102,106 107,110 111,119 120,123 124,129 130,132 133,136 137,141 142,146 147,152 153,155 156,158 159,161 162,166 167,169 170,173 174,187 188,191 192,201 202,208 209,211 212,221 222,227 228,239 240,243 244,256 257,259 260,263 264,272 272,273',
                                     'value': '''Dec. 23 (Bloomberg) -- The State Department's final environmental assessment of the Keystone pipeline from the Canadian tar sands to the U.S. Gulf Coast is c. We look at the environmental and political impact if President Obama greenlights the construction of the pipeline.''',
                                     'pos': 'NNP CD ( NN ) : DT NNP NNP POS JJ JJ NN IN DT NN NN IN DT JJ NN NNS TO DT NNP NNP NNP VBZ VBN PRP VBP IN DT JJ CC JJ NN IN NNP NNP VBZ DT NN IN DT NN .'},
                                    {'id': 'cdc2b1edeec27081819ca4f50e067240',
                                     'token': '0,6 7,15 16,18 19,25 26,28 29,35 35,36 37,42 42,43',
                                     'value': 'Shihab Rattansi is joined by guests: clima.',
                                     'pos': 'NNP NNP VBZ VBN IN NNS : NNS .'}]}
    xml_obj = XMLContent(xml_content)
    # mapping passed to XMLContent.as_dict: renames document attributes
    # (outer keys) and sentence attributes ('sentences_map')
    attr_mapping = {'content_id': 'id',
                    'lang': 'lang',
                    'sentences': 'sentence',
                    'sentences_map': {'pos': 'pos',
                                      'token': 'token',
                                      'md5sum': 'id',
                                      'value': 'value'}}
    result = xml_obj.as_dict(mapping=attr_mapping)
    # debug output (Python 2 print statements) for easier diffing on failure
    print 'result: '
    pprint(result)
    print 'expected result'
    pprint(expected_result)
    assert result == expected_result
# run the unit tests when this module is executed as a script
if __name__ == '__main__':
    unittest.main()
# new: support for adding titles to the sentences
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Created on Feb, 27 2013
@author: heinz-peterlang
albert weichselbraun
Handles the new (http://www.weblyzard.com/wl/2013#) weblyzard
XML format.
Functions added:
- support for sentence tokens and pos iterators
Remove functions:
- compatibility fixes for namespaces, encodings etc.
- support for the old POS tags mapping.
'''
import logging
from lxml import etree
from pprint import pprint
import unittest
import hashlib
from weblyzard_api.xml_content.parsers.xml_2005 import XML2005
from weblyzard_api.xml_content.parsers.xml_2013 import XML2013
from weblyzard_api.xml_content.parsers.xml_deprecated import XMLDeprecated
# Sentence attributes that XMLContent.update_sentences copies between
# Sentence objects; also used as the default attribute set when
# serializing sentences in get_xml_document().
SENTENCE_ATTRIBUTES = ('pos_tags', 'sem_orient', 'significance', 'md5sum',
                       'pos', 'token')
class Sentence(object):
    '''
    The sentence class used for accessing single sentences.

    Note: the class provides convenient properties for accessing pos tags
    and tokens:

    .sentence: sentence text
    .tokens  : provides a list of tokens (e.g. ['A', 'new', 'day'])
    .pos_tags: provides a list of pos tags (e.g. ['DET', 'CC', 'NN'])
    '''

    def __init__(self, md5sum=None, pos=None, sem_orient=None, significance=None,
                 token=None, value=None, is_title=False, dependencies=None):
        '''
        :param md5sum: sentence identifier; computed from ``value`` if omitted
        :param pos: whitespace separated POS tags of the sentence
        :param sem_orient: semantic orientation of the sentence
        :param significance: significance score of the sentence
        :param token: whitespace separated "start,end" token offsets
        :param value: the sentence text
        :param is_title: whether this sentence is the document title
        :param dependencies: dependency information for the sentence
        '''
        # derive the identifier from the sentence text if it is missing
        if not md5sum and value:
            try:
                # utf-8 encode unicode text; byte strings are hashed as-is
                encoded = value if isinstance(value, bytes) \
                    else value.encode('utf-8')
                md5sum = hashlib.md5(encoded).hexdigest()
            except Exception as e:
                # best effort only: leave md5sum unset, but log the problem
                # instead of printing it to stdout (bug fix: the previous
                # version used a bare "print e")
                logging.warning('could not compute sentence md5sum: %s', e)
        self.md5sum = md5sum
        self.pos = pos
        self.sem_orient = sem_orient
        self.significance = significance
        self.token = token
        self.value = value
        self.is_title = is_title
        self.dependencies = dependencies

    def as_dict(self):
        ''' :returns: a dict of all public (non-underscore) attributes '''
        # items() instead of iteritems(): identical on Python 2,
        # forward compatible with Python 3
        return dict((k, v) for k, v in self.__dict__.items()
                    if not k.startswith('_'))
class XMLContent(object):
    '''
    Parses and serializes weblyzard XML documents, dispatching to the
    version-specific parser classes (2005, 2013, deprecated).
    '''
    # registry mapping an XML format version to its parser class;
    # get_xml_version() probes these to auto-detect a document's format
    SUPPORTED_XML_VERSIONS = {XML2005.VERSION: XML2005,
                              XML2013.VERSION: XML2013,
                              XMLDeprecated.VERSION: XMLDeprecated}

    def __init__(self, xml_content):
        '''
        :param xml_content: the XML document (string) to parse; on an
            unparseable/empty document the defaults below are kept
        '''
        self.xml_version = None
        self.attributes = {}
        self.sentence_objects = []
        self.titles = []
        result = self.parse_xml_content(xml_content)
        if result:
            self.xml_version, self.attributes, self.sentence_objects, self.titles = result

    @classmethod
    def parse_xml_content(cls, xml_content):
        '''
        Parse ``xml_content`` with the parser matching its detected version.

        :returns: (xml_version, attributes, sentence_objects, titles) or
            None if the version could not be detected or content is empty
        '''
        xml_version = cls.get_xml_version(xml_content)
        if not xml_version or not xml_content:
            return None
        sentence_objects = []
        parser = cls.SUPPORTED_XML_VERSIONS[xml_version]
        attributes, sentences = parser.parse(xml_content)
        # a 'title' document attribute becomes a synthetic title Sentence
        if 'title' in attributes:
            titles = [Sentence(value=attributes['title'], is_title=True)]
        else:
            titles = []
        # split parsed sentences into titles and regular sentences
        for sentence in sentences:
            sent_obj = Sentence(**sentence)
            if sent_obj.is_title:
                titles.append(sent_obj)
            else:
                sentence_objects.append(sent_obj)
        return xml_version, attributes, sentence_objects, titles

    @classmethod
    def get_xml_version(cls, xml_content):
        '''
        :returns: the version key of the first parser that accepts the
            document, or None (implicitly) if none is supported
        '''
        if not xml_content:
            return None
        for version, xml_parser in cls.SUPPORTED_XML_VERSIONS.iteritems():
            if xml_parser.is_supported(xml_content):
                return version

    def get_xml_document(self, header_fields='all',
                         sentence_attributes=SENTENCE_ATTRIBUTES,
                         xml_version=XML2013.VERSION):
        '''
        Serialize the document in the requested XML format.

        :param xml_version: target format; pass a falsy value to reuse the
            version detected at parse time
        NOTE(review): header_fields and sentence_attributes are currently
        unused by this implementation.
        '''
        if not xml_version:
            xml_version = self.xml_version
        return self.SUPPORTED_XML_VERSIONS[xml_version].dump_xml(titles=self.titles,
                                                                 attributes=self.attributes,
                                                                 sentences=self.sentences)

    def get_plain_text(self):
        ''' returns the plain text of the XML content (titles excluded) '''
        if not len(self.sentences):
            return ''
        return '\n'.join([s.value for s in self.sentences if not s.is_title])

    @classmethod
    def get_text(cls, text):
        ''' encodes the text: decodes Python 2 byte strings to unicode '''
        if isinstance(text, str):
            text = text.decode('utf-8')
        return text

    def add_attribute(self, key, value):
        ''' sets a single document attribute '''
        if not self.attributes:
            self.attributes = {}
        self.attributes[key] = value

    def update_attributes(self, new_attributes):
        ''' updates the existing attributes with new ones '''
        # not using dict.update to allow advanced processing
        if not new_attributes or not isinstance(new_attributes, dict):
            return
        for k, v in new_attributes.iteritems():
            # keys are normalized to str
            self.attributes[str(k)] = v

    def as_dict(self, mapping=None,
                ignore_non_sentence=False, add_titles_to_sentences=False):
        ''' convert the XML content to a dictionary.

        :param mapping, an optional mapping by which to restrict/rename
            the returned dictionary
        :param ignore_non_sentence: if true: sentences without without POS tags
            are omitted from the result
        :param add_titles_to_sentences: if true, title sentences are
            prepended to the exported sentence list
        '''
        # NOTE(review): "if True:" is leftover scaffolding from a disabled
        # try/except (kept below); the mapping is effectively mandatory
        # try:?
        if True:
            assert mapping, 'got no mapping'
            result = self.apply_dict_mapping(self.attributes, mapping)
            sentence_attr_name = mapping['sentences'] if 'sentences' in mapping else 'sentences'
            if 'sentences_map' in mapping:
                result[sentence_attr_name] = []
                sent_mapping = mapping['sentences_map']
                if add_titles_to_sentences and len(self.titles):
                    sentences = self.titles + self.sentences
                else:
                    sentences = self.sentences
                for sent in sentences:
                    # skip entries without POS tags if requested
                    if ignore_non_sentence and not sent.pos:
                        continue
                    sent_attributes = self.apply_dict_mapping(sent.as_dict(),
                                                              sent_mapping)
                    result[sentence_attr_name].append(sent_attributes)
        # except Exception, e:
        #    print e
        #    result = self.attributes
        #    result.update({'sentences': [sent.as_dict() for sent in self.sentences]})
        return result

    @classmethod
    def apply_dict_mapping(cls, attributes, mapping=None):
        ''' restrict/rename ``attributes`` keys via ``mapping``; without a
        mapping the attributes are returned unchanged '''
        result = attributes
        if mapping:
            result = {}
            for attr, value in attributes.iteritems():
                if attr in mapping:
                    result[mapping[attr]] = value
        return result

    def _get_attribute(self, attr_name):
        ''' @return: the attribute for the given name '''
        return self.attributes.get(attr_name, None)

    def get_nilsimsa(self):
        ''' @return: the document's nilsimsa hash, if any '''
        return self._get_attribute('nilsimsa')

    def get_content_type(self):
        ''' @return: the document's content type, if any '''
        return self._get_attribute('content_type')

    def get_title(self):
        ''' @return: the document title attribute, if any '''
        return self._get_attribute('title')

    def get_lang(self):
        ''' @return: the document language, if any '''
        return self._get_attribute('lang')

    def get_content_id(self):
        ''' @return: the content_id as int (None/empty passed through) '''
        content_id = self._get_attribute('content_id')
        return int(content_id) if content_id else content_id

    def get_sentences(self):
        ''' @return: the list of non-title Sentence objects '''
        return self.sentence_objects

    def update_sentences(self, sentences):
        ''' updates the values of the existing sentences. if the list of
        sentence object is empty, sentence_objects will be set to the new
        sentences. WARNING: this function will not add new sentences

        :param sentences: list of Sentence objects
        '''
        if not self.sentence_objects:
            self.sentence_objects = sentences
        else:
            # match sentences by md5sum and copy over non-empty attributes
            sentence_dict = dict((sent.md5sum, sent) for sent in sentences)
            for sentence in self.sentence_objects:
                if sentence.md5sum in sentence_dict:
                    new_sentence = sentence_dict[sentence.md5sum]
                    for attrib in SENTENCE_ATTRIBUTES:
                        new_value = getattr(new_sentence, attrib)
                        if new_value:
                            setattr(sentence, attrib, new_value)

    # convenience read (and, for sentences, write) accessors
    sentences = property(get_sentences, update_sentences)
    plain_text = property(get_plain_text)
    nilsimsa = property(get_nilsimsa)
    content_type = property(get_content_type)
    title = property(get_title)
    lang = property(get_lang)
    content_id = property(get_content_id)
class TestXMLContent(unittest.TestCase):
    '''
    Unit tests for XMLContent.

    Cleanup: ~200 lines of dead, commented-out test code and the debug
    print/pprint statements were removed; the active assertions are
    unchanged.
    '''

    def setUp(self):
        ''' provides a small weblyzard XML document as a shared fixture '''
        self.xml_content = '''
<wl:page xmlns:wl="http://www.weblyzard.com/" content_id="228557824" content_type="text/html" lang="DE" title="Der ganze Wortlaut: Offener Brief an Niko Pelinka | Heute.at ">
<wl:sentence id="7e985ffb692bb6f617f25619ecca39a9"><![CDATA[Ich hasse scheiß encodings .... ]]></wl:sentence>
<wl:sentence id="7e985ffb692bb6f617f25619ecca3910"><![CDATA[Pöses ärbeiten am Wochenende ... scheiß encodings ]]></wl:sentence>
</wl:page> '''

    def test_as_dict(self):
        ''' tests exporting the document as dict '''
        # 2005-format fixture: one page with two POS-tagged sentences
        xml_content = '''<wl:page xmlns:wl="http://www.weblyzard.com/wl/2005" content_id="495692737" lang="en" nilsimsa="5bb001c8a610a105b1120bb9c4889d33c62b19e1493245cc2f252a83e270646b" title="Keystone report leaves environmental, energy, safety debates far from settled" source_id="12830" jonas_type="http" description="WASHINGTON &mdash; The State Department minimized the climate change impact of building the Keystone XL pipeline in its final environmental review issued on Friday, a key finding as President Barack Obama decides whether to approve the controversial project. Olivier Douliery | Abaca Press/MCT Activists engage in civil disobedience Wednesday, February 13, 2013 at the White House in Washington, D.C., in hopes of pressuring President Barack Obama to reject the Keystone XL oil sands pipeline. http://media.mcclatchydc.com/smedia/2014/01/31/17/06/SoIRM.La.91.jpg " style="border-left:2px solid #dddddd; padding-left:5px;max-width:100%;"> More News Read more Politics However, the review leaves the..." feed_url="http://rss.wn.com/english/keyword/" original_request_url="http://article.wn.com/view/2014/02/01/Keystone_report_leaves_environmental_energy_safety_debates_f_1/" content_type="text/html">
<wl:sentence pos_tags="None" sem_orient="0.0" significance="12951.7567942" md5sum="0c8cb136073a20a932f2d6748204ce9b" pos="NNP CD ( NN ) : DT NNP NNP POS JJ JJ NN IN DT NN NN IN DT JJ NN NNS TO DT NNP NNP NNP VBZ VBN PRP VBP IN DT JJ CC JJ NN IN NNP NNP VBZ DT NN IN DT NN ." token="0,4 5,7 8,9 9,18 18,19 20,22 23,26 27,32 33,43 43,45 46,51 52,65 66,76 77,79 80,83 84,92 93,101 102,106 107,110 111,119 120,123 124,129 130,132 133,136 137,141 142,146 147,152 153,155 156,158 159,161 162,166 167,169 170,173 174,187 188,191 192,201 202,208 209,211 212,221 222,227 228,239 240,243 244,256 257,259 260,263 264,272 272,273"><![CDATA[Dec. 23 (Bloomberg) -- The State Department's final environmental assessment of the Keystone pipeline from the Canadian tar sands to the U.S. Gulf Coast is c. We look at the environmental and political impact if President Obama greenlights the construction of the pipeline.]]></wl:sentence>
<wl:sentence pos_tags="None" sem_orient="0.0" significance="0.0" md5sum="cdc2b1edeec27081819ca4f50e067240" pos="NNP NNP VBZ VBN IN NNS : NNS ." token="0,6 7,15 16,18 19,25 26,28 29,35 35,36 37,42 42,43"><![CDATA[Shihab Rattansi is joined by guests: clima.]]></wl:sentence>
</wl:page>'''
        # expected output after the attribute mapping below has been applied
        expected_result = {'id': 495692737, 'lang': 'en',
                           'sentence': [{'id': '0c8cb136073a20a932f2d6748204ce9b',
                                         'token': '0,4 5,7 8,9 9,18 18,19 20,22 23,26 27,32 33,43 43,45 46,51 52,65 66,76 77,79 80,83 84,92 93,101 102,106 107,110 111,119 120,123 124,129 130,132 133,136 137,141 142,146 147,152 153,155 156,158 159,161 162,166 167,169 170,173 174,187 188,191 192,201 202,208 209,211 212,221 222,227 228,239 240,243 244,256 257,259 260,263 264,272 272,273',
                                         'value': '''Dec. 23 (Bloomberg) -- The State Department's final environmental assessment of the Keystone pipeline from the Canadian tar sands to the U.S. Gulf Coast is c. We look at the environmental and political impact if President Obama greenlights the construction of the pipeline.''',
                                         'pos': 'NNP CD ( NN ) : DT NNP NNP POS JJ JJ NN IN DT NN NN IN DT JJ NN NNS TO DT NNP NNP NNP VBZ VBN PRP VBP IN DT JJ CC JJ NN IN NNP NNP VBZ DT NN IN DT NN .'},
                                        {'id': 'cdc2b1edeec27081819ca4f50e067240',
                                         'token': '0,6 7,15 16,18 19,25 26,28 29,35 35,36 37,42 42,43',
                                         'value': 'Shihab Rattansi is joined by guests: clima.',
                                         'pos': 'NNP NNP VBZ VBN IN NNS : NNS .'}]}
        xml_obj = XMLContent(xml_content)
        # rename document attributes (outer keys) and sentence attributes
        # ('sentences_map') in the exported dict
        attr_mapping = {'content_id': 'id',
                        'lang': 'lang',
                        'sentences': 'sentence',
                        'sentences_map': {'pos': 'pos',
                                          'token': 'token',
                                          'md5sum': 'id',
                                          'value': 'value'}}
        result = xml_obj.as_dict(mapping=attr_mapping)
        assert result == expected_result
        # the title is prepended when add_titles_to_sentences is set
        result2 = xml_obj.as_dict(mapping=attr_mapping,
                                  add_titles_to_sentences=True)
        assert len(result2['sentence']) == 3
        # the title (no POS tags) is dropped again with ignore_non_sentence
        result3 = xml_obj.as_dict(mapping=attr_mapping,
                                  ignore_non_sentence=True,
                                  add_titles_to_sentences=True)
        assert len(result3['sentence']) == 2
# run the unit tests when this module is executed as a script
if __name__ == '__main__':
    unittest.main()
|
__version__ = "0.3.15"
# Bumped version to 0.3.16 [ci skip]
__version__ = "0.3.16"
|
# ccm clusters
from six import print_, iteritems
from six.moves import xrange
import yaml
import os
import re
import subprocess
import shutil
import time
from ccmlib import common, repository
from ccmlib.node import Node, NodeError
from ccmlib.bulkloader import BulkLoader
class Cluster():
def __init__(self, path, name, partitioner=None, cassandra_dir=None, create_directory=True, cassandra_version=None, verbose=False):
    '''
    :param path: parent directory under which the cluster directory is kept
    :param name: cluster name (also the directory name under ``path``)
    :param partitioner: optional partitioner class name
    :param cassandra_dir: local cassandra installation to use; ignored when
        ``cassandra_version`` is given
    :param create_directory: create (and on failure remove) the cluster dir
    :param cassandra_version: version to download/set up via the repository
    :param verbose: verbose repository setup
    '''
    self.name = name
    self.nodes = {}
    self.seeds = []
    self.partitioner = partitioner
    self._config_options = {}
    self.__log_level = "INFO"
    self.__path = path
    self.__version = None
    self.use_vnodes = False
    if create_directory:
        # we create the dir before potentially downloading to throw an error sooner if need be
        os.mkdir(self.get_path())
    try:
        if cassandra_version is None:
            # at this point, cassandra_dir should always not be None, but
            # we keep this for backward compatibility (in loading old cluster)
            if cassandra_dir is not None:
                if common.is_win():
                    self.__cassandra_dir = cassandra_dir
                else:
                    self.__cassandra_dir = os.path.abspath(cassandra_dir)
                self.__version = self.__get_version_from_build()
        else:
            # download/set up the requested version; may return the version
            # it resolved, otherwise read it from the build
            dir, v = repository.setup(cassandra_version, verbose)
            self.__cassandra_dir = dir
            self.__version = v if v is not None else self.__get_version_from_build()
        if create_directory:
            common.validate_cassandra_dir(self.__cassandra_dir)
            self.__update_config()
    except:
        # roll back the directory we created above, then re-raise
        if create_directory:
            shutil.rmtree(self.get_path())
        raise
def set_partitioner(self, partitioner):
    '''Set the cluster partitioner, persist it to cluster.conf and
    return self (fluent style).'''
    self.partitioner = partitioner
    self.__update_config()
    return self
def set_cassandra_dir(self, cassandra_dir=None, cassandra_version=None, verbose=False):
    '''Point the cluster at a cassandra installation.

    Either an explicit ``cassandra_dir`` or a ``cassandra_version`` (set up
    through the repository) may be given; the cluster config and every
    node's config files are refreshed afterwards. Returns self.
    '''
    if cassandra_version is None:
        self.__cassandra_dir = cassandra_dir
        common.validate_cassandra_dir(cassandra_dir)
        self.__version = self.__get_version_from_build()
    else:
        setup_dir, setup_version = repository.setup(cassandra_version, verbose)
        self.__cassandra_dir = setup_dir
        if setup_version is None:
            setup_version = self.__get_version_from_build()
        self.__version = setup_version
    self.__update_config()
    for node in self.nodes.values():
        node.import_config_files()
    # if any nodes have a data center, let's update the topology
    if any(node.data_center for node in self.nodes.values()):
        self.__update_topology_files()
    return self
def get_cassandra_dir(self):
    '''Return the cassandra installation directory, validating it first.'''
    common.validate_cassandra_dir(self.__cassandra_dir)
    return self.__cassandra_dir
def nodelist(self):
return [ self.nodes[name] for name in sorted(self.nodes.keys()) ]
def version(self):
return self.__version
@staticmethod
def load(path, name):
    '''Load a previously saved cluster from ``<path>/<name>/cluster.conf``.

    :param path: parent directory containing the cluster directory
    :param name: cluster name
    :returns: the reconstructed Cluster with its nodes and seeds attached
    :raises common.LoadError: if a required property is missing
    '''
    cluster_path = os.path.join(path, name)
    filename = os.path.join(cluster_path, 'cluster.conf')
    with open(filename, 'r') as f:
        # NOTE(review): yaml.load executes arbitrary YAML tags; the conf
        # file is locally generated, but yaml.safe_load would be safer
        data = yaml.load(f)
    try:
        cassandra_dir = None
        if 'cassandra_dir' in data:
            cassandra_dir = data['cassandra_dir']
            repository.validate(cassandra_dir)
        cluster = Cluster(path, data['name'], cassandra_dir=cassandra_dir, create_directory=False)
        node_list = data['nodes']
        seed_list = data['seeds']
        if 'partitioner' in data:
            cluster.partitioner = data['partitioner']
        if 'config_options' in data:
            cluster._config_options = data['config_options']
        if 'log_level' in data:
            cluster.__log_level = data['log_level']
        if 'use_vnodes' in data:
            cluster.use_vnodes = data['use_vnodes']
    except KeyError as k:
        # bug fix: concatenating the KeyError instance itself raised a
        # TypeError and masked the real error; stringify it instead
        raise common.LoadError("Error Loading " + filename + ", missing property:" + str(k))
    for node_name in node_list:
        cluster.nodes[node_name] = Node.load(cluster_path, node_name, cluster)
    for seed_name in seed_list:
        cluster.seeds.append(cluster.nodes[seed_name])
    return cluster
def add(self, node, is_seed, data_center=None):
    '''Register ``node`` with the cluster (optionally as a seed), persist
    the config, propagate the log level and save the node. Returns self.
    '''
    if node.name in self.nodes:
        raise common.ArgumentError('Cannot create existing node %s' % node.name)
    self.nodes[node.name] = node
    if is_seed:
        self.seeds.append(node)
    self.__update_config()
    node.data_center = data_center
    node.set_log_level(self.__log_level)
    if data_center is not None:
        self.__update_topology_files()
    node._save()
    return self
def populate(self, nodes, debug=False, tokens=None, use_vnodes=False, ipprefix='127.0.0.'):
    '''Create ``nodes`` nodes in this cluster.

    :param nodes: either an int (total node count) or a list of per-data
        center counts (which also switches to the PropertyFileSnitch)
    :param debug: enable a per-node debug port when true
    :param tokens: explicit initial tokens; balanced tokens are computed
        when omitted and vnodes are not used
    :param use_vnodes: use virtual nodes instead of single tokens
    :param ipprefix: prefix used to build each node's loopback address
    :returns: self
    :raises common.ArgumentError: on an invalid count or duplicate node name
    '''
    node_count = nodes
    dcs = []
    self.use_vnodes = use_vnodes
    if isinstance(nodes, list):
        self.set_configuration_options(values={'endpoint_snitch' : 'org.apache.cassandra.locator.PropertyFileSnitch'})
        node_count = 0
        i = 0
        for c in nodes:
            i = i + 1
            node_count = node_count + c
            for x in xrange(0, c):
                dcs.append('dc%d' % i)
    if node_count < 1:
        raise common.ArgumentError('invalid node count %s' % nodes)
    for i in xrange(1, node_count + 1):
        # bug fix: check the node *names* (dict keys); the previous code
        # tested membership against self.nodes.values() (Node objects),
        # so the duplicate check could never trigger
        if 'node%s' % i in self.nodes:
            raise common.ArgumentError('Cannot create existing node node%s' % i)
    if tokens is None and not use_vnodes:
        tokens = self.balanced_tokens(node_count)
    for i in xrange(1, node_count + 1):
        tk = None
        if tokens is not None and i-1 < len(tokens):
            tk = tokens[i-1]
        dc = dcs[i-1] if i-1 < len(dcs) else None
        binary = None
        # the native (binary) protocol is only available from 1.2 on
        if self.version() >= '1.2':
            binary = ('%s%s' % (ipprefix, i), 9042)
        node = Node('node%s' % i,
                    self,
                    False,
                    ('%s%s' % (ipprefix, i), 9160),
                    ('%s%s' % (ipprefix, i), 7000),
                    str(7000 + i * 100),
                    (str(0), str(2000 + i * 100))[debug == True],
                    tk,
                    binary_interface=binary)
        self.add(node, True, dc)
        self.__update_config()
    return self
def balanced_tokens(self, node_count):
if self.version() >= '1.2' and not self.partitioner:
ptokens = [(i*(2**64//node_count)) for i in xrange(0, node_count)]
return [int(t - 2**63) for t in ptokens]
return [ int(i*(2**127//node_count)) for i in range(0, node_count) ]
def remove(self, node=None):
    '''Remove ``node`` from the cluster (stop it and delete its files);
    with no argument, tear down the entire cluster directory.'''
    if node is None:
        # whole-cluster removal
        self.stop(gently=False)
        shutil.rmtree(self.get_path())
        return
    if node.name not in self.nodes:
        return
    del self.nodes[node.name]
    if node in self.seeds:
        self.seeds.remove(node)
    self.__update_config()
    node.stop(gently=False)
    shutil.rmtree(node.get_path())
def clear(self):
    '''Stop the cluster and wipe every node's data.'''
    self.stop()
    for node in self.nodes.values():
        node.clear()
def get_path(self):
return os.path.join(self.__path, self.name)
def get_seeds(self):
return [ s.network_interfaces['storage'][0] for s in self.seeds ]
def show(self, verbose):
    '''Print a status line (or, with ``verbose``, a full description)
    for every node in the cluster.'''
    nodes = list(self.nodes.values())
    if not nodes:
        print_("No node in this cluster yet")
        return
    for node in nodes:
        if verbose:
            node.show(show_cluster=False)
            print_("")
        else:
            node.show(only_status=True)
def start(self, no_wait=False, verbose=False, wait_for_binary_proto=False, wait_other_notice=False, jvm_args=[], profile_options=None):
if wait_other_notice:
marks = [ (node, node.mark_log()) for node in list(self.nodes.values()) if node.is_running() ]
started = []
for node in list(self.nodes.values()):
if not node.is_running():
mark = 0
if os.path.exists(node.logfilename()):
mark = node.mark_log()
p = node.start(update_pid=False, jvm_args=jvm_args, profile_options=profile_options)
started.append((node, p, mark))
if no_wait and not verbose:
time.sleep(2) # waiting 2 seconds to check for early errors and for the pid to be set
else:
for node, p, mark in started:
try:
node.watch_log_for("Listening for thrift clients...", process=p, verbose=verbose, from_mark=mark)
except RuntimeError:
return None
self.__update_pids(started)
for node, p, _ in started:
if not node.is_running():
raise NodeError("Error starting {0}.".format(node.name), p)
if not no_wait and self.version() >= "0.8":
# 0.7 gossip messages seems less predictible that from 0.8 onwards and
# I don't care enough
for node, _, mark in started:
for other_node, _, _ in started:
if other_node is not node:
node.watch_log_for_alive(other_node, from_mark=mark)
if wait_other_notice:
for node, mark in marks:
node.watch_log_for_alive(self, from_mark=mark)
if wait_for_binary_proto:
for node, _, mark in started:
node.watch_log_for("Starting listening for CQL clients", process=p, verbose=verbose, from_mark=mark)
time.sleep(0.2)
return started
    def stop(self, wait=True, gently=True):
        """Stop all nodes; return the list of nodes that failed to stop."""
        not_running = []
        for node in list(self.nodes.values()):
            if not node.stop(wait, gently=gently):
                not_running.append(node)
        return not_running
    def set_log_level(self, new_level, class_name=None):
        """Set the cluster-wide log level (optionally for one logger class)."""
        known_level = [ 'TRACE', 'DEBUG', 'INFO', 'WARN', 'ERROR' ]
        if new_level not in known_level:
            raise common.ArgumentError("Unknown log level %s (use one of %s)" % (new_level, " ".join(known_level)))
        self.__log_level = new_level
        self.__update_config()
        for node in self.nodelist():
            node.set_log_level(new_level, class_name)
    def nodetool(self, nodetool_cmd):
        """Run the given nodetool command on every running node."""
        for node in list(self.nodes.values()):
            if node.is_running():
                node.nodetool(nodetool_cmd)
        return self
def stress(self, stress_options):
stress = common.get_stress_bin(self.get_cassandra_dir())
livenodes = [ node.network_interfaces['storage'][0] for node in list(self.nodes.values()) if node.is_live() ]
if len(livenodes) == 0:
print_("No live node")
return
args = [ stress, '-d', ",".join(livenodes) ] + stress_options
try:
# need to set working directory for env on Windows
if common.is_win():
subprocess.call(args, cwd=common.parse_path(stress))
else:
subprocess.call(args)
except KeyboardInterrupt:
pass
return self
def run_cli(self, cmds=None, show_output=False, cli_options=[]):
livenodes = [ node for node in list(self.nodes.values()) if node.is_live() ]
if len(livenodes) == 0:
raise common.ArgumentError("No live node")
livenodes[0].run_cli(cmds, show_output, cli_options)
    def set_configuration_options(self, values=None, batch_commitlog=None):
        """Merge `values` into the cluster config options; `batch_commitlog`
        toggles between batch and periodic commitlog sync. The merged config
        is persisted and pushed to every node."""
        if values is not None:
            for k, v in iteritems(values):
                self._config_options[k] = v
        if batch_commitlog is not None:
            if batch_commitlog:
                self._config_options["commitlog_sync"] = "batch"
                self._config_options["commitlog_sync_batch_window_in_ms"] = 5
                self._config_options["commitlog_sync_period_in_ms"] = None
            else:
                self._config_options["commitlog_sync"] = "periodic"
                self._config_options["commitlog_sync_period_in_ms"] = 10000
                self._config_options["commitlog_sync_batch_window_in_ms"] = None
        self.__update_config()
        for node in list(self.nodes.values()):
            node.import_config_files()
        return self
    def flush(self):
        """Run `nodetool flush` on all running nodes."""
        self.nodetool("flush")
    def compact(self):
        """Run `nodetool compact` on all running nodes."""
        self.nodetool("compact")
    def drain(self):
        """Run `nodetool drain` on all running nodes."""
        self.nodetool("drain")
    def repair(self):
        """Run `nodetool repair` on all running nodes."""
        self.nodetool("repair")
    def cleanup(self):
        """Run `nodetool cleanup` on all running nodes."""
        self.nodetool("cleanup")
    def decommission(self):
        """Decommission every running node."""
        for node in list(self.nodes.values()):
            if node.is_running():
                node.decommission()
    def removeToken(self, token):
        """Run `nodetool removeToken <token>` (camelCase kept for API compatibility)."""
        self.nodetool("removeToken " + str(token))
    def bulkload(self, options):
        """Bulk-load SSTables into the cluster."""
        loader = BulkLoader(self)
        loader.load(options)
    def scrub(self, options):
        """Run scrub with the given options on every node."""
        for node in list(self.nodes.values()):
            node.scrub(options)
    def update_log4j(self, new_log4j_config):
        """Replace the log4j config file on every node."""
        # iterate over all nodes
        for node in self.nodelist():
            node.update_log4j(new_log4j_config)
    def update_logback(self, new_logback_config):
        """Replace the logback config file on every node."""
        # iterate over all nodes
        for node in self.nodelist():
            node.update_logback(new_logback_config)
    def __get_version_from_build(self):
        """Read the Cassandra version out of the configured build directory."""
        return common.get_version_from_build(self.get_cassandra_dir())
    def __update_config(self):
        """Persist the cluster definition to <cluster>/cluster.conf as YAML."""
        node_list = [ node.name for node in list(self.nodes.values()) ]
        seed_list = [ node.name for node in self.seeds ]
        filename = os.path.join(self.__path, self.name, 'cluster.conf')
        with open(filename, 'w') as f:
            yaml.safe_dump({
                'name' : self.name,
                'nodes' : node_list,
                'seeds' : seed_list,
                'partitioner' : self.partitioner,
                'cassandra_dir' : self.__cassandra_dir,
                'config_options' : self._config_options,
                'log_level' : self.__log_level,
                'use_vnodes' : self.use_vnodes
            }, f)
    def __update_pids(self, started):
        """Record real pids for nodes launched by start() with update_pid=False."""
        for node, p, _ in started:
            node._update_pid(p)
def __update_topology_files(self):
dcs = [('default', 'dc1')]
for node in self.nodelist():
if node.data_center is not None:
dcs.append((node.address(), node.data_center))
content = ""
for k, v in dcs:
content = "%s%s=%s:r1\n" % (content, k, v)
for node in self.nodelist():
topology_file = os.path.join(node.get_conf_dir(), 'cassandra-topology.properties')
with open(topology_file, 'w') as f:
f.write(content)
Fixes #38. Multi-dc tokens are now balanced.
Previously tokens were distributed to balance for a single
DC, regardless of how many exist. This now splits the token range
for each DC, using an offset to prevent token collisions. This will
make NetworkTopologyStrategy work correctly with multiple DCs, but
will break SimpleStrategy, though the docs recommend against using
SimpleStrategy across multiple DCs.
# ccm clusters
from six import print_, iteritems
from six.moves import xrange
import yaml
import os
import re
import subprocess
import shutil
import time
from ccmlib import common, repository
from ccmlib.node import Node, NodeError
from ccmlib.bulkloader import BulkLoader
class Cluster():
    """A local Cassandra cluster under ccm control: a named set of Node
    objects rooted at <path>/<name>, persisted via cluster.conf."""
    def __init__(self, path, name, partitioner=None, cassandra_dir=None, create_directory=True, cassandra_version=None, verbose=False):
        """Create (or attach to) a cluster rooted at <path>/<name>.

        Either cassandra_dir (an existing source/build tree) or
        cassandra_version (downloaded via repository.setup) selects the
        Cassandra installation to run.
        """
        self.name = name
        self.nodes = {}
        self.seeds = []
        self.partitioner = partitioner
        self._config_options = {}
        self.__log_level = "INFO"
        self.__path = path
        self.__version = None
        self.use_vnodes = False
        if create_directory:
            # we create the dir before potentially downloading to throw an error sooner if need be
            os.mkdir(self.get_path())
        try:
            if cassandra_version is None:
                # at this point, cassandra_dir should always not be None, but
                # we keep this for backward compatibility (in loading old cluster)
                if cassandra_dir is not None:
                    if common.is_win():
                        self.__cassandra_dir = cassandra_dir
                    else:
                        self.__cassandra_dir = os.path.abspath(cassandra_dir)
                    self.__version = self.__get_version_from_build()
            else:
                dir, v = repository.setup(cassandra_version, verbose)
                self.__cassandra_dir = dir
                self.__version = v if v is not None else self.__get_version_from_build()
            if create_directory:
                common.validate_cassandra_dir(self.__cassandra_dir)
                self.__update_config()
        except:
            # Roll back the directory we just created on any failure,
            # then re-raise the original exception.
            if create_directory:
                shutil.rmtree(self.get_path())
            raise
    def set_partitioner(self, partitioner):
        """Set the partitioner class name and persist it to cluster.conf."""
        self.partitioner = partitioner
        self.__update_config()
        return self
    def set_cassandra_dir(self, cassandra_dir=None, cassandra_version=None, verbose=False):
        """Point the cluster at a Cassandra source dir, or set up the given
        version, then push the resulting config to every node."""
        if cassandra_version is None:
            self.__cassandra_dir = cassandra_dir
            common.validate_cassandra_dir(cassandra_dir)
            self.__version = self.__get_version_from_build()
        else:
            dir, v = repository.setup(cassandra_version, verbose)
            self.__cassandra_dir = dir
            self.__version = v if v is not None else self.__get_version_from_build()
        self.__update_config()
        for node in list(self.nodes.values()):
            node.import_config_files()
        # if any nodes have a data center, let's update the topology
        if any( [node.data_center for node in self.nodes.values()] ):
            self.__update_topology_files()
        return self
    def get_cassandra_dir(self):
        """Return the (validated) Cassandra installation directory."""
        common.validate_cassandra_dir(self.__cassandra_dir)
        return self.__cassandra_dir
    def nodelist(self):
        """Return all nodes sorted by name."""
        return [ self.nodes[name] for name in sorted(self.nodes.keys()) ]
    def version(self):
        """Return the Cassandra version string this cluster runs."""
        return self.__version
@staticmethod
def load(path, name):
cluster_path = os.path.join(path, name)
filename = os.path.join(cluster_path, 'cluster.conf')
with open(filename, 'r') as f:
data = yaml.load(f)
try:
cassandra_dir = None
if 'cassandra_dir' in data:
cassandra_dir = data['cassandra_dir']
repository.validate(cassandra_dir)
cluster = Cluster(path, data['name'], cassandra_dir=cassandra_dir, create_directory=False)
node_list = data['nodes']
seed_list = data['seeds']
if 'partitioner' in data:
cluster.partitioner = data['partitioner']
if 'config_options' in data:
cluster._config_options = data['config_options']
if 'log_level' in data:
cluster.__log_level = data['log_level']
if 'use_vnodes' in data:
cluster.use_vnodes = data['use_vnodes']
except KeyError as k:
raise common.LoadError("Error Loading " + filename + ", missing property:" + k)
for node_name in node_list:
cluster.nodes[node_name] = Node.load(cluster_path, node_name, cluster)
for seed_name in seed_list:
cluster.seeds.append(cluster.nodes[seed_name])
return cluster
    def add(self, node, is_seed, data_center=None):
        """Register an existing Node object with this cluster.

        Raises common.ArgumentError if a node with that name already exists.
        """
        if node.name in self.nodes:
            raise common.ArgumentError('Cannot create existing node %s' % node.name)
        self.nodes[node.name] = node
        if is_seed:
            self.seeds.append(node)
        self.__update_config()
        node.data_center = data_center
        node.set_log_level(self.__log_level)
        # A data-center assignment changes the PropertyFileSnitch topology.
        if data_center is not None:
            self.__update_topology_files()
        node._save()
        return self
def populate(self, nodes, debug=False, tokens=None, use_vnodes=False, ipprefix='127.0.0.'):
node_count = nodes
dcs = []
self.use_vnodes = use_vnodes
if isinstance(nodes, list):
self.set_configuration_options(values={'endpoint_snitch' : 'org.apache.cassandra.locator.PropertyFileSnitch'})
node_count = 0
i = 0
for c in nodes:
i = i + 1
node_count = node_count + c
for x in xrange(0, c):
dcs.append('dc%d' % i)
if node_count < 1:
raise common.ArgumentError('invalid node count %s' % nodes)
for i in xrange(1, node_count + 1):
if 'node%s' % i in list(self.nodes.values()):
raise common.ArgumentError('Cannot create existing node node%s' % i)
if tokens is None and not use_vnodes:
if dcs is None or len(dcs) <= 1:
tokens = self.balanced_tokens(node_count)
else:
tokens = self.balanced_tokens_across_dcs(dcs)
for i in xrange(1, node_count + 1):
tk = None
if tokens is not None and i-1 < len(tokens):
tk = tokens[i-1]
dc = dcs[i-1] if i-1 < len(dcs) else None
binary = None
if self.version() >= '1.2':
binary = ('%s%s' % (ipprefix, i), 9042)
node = Node('node%s' % i,
self,
False,
('%s%s' % (ipprefix, i), 9160),
('%s%s' % (ipprefix, i), 7000),
str(7000 + i * 100),
(str(0), str(2000 + i * 100))[debug == True],
tk,
binary_interface=binary)
self.add(node, True, dc)
self.__update_config()
return self
def balanced_tokens(self, node_count):
if self.version() >= '1.2' and not self.partitioner:
ptokens = [(i*(2**64//node_count)) for i in xrange(0, node_count)]
return [int(t - 2**63) for t in ptokens]
return [ int(i*(2**127//node_count)) for i in range(0, node_count) ]
    def balanced_tokens_across_dcs(self, dcs):
        """Return one token per entry of `dcs` (a list such as
        ['dc1', 'dc1', 'dc2'], one entry per node in DC order).

        Tokens are balanced within each data center independently, and each
        DC's tokens are offset by its index so two DCs never produce the
        exact same token.
        """
        tokens = []
        current_dc = dcs[0]
        count = 0
        dc_count = 0
        for dc in dcs:
            if dc == current_dc:
                # Still inside the same DC's run: just count its nodes.
                count += 1
            else:
                # DC boundary: emit the finished DC's balanced tokens,
                # shifted by its index to avoid cross-DC collisions.
                new_tokens = [tk+dc_count for tk in self.balanced_tokens(count)]
                tokens.extend(new_tokens)
                current_dc = dc
                count = 1
                dc_count += 1
        # Emit the final DC's tokens.
        new_tokens = [tk+dc_count for tk in self.balanced_tokens(count)]
        tokens.extend(new_tokens)
        return tokens
    def remove(self, node=None):
        """Remove a single node (stopping it and deleting its files), or the
        whole cluster directory when called with no argument."""
        if node is not None:
            if not node.name in self.nodes:
                return
            del self.nodes[node.name]
            if node in self.seeds:
                self.seeds.remove(node)
            # Persist membership before destroying the node's files.
            self.__update_config()
            node.stop(gently=False)
            shutil.rmtree(node.get_path())
        else:
            self.stop(gently=False)
            shutil.rmtree(self.get_path())
    def clear(self):
        """Stop all nodes and wipe their data, keeping the cluster definition."""
        self.stop()
        for node in list(self.nodes.values()):
            node.clear()
    def get_path(self):
        """Return this cluster's root directory on disk."""
        return os.path.join(self.__path, self.name)
    def get_seeds(self):
        """Return the storage (gossip) addresses of all seed nodes."""
        return [ s.network_interfaces['storage'][0] for s in self.seeds ]
    def show(self, verbose):
        """Print a status summary for each node; verbose adds details."""
        if len(list(self.nodes.values())) == 0:
            print_("No node in this cluster yet")
            return
        for node in list(self.nodes.values()):
            if (verbose):
                node.show(show_cluster=False)
                print_("")
            else:
                node.show(only_status=True)
def start(self, no_wait=False, verbose=False, wait_for_binary_proto=False, wait_other_notice=False, jvm_args=[], profile_options=None):
if wait_other_notice:
marks = [ (node, node.mark_log()) for node in list(self.nodes.values()) if node.is_running() ]
started = []
for node in list(self.nodes.values()):
if not node.is_running():
mark = 0
if os.path.exists(node.logfilename()):
mark = node.mark_log()
p = node.start(update_pid=False, jvm_args=jvm_args, profile_options=profile_options)
started.append((node, p, mark))
if no_wait and not verbose:
time.sleep(2) # waiting 2 seconds to check for early errors and for the pid to be set
else:
for node, p, mark in started:
try:
node.watch_log_for("Listening for thrift clients...", process=p, verbose=verbose, from_mark=mark)
except RuntimeError:
return None
self.__update_pids(started)
for node, p, _ in started:
if not node.is_running():
raise NodeError("Error starting {0}.".format(node.name), p)
if not no_wait and self.version() >= "0.8":
# 0.7 gossip messages seems less predictible that from 0.8 onwards and
# I don't care enough
for node, _, mark in started:
for other_node, _, _ in started:
if other_node is not node:
node.watch_log_for_alive(other_node, from_mark=mark)
if wait_other_notice:
for node, mark in marks:
node.watch_log_for_alive(self, from_mark=mark)
if wait_for_binary_proto:
for node, _, mark in started:
node.watch_log_for("Starting listening for CQL clients", process=p, verbose=verbose, from_mark=mark)
time.sleep(0.2)
return started
def stop(self, wait=True, gently=True):
not_running = []
for node in list(self.nodes.values()):
if not node.stop(wait, gently=gently):
not_running.append(node)
return not_running
    def set_log_level(self, new_level, class_name=None):
        """Validate and apply a new log level cluster-wide (optionally scoped
        to a single logger class)."""
        known_level = [ 'TRACE', 'DEBUG', 'INFO', 'WARN', 'ERROR' ]
        if new_level not in known_level:
            raise common.ArgumentError("Unknown log level %s (use one of %s)" % (new_level, " ".join(known_level)))
        self.__log_level = new_level
        self.__update_config()
        for node in self.nodelist():
            node.set_log_level(new_level, class_name)
    def nodetool(self, nodetool_cmd):
        """Run the given nodetool command on every running node."""
        for node in list(self.nodes.values()):
            if node.is_running():
                node.nodetool(nodetool_cmd)
        return self
    def stress(self, stress_options):
        """Run the stress binary against all live nodes."""
        stress = common.get_stress_bin(self.get_cassandra_dir())
        livenodes = [ node.network_interfaces['storage'][0] for node in list(self.nodes.values()) if node.is_live() ]
        if len(livenodes) == 0:
            print_("No live node")
            return
        args = [ stress, '-d', ",".join(livenodes) ] + stress_options
        try:
            # need to set working directory for env on Windows
            if common.is_win():
                subprocess.call(args, cwd=common.parse_path(stress))
            else:
                subprocess.call(args)
        except KeyboardInterrupt:
            pass
        return self
def run_cli(self, cmds=None, show_output=False, cli_options=[]):
livenodes = [ node for node in list(self.nodes.values()) if node.is_live() ]
if len(livenodes) == 0:
raise common.ArgumentError("No live node")
livenodes[0].run_cli(cmds, show_output, cli_options)
    def set_configuration_options(self, values=None, batch_commitlog=None):
        """Merge `values` into the cluster config options; `batch_commitlog`
        toggles between batch and periodic commitlog sync. The merged config
        is persisted and pushed to every node."""
        if values is not None:
            for k, v in iteritems(values):
                self._config_options[k] = v
        if batch_commitlog is not None:
            if batch_commitlog:
                self._config_options["commitlog_sync"] = "batch"
                self._config_options["commitlog_sync_batch_window_in_ms"] = 5
                self._config_options["commitlog_sync_period_in_ms"] = None
            else:
                self._config_options["commitlog_sync"] = "periodic"
                self._config_options["commitlog_sync_period_in_ms"] = 10000
                self._config_options["commitlog_sync_batch_window_in_ms"] = None
        self.__update_config()
        for node in list(self.nodes.values()):
            node.import_config_files()
        return self
    def flush(self):
        """Run `nodetool flush` on all running nodes."""
        self.nodetool("flush")
    def compact(self):
        """Run `nodetool compact` on all running nodes."""
        self.nodetool("compact")
    def drain(self):
        """Run `nodetool drain` on all running nodes."""
        self.nodetool("drain")
    def repair(self):
        """Run `nodetool repair` on all running nodes."""
        self.nodetool("repair")
    def cleanup(self):
        """Run `nodetool cleanup` on all running nodes."""
        self.nodetool("cleanup")
    def decommission(self):
        """Decommission every running node."""
        for node in list(self.nodes.values()):
            if node.is_running():
                node.decommission()
    def removeToken(self, token):
        """Run `nodetool removeToken <token>` (camelCase kept for API compatibility)."""
        self.nodetool("removeToken " + str(token))
    def bulkload(self, options):
        """Bulk-load SSTables into the cluster."""
        loader = BulkLoader(self)
        loader.load(options)
    def scrub(self, options):
        """Run scrub with the given options on every node."""
        for node in list(self.nodes.values()):
            node.scrub(options)
    def update_log4j(self, new_log4j_config):
        """Replace the log4j config file on every node."""
        # iterate over all nodes
        for node in self.nodelist():
            node.update_log4j(new_log4j_config)
    def update_logback(self, new_logback_config):
        """Replace the logback config file on every node."""
        # iterate over all nodes
        for node in self.nodelist():
            node.update_logback(new_logback_config)
    def __get_version_from_build(self):
        """Read the Cassandra version out of the configured build directory."""
        return common.get_version_from_build(self.get_cassandra_dir())
    def __update_config(self):
        """Persist the cluster definition to <cluster>/cluster.conf as YAML."""
        node_list = [ node.name for node in list(self.nodes.values()) ]
        seed_list = [ node.name for node in self.seeds ]
        filename = os.path.join(self.__path, self.name, 'cluster.conf')
        with open(filename, 'w') as f:
            yaml.safe_dump({
                'name' : self.name,
                'nodes' : node_list,
                'seeds' : seed_list,
                'partitioner' : self.partitioner,
                'cassandra_dir' : self.__cassandra_dir,
                'config_options' : self._config_options,
                'log_level' : self.__log_level,
                'use_vnodes' : self.use_vnodes
            }, f)
    def __update_pids(self, started):
        """Record real pids for nodes launched by start() with update_pid=False."""
        for node, p, _ in started:
            node._update_pid(p)
    def __update_topology_files(self):
        """Rewrite cassandra-topology.properties on every node from the
        current node -> data center assignments."""
        dcs = [('default', 'dc1')]
        for node in self.nodelist():
            if node.data_center is not None:
                dcs.append((node.address(), node.data_center))
        content = ""
        for k, v in dcs:
            content = "%s%s=%s:r1\n" % (content, k, v)
        for node in self.nodelist():
            topology_file = os.path.join(node.get_conf_dir(), 'cassandra-topology.properties')
            with open(topology_file, 'w') as f:
                f.write(content)
|
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import re
import random
import signal
import warnings
import multiprocessing
from zoo.ray.process import session_execute, ProcessMonitor
from zoo.ray.utils import is_local
from zoo.ray.utils import resource_to_bytes
class JVMGuard:
    """
    The registered pids would be put into the killing list of Spark Executor.
    """
    @staticmethod
    def register_pids(pids):
        """Register ray process pids with the JVM-side guard on the executor.

        If registration fails, the processes are killed immediately (SIGKILL)
        so no untracked ray process can outlive the Spark executor, and the
        original error is re-raised.
        """
        import traceback
        try:
            from zoo.common.utils import callZooFunc
            import zoo
            callZooFunc("float",
                        "jvmGuardRegisterPids",
                        pids)
        except Exception as err:
            print(traceback.format_exc())
            print("Cannot successfully register pid into JVMGuard")
            for pid in pids:
                os.kill(pid, signal.SIGKILL)
            raise err
def kill_redundant_log_monitors(redis_address):
    """
    Killing redundant log_monitor.py processes.
    If multiple ray nodes are started on the same machine,
    there will be multiple ray log_monitor.py processes
    monitoring the same log dir. As a result, the logs
    will be replicated multiple times and forwarded to driver.
    See issue https://github.com/ray-project/ray/issues/10392
    """
    import psutil
    import subprocess
    redis_flag = "--redis-address={}".format(redis_address)
    monitors = []
    for proc in psutil.process_iter(["name", "cmdline"]):
        cmdline = subprocess.list2cmdline(proc.cmdline())
        if "log_monitor.py" in cmdline and redis_flag in cmdline:
            monitors.append(proc)
    # Keep the first monitor alive; every additional one is redundant.
    for proc in monitors[1:]:
        proc.kill()
class RayServiceFuncGenerator(object):
    """
    This should be a pickable class.
    """
    def _prepare_env(self):
        """Build the environment dict used to launch ray processes on executors."""
        modified_env = os.environ.copy()
        if self.python_loc == "python_env/bin/python":
            # In this case the executor is using the conda yarn archive under the current
            # working directory. Need to get the full path.
            executor_python_path = "{}/{}".format(
                os.getcwd(), "/".join(self.python_loc.split("/")[:-1]))
        else:
            executor_python_path = "/".join(self.python_loc.split("/")[:-1])
        if "PATH" in os.environ:
            modified_env["PATH"] = "{}:{}".format(executor_python_path, os.environ["PATH"])
        else:
            modified_env["PATH"] = executor_python_path
        modified_env["LC_ALL"] = "C.UTF-8"
        modified_env["LANG"] = "C.UTF-8"
        modified_env.pop("MALLOC_ARENA_MAX", None)
        modified_env.pop("RAY_BACKEND_LOG_LEVEL", None)
        # Unset all MKL setting as Analytics Zoo would give default values when init env.
        # Running different programs may need different configurations.
        modified_env.pop("intra_op_parallelism_threads", None)
        modified_env.pop("inter_op_parallelism_threads", None)
        modified_env.pop("OMP_NUM_THREADS", None)
        modified_env.pop("KMP_BLOCKTIME", None)
        modified_env.pop("KMP_AFFINITY", None)
        modified_env.pop("KMP_SETTINGS", None)
        if self.env: # Add in env argument if any MKL setting is needed.
            modified_env.update(self.env)
        if self.verbose:
            print("Executing with these environment settings:")
            for pair in modified_env.items():
                print(pair)
            print("The $PATH is: {}".format(modified_env["PATH"]))
        return modified_env
    def __init__(self, python_loc, redis_port, ray_node_cpu_cores,
                 password, object_store_memory, verbose=False, env=None,
                 extra_params=None):
        """object_store_memory: integer in bytes"""
        self.env = env
        self.python_loc = python_loc
        self.redis_port = redis_port
        self.password = password
        self.ray_node_cpu_cores = ray_node_cpu_cores
        # Derived path of the `ray` executable next to the python binary.
        self.ray_exec = self._get_ray_exec()
        self.object_store_memory = object_store_memory
        self.extra_params = extra_params
        self.verbose = verbose
        # _mxnet_worker and _mxnet_server are resource tags for distributed MXNet training only
        # in order to diff worker from server.
        # This is useful to allocate workers and servers in the cluster.
        # Leave some reserved custom resources free to avoid unknown crash due to resources.
        self.labels = \
            """--resources '{"_mxnet_worker": %s, "_mxnet_server": %s, "_reserved": %s}'""" \
            % (1, 1, 2)
def gen_stop(self):
def _stop(iter):
command = "{} stop".format(self.ray_exec)
print("Start to end the ray services: {}".format(command))
session_execute(command=command, fail_fast=True)
return iter
return _stop
@staticmethod
def _enrich_command(command, object_store_memory, extra_params):
if object_store_memory:
command = command + " --object-store-memory {}".format(str(object_store_memory))
if extra_params:
for pair in extra_params.items():
command = command + " --{} {}".format(pair[0], pair[1])
return command
    def _gen_master_command(self):
        """Build the shell command that starts the ray head node on partition 0."""
        command = "{} start --head " \
                  "--include-webui true --redis-port {} " \
                  "--redis-password {} --num-cpus {}". \
            format(self.ray_exec, self.redis_port, self.password,
                   self.ray_node_cpu_cores)
        if self.labels:
            command = command + " " + self.labels
        return RayServiceFuncGenerator._enrich_command(command=command,
                                                       object_store_memory=self.object_store_memory,
                                                       extra_params=self.extra_params)
@staticmethod
def _get_raylet_command(redis_address,
ray_exec,
password,
ray_node_cpu_cores,
labels="",
object_store_memory=None,
extra_params=None):
command = "{} start --address {} --redis-password {} --num-cpus {}".format(
ray_exec, redis_address, password, ray_node_cpu_cores)
if labels:
command = command + " " + labels
return RayServiceFuncGenerator._enrich_command(command=command,
object_store_memory=object_store_memory,
extra_params=extra_params)
    def _start_ray_node(self, command, tag):
        """Launch a ray process on this executor, register its pids with the
        JVM guard so they die with the executor, and return its process info
        annotated with the node's IP."""
        modified_env = self._prepare_env()
        print("Starting {} by running: {}".format(tag, command))
        process_info = session_execute(command=command, env=modified_env, tag=tag)
        JVMGuard.register_pids(process_info.pids)
        import ray.services as rservices
        process_info.node_ip = rservices.get_node_ip_address()
        return process_info
def _get_ray_exec(self):
python_bin_dir = "/".join(self.python_loc.split("/")[:-1])
return "{}/python {}/ray".format(python_bin_dir, python_bin_dir)
    def gen_ray_start(self):
        """Return a barrier mapPartitions function that starts ray across the
        cluster: the master on partition 0, a raylet on every other partition."""
        def _start_ray_services(iter):
            from pyspark import BarrierTaskContext
            tc = BarrierTaskContext.get()
            # The address is sorted by partitionId according to the comments
            # Partition 0 is the Master
            task_addrs = [taskInfo.address for taskInfo in tc.getTaskInfos()]
            print(task_addrs)
            master_ip = task_addrs[0].split(":")[0]
            print("current address {}".format(task_addrs[tc.partitionId()]))
            print("master address {}".format(master_ip))
            redis_address = "{}:{}".format(master_ip, self.redis_port)
            process_info = None
            if tc.partitionId() == 0:
                print("partition id is : {}".format(tc.partitionId()))
                process_info = self._start_ray_node(command=self._gen_master_command(),
                                                    tag="ray-master")
                process_info.master_addr = redis_address
            # All partitions wait here until the master is up.
            tc.barrier()
            if tc.partitionId() != 0:
                import tempfile
                import filelock
                base_path = tempfile.gettempdir()
                lock_path = os.path.join(base_path, "ray_on_spark_start.lock")
                # Serialize raylet startups that land on the same machine.
                with filelock.FileLock(lock_path):
                    print("partition id is : {}".format(tc.partitionId()))
                    process_info = self._start_ray_node(
                        command=RayServiceFuncGenerator._get_raylet_command(
                            redis_address=redis_address,
                            ray_exec=self.ray_exec,
                            password=self.password,
                            ray_node_cpu_cores=self.ray_node_cpu_cores,
                            labels=self.labels,
                            object_store_memory=self.object_store_memory,
                            extra_params=self.extra_params),
                        tag="raylet")
                kill_redundant_log_monitors(redis_address=redis_address)
            yield process_info
        return _start_ray_services
class RayContext(object):
    # The most recently constructed RayContext; returned by RayContext.get().
    _active_ray_context = None
    def __init__(self, sc, redis_port=None, password="123456", object_store_memory=None,
                 verbose=False, env=None, extra_params=None,
                 num_ray_nodes=None, ray_node_cpu_cores=None):
        """
        The RayContext would initiate a ray cluster on top of the configuration of SparkContext.
        After creating RayContext, call the init method to set up the cluster.
        - For Spark local mode: The total available cores for Ray is equal to the number of
        Spark local cores.
        - For Spark cluster mode: The number of raylets to be created is equal to the number of
        Spark executors. The number of cores allocated for each raylet is equal to the number of
        cores for each Spark executor.
        You are allowed to specify num_ray_nodes and ray_node_cpu_cores for configurations
        to start raylets.
        :param sc: An instance of SparkContext.
        :param redis_port: redis port for the "head" node.
        The value would be randomly picked if not specified.
        :param password: Password for the redis. Default to be "123456" if not specified.
        :param object_store_memory: The memory size for ray object_store in string.
        This can be specified in bytes(b), kilobytes(k), megabytes(m) or gigabytes(g).
        For example, 50b, 100k, 250m, 30g.
        :param verbose: True for more logs when starting ray. Default is False.
        :param env: The environment variable dict for running ray processes. Default is None.
        :param extra_params: The key value dict for extra options to launch ray.
        For example, extra_params={"temp-dir": "/tmp/ray/"}
        :param num_ray_nodes: The number of raylets to start across the cluster.
        For Spark local mode, you don't need to specify this value.
        For Spark cluster mode, it is default to be the number of Spark executors. If
        spark.executor.instances can't be detected in your SparkContext, you need to explicitly
        specify this. It is recommended that num_ray_nodes is not larger than the number of
        Spark executors to make sure there are enough resources in your cluster.
        :param ray_node_cpu_cores: The number of available cores for each raylet.
        For Spark local mode, it is default to be the number of Spark local cores.
        For Spark cluster mode, it is default to be the number of cores for each Spark executor. If
        spark.executor.cores or spark.cores.max can't be detected in your SparkContext, you need to
        explicitly specify this. It is recommended that ray_node_cpu_cores is not larger than the
        number of cores for each Spark executor to make sure there are enough resources in your
        cluster.
        """
        assert sc is not None, "sc cannot be None, please create a SparkContext first"
        self.sc = sc
        self.initialized = False
        self.is_local = is_local(sc)
        self.verbose = verbose
        self.redis_password = password
        self.object_store_memory = resource_to_bytes(object_store_memory)
        self.ray_processesMonitor = None
        self.env = env
        self.extra_params = extra_params
        self._address_info = None
        if self.is_local:
            self.num_ray_nodes = 1
            spark_cores = self._get_spark_local_cores()
            if ray_node_cpu_cores:
                ray_node_cpu_cores = int(ray_node_cpu_cores)
                if ray_node_cpu_cores > spark_cores:
                    warnings.warn("ray_node_cpu_cores is larger than available Spark cores, "
                                  "make sure there are enough resources on your machine")
                self.ray_node_cpu_cores = ray_node_cpu_cores
            else:
                self.ray_node_cpu_cores = spark_cores
        # For Spark local mode, directly call ray.init() and ray.shutdown().
        # ray.shutdown() would clear up all the ray related processes.
        # Ray Manager is only needed for Spark cluster mode to monitor ray processes.
        else:
            if self.sc.getConf().contains("spark.executor.cores"):
                executor_cores = int(self.sc.getConf().get("spark.executor.cores"))
            else:
                executor_cores = None
            if ray_node_cpu_cores:
                ray_node_cpu_cores = int(ray_node_cpu_cores)
                if executor_cores and ray_node_cpu_cores > executor_cores:
                    warnings.warn("ray_node_cpu_cores is larger than Spark executor cores, "
                                  "make sure there are enough resources on your cluster")
                self.ray_node_cpu_cores = ray_node_cpu_cores
            elif executor_cores:
                self.ray_node_cpu_cores = executor_cores
            else:
                raise Exception("spark.executor.cores not detected in the SparkContext, "
                                "you need to manually specify num_ray_nodes and ray_node_cpu_cores "
                                "for RayContext to start ray services")
            if self.sc.getConf().contains("spark.executor.instances"):
                num_executors = int(self.sc.getConf().get("spark.executor.instances"))
            elif self.sc.getConf().contains("spark.cores.max"):
                import math
                # Standalone/Mesos: derive the executor count from the core cap.
                num_executors = math.floor(
                    int(self.sc.getConf().get("spark.cores.max")) / self.ray_node_cpu_cores)
            else:
                num_executors = None
            if num_ray_nodes:
                num_ray_nodes = int(num_ray_nodes)
                if num_executors and num_ray_nodes > num_executors:
                    warnings.warn("num_ray_nodes is larger than the number of Spark executors, "
                                  "make sure there are enough resources on your cluster")
                self.num_ray_nodes = num_ray_nodes
            elif num_executors:
                self.num_ray_nodes = num_executors
            else:
                # Bug fix: this branch is about the executor count, not cores;
                # the old message wrongly blamed spark.executor.cores.
                raise Exception("spark.executor.instances (or spark.cores.max) not detected in "
                                "the SparkContext, you need to manually specify num_ray_nodes "
                                "and ray_node_cpu_cores for RayContext to start ray services")
        self.python_loc = os.environ['PYSPARK_PYTHON']
        self.redis_port = random.randint(10000, 65535) if not redis_port else int(redis_port)
        self.ray_service = RayServiceFuncGenerator(
            python_loc=self.python_loc,
            redis_port=self.redis_port,
            ray_node_cpu_cores=self.ray_node_cpu_cores,
            password=self.redis_password,
            object_store_memory=self.object_store_memory,
            verbose=self.verbose,
            env=self.env,
            extra_params=self.extra_params)
        self._gather_cluster_ips()
        from bigdl.util.common import init_executor_gateway
        print("Start to launch the JVM guarding process")
        init_executor_gateway(sc)
        print("JVM guarding process has been successfully launched")
        RayContext._active_ray_context = self
@classmethod
def get(cls, initialize=True):
if RayContext._active_ray_context:
ray_ctx = RayContext._active_ray_context
if initialize and not ray_ctx.initialized:
ray_ctx.init()
return ray_ctx
else:
raise Exception("No active RayContext. Please create a RayContext and init it first")
def _gather_cluster_ips(self):
    """Run a barrier job with one task per executor core and return the list
    of task host IPs as reported by the first partition.
    """
    total_cores = int(self.num_ray_nodes) * int(self.ray_node_cpu_cores)

    def info_fn(iter):
        from pyspark import BarrierTaskContext
        tc = BarrierTaskContext.get()
        # Each task sees all tasks' "ip:port" addresses; keep only the host part.
        task_addrs = [taskInfo.address.split(":")[0] for taskInfo in tc.getTaskInfos()]
        yield task_addrs
        tc.barrier()

    # One slice per core so every executor core hosts exactly one barrier task.
    ips = self.sc.range(0, total_cores,
                        numSlices=total_cores).barrier().mapPartitions(info_fn).collect()
    return ips[0]
def stop(self):
    """Disconnect the driver from ray and, for cluster mode, stop the
    monitored ray processes on the executors."""
    if not self.initialized:
        print("The Ray cluster has not been launched.")
        return
    import ray
    ray.shutdown()
    if not self.is_local:
        # Cluster mode: the ProcessMonitor owns the remote ray processes.
        if not self.ray_processesMonitor:
            print("Please start the runner first before closing it")
        else:
            self.ray_processesMonitor.clean_fn()
    self.initialized = False
def purge(self):
    """
    Invoke ray stop to clean ray processes.
    """
    if not self.initialized:
        print("The Ray cluster has not been launched.")
        return
    if self.is_local:
        import ray
        ray.shutdown()
    else:
        # Run `ray stop` once per ray node via a barrier job.
        self.sc.range(0,
                      self.num_ray_nodes,
                      numSlices=self.num_ray_nodes).barrier().mapPartitions(
            self.ray_service.gen_stop()).collect()
    self.initialized = False
def _get_spark_local_cores(self):
    """Parse the core count out of a local master URL like "local[4]" or "local[*]"."""
    cores = re.match(r"local\[(.*)\]", self.sc.master).group(1)
    return multiprocessing.cpu_count() if cores == "*" else int(cores)
def init(self, driver_cores=0):
    """
    Initiate the ray cluster.

    :param driver_cores: The number of cores for the raylet on driver for Spark cluster mode.
    Default is 0 and in this case the local driver wouldn't have any ray workload.

    :return The dictionary of address information about the ray cluster.
    Information contains node_ip_address, redis_address, object_store_address,
    raylet_socket_name, webui_url and session_dir.
    """
    if self.initialized:
        print("The Ray cluster has been launched.")
    else:
        if self.is_local:
            # Local mode: a plain in-process ray.init is sufficient.
            if self.env:
                os.environ.update(self.env)
            import ray
            self._address_info = ray.init(num_cpus=self.ray_node_cpu_cores,
                                          object_store_memory=self.object_store_memory,
                                          resources=self.extra_params)
        else:
            # Cluster mode: start ray on executors, then attach the driver.
            self._start_cluster()
            self._address_info = self._start_driver(num_cores=driver_cores)

        print(self._address_info)
        # Keep only one ray log_monitor per host to avoid duplicated driver logs.
        kill_redundant_log_monitors(self._address_info["redis_address"])
        self.initialized = True
    return self._address_info
@property
def address_info(self):
    """Address information of the launched ray cluster; raises if init() has not run."""
    if not self._address_info:
        raise Exception("The Ray cluster has not been launched yet. Please call init first")
    return self._address_info
def _start_cluster(self):
    """Launch ray (master + raylets) on the executors via a barrier job and
    start monitoring the spawned processes."""
    print("Start to launch ray on cluster")
    ray_rdd = self.sc.range(0, self.num_ray_nodes,
                            numSlices=self.num_ray_nodes)
    process_infos = ray_rdd.barrier().mapPartitions(
        self.ray_service.gen_ray_start()).collect()

    self.ray_processesMonitor = ProcessMonitor(process_infos, self.sc, ray_rdd, self,
                                               verbose=self.verbose)
    # The monitor's master record carries the "ip:port" of the ray head's redis.
    self.redis_address = self.ray_processesMonitor.master.master_addr
    return self
def _start_restricted_worker(self, num_cores, node_ip_address):
    """Start a local raylet with num_cores cpus attached to the cluster's
    redis so the driver can submit work, and register it for shutdown."""
    extra_param = {"node-ip-address": node_ip_address}
    if self.extra_params is not None:
        extra_param.update(self.extra_params)
    command = RayServiceFuncGenerator._get_raylet_command(
        redis_address=self.redis_address,
        ray_exec="ray",
        password=self.redis_password,
        ray_node_cpu_cores=num_cores,
        object_store_memory=self.object_store_memory,
        extra_params=extra_param)
    print("Executing command: {}".format(command))
    process_info = session_execute(command=command, fail_fast=True)
    # Ensure this raylet is killed when the driver process exits.
    ProcessMonitor.register_shutdown_hook(pgid=process_info.pgid)
def _start_driver(self, num_cores=0):
    """Attach the local driver to the ray cluster and return the address
    information dict produced by ray.init."""
    print("Start to launch ray driver on local")
    import ray.services
    node_ip = ray.services.get_node_ip_address(self.redis_address)
    self._start_restricted_worker(num_cores=num_cores,
                                  node_ip_address=node_ip)
    # Drop any previous in-process ray state before joining the cluster.
    ray.shutdown()
    return ray.init(address=self.redis_address,
                    redis_password=self.ray_service.password,
                    node_ip_address=node_ip)
Support RayOnSpark for k8s and add docs (#2836)
* support ray on k8s
* add to init orca context
* style
* minor
* minor
* ut
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import re
import random
import signal
import warnings
import multiprocessing
from zoo.ray.process import session_execute, ProcessMonitor
from zoo.ray.utils import is_local
from zoo.ray.utils import resource_to_bytes
class JVMGuard:
    """
    The registered pids would be put into the killing list of Spark Executor.
    """

    @staticmethod
    def register_pids(pids):
        """Hand the given process ids over to the executor JVM's kill list."""
        import traceback
        try:
            from zoo.common.utils import callZooFunc
            import zoo
            callZooFunc("float", "jvmGuardRegisterPids", pids)
        except Exception as err:
            # Registration failed: log the failure, reap the processes
            # ourselves so nothing leaks, then propagate the error.
            print(traceback.format_exc())
            print("Cannot successfully register pid into JVMGuard")
            for process_id in pids:
                os.kill(process_id, signal.SIGKILL)
            raise err
def kill_redundant_log_monitors(redis_address):
    """
    Killing redundant log_monitor.py processes.
    If multiple ray nodes are started on the same machine,
    there will be multiple ray log_monitor.py processes
    monitoring the same log dir. As a result, the logs
    will be replicated multiple times and forwarded to driver.
    See issue https://github.com/ray-project/ray/issues/10392
    """
    import psutil
    import subprocess
    target_flag = "--redis-address={}".format(redis_address)
    monitors = []
    for candidate in psutil.process_iter(["name", "cmdline"]):
        cmdline = subprocess.list2cmdline(candidate.cmdline())
        if "log_monitor.py" in cmdline and target_flag in cmdline:
            monitors.append(candidate)
    # Keep the first monitor alive; terminate every duplicate.
    for duplicate in monitors[1:]:
        duplicate.kill()
class RayServiceFuncGenerator(object):
    """
    This should be a pickable class.

    Generates the functions (shipped to executors via mapPartitions) and the
    command lines used to start/stop ray processes on the cluster.
    """

    def _prepare_env(self):
        # Build the environment dict used when spawning ray processes on executors.
        modified_env = os.environ.copy()
        if self.python_loc == "python_env/bin/python":
            # In this case the executor is using the conda yarn archive under the current
            # working directory. Need to get the full path.
            executor_python_path = "{}/{}".format(
                os.getcwd(), "/".join(self.python_loc.split("/")[:-1]))
        else:
            executor_python_path = "/".join(self.python_loc.split("/")[:-1])
        if "PATH" in os.environ:
            modified_env["PATH"] = "{}:{}".format(executor_python_path, os.environ["PATH"])
        else:
            modified_env["PATH"] = executor_python_path
        modified_env["LC_ALL"] = "C.UTF-8"
        modified_env["LANG"] = "C.UTF-8"
        modified_env.pop("MALLOC_ARENA_MAX", None)
        modified_env.pop("RAY_BACKEND_LOG_LEVEL", None)
        # Unset all MKL setting as Analytics Zoo would give default values when init env.
        # Running different programs may need different configurations.
        modified_env.pop("intra_op_parallelism_threads", None)
        modified_env.pop("inter_op_parallelism_threads", None)
        modified_env.pop("OMP_NUM_THREADS", None)
        modified_env.pop("KMP_BLOCKTIME", None)
        modified_env.pop("KMP_AFFINITY", None)
        modified_env.pop("KMP_SETTINGS", None)
        if self.env:  # Add in env argument if any MKL setting is needed.
            modified_env.update(self.env)
        if self.verbose:
            print("Executing with these environment settings:")
            for pair in modified_env.items():
                print(pair)
            print("The $PATH is: {}".format(modified_env["PATH"]))
        return modified_env

    def __init__(self, python_loc, redis_port, ray_node_cpu_cores,
                 password, object_store_memory, verbose=False, env=None,
                 extra_params=None):
        """object_store_memory: integer in bytes"""
        self.env = env
        self.python_loc = python_loc
        self.redis_port = redis_port
        self.password = password
        self.ray_node_cpu_cores = ray_node_cpu_cores
        self.ray_exec = self._get_ray_exec()
        self.object_store_memory = object_store_memory
        self.extra_params = extra_params
        self.verbose = verbose
        # _mxnet_worker and _mxnet_server are resource tags for distributed MXNet training only
        # in order to diff worker from server.
        # This is useful to allocate workers and servers in the cluster.
        # Leave some reserved custom resources free to avoid unknown crash due to resources.
        self.labels = \
            """--resources '{"_mxnet_worker": %s, "_mxnet_server": %s, "_reserved": %s}'""" \
            % (1, 1, 2)

    def gen_stop(self):
        # Returns a mapPartitions function that runs `ray stop` on each node.
        def _stop(iter):
            command = "{} stop".format(self.ray_exec)
            print("Start to end the ray services: {}".format(command))
            session_execute(command=command, fail_fast=True)
            return iter

        return _stop

    @staticmethod
    def _enrich_command(command, object_store_memory, extra_params):
        # Append the optional object-store size and arbitrary `--key value` flags.
        if object_store_memory:
            command = command + " --object-store-memory {}".format(str(object_store_memory))
        if extra_params:
            for pair in extra_params.items():
                command = command + " --{} {}".format(pair[0], pair[1])
        return command

    def _gen_master_command(self):
        # Command line that starts the ray head node (redis + webui).
        command = "{} start --head " \
                  "--include-webui true --redis-port {} " \
                  "--redis-password {} --num-cpus {}". \
            format(self.ray_exec, self.redis_port, self.password,
                   self.ray_node_cpu_cores)
        if self.labels:
            command = command + " " + self.labels
        return RayServiceFuncGenerator._enrich_command(command=command,
                                                       object_store_memory=self.object_store_memory,
                                                       extra_params=self.extra_params)

    @staticmethod
    def _get_raylet_command(redis_address,
                            ray_exec,
                            password,
                            ray_node_cpu_cores,
                            labels="",
                            object_store_memory=None,
                            extra_params=None):
        # Command line for a (non-head) raylet that joins redis_address.
        command = "{} start --address {} --redis-password {} --num-cpus {}".format(
            ray_exec, redis_address, password, ray_node_cpu_cores)
        if labels:
            command = command + " " + labels
        return RayServiceFuncGenerator._enrich_command(command=command,
                                                       object_store_memory=object_store_memory,
                                                       extra_params=extra_params)

    def _start_ray_node(self, command, tag):
        # Spawn the ray process, register its pids with the executor JVM so
        # they die with the executor, and record this node's ip on the result.
        modified_env = self._prepare_env()
        print("Starting {} by running: {}".format(tag, command))
        process_info = session_execute(command=command, env=modified_env, tag=tag)
        JVMGuard.register_pids(process_info.pids)
        import ray.services as rservices
        process_info.node_ip = rservices.get_node_ip_address()
        return process_info

    def _get_ray_exec(self):
        # Resolve the `ray` executable matching the python interpreter in use.
        if "envs" in self.python_loc:  # conda environment
            python_bin_dir = "/".join(self.python_loc.split("/")[:-1])
            return "{}/python {}/ray".format(python_bin_dir, python_bin_dir)
        else:  # system environment with ray installed; for example: /usr/local/bin/ray
            return "ray"

    def gen_ray_start(self, master_ip):
        # Returns a mapPartitions function that starts the ray master on
        # master_ip and a raylet on every other participating node.
        def _start_ray_services(iter):
            from pyspark import BarrierTaskContext
            from zoo.util.utils import get_node_ip
            tc = BarrierTaskContext.get()
            current_ip = get_node_ip()
            print("current address {}".format(current_ip))
            print("master address {}".format(master_ip))
            redis_address = "{}:{}".format(master_ip, self.redis_port)
            process_info = None
            import tempfile
            import filelock
            base_path = tempfile.gettempdir()
            master_flag_path = os.path.join(base_path, "ray_master_initialized")
            if current_ip == master_ip:  # Start the ray master.
                lock_path = os.path.join(base_path, "ray_master_start.lock")
                # It is possible that multiple executors are on one node. In this case,
                # the first executor that gets the lock would be the master and it would
                # create a flag to indicate the master has initialized.
                # The flag file is removed when ray start processes finish so that this
                # won't affect other programs.
                with filelock.FileLock(lock_path):
                    if not os.path.exists(master_flag_path):
                        print("partition id is : {}".format(tc.partitionId()))
                        process_info = self._start_ray_node(command=self._gen_master_command(),
                                                            tag="ray-master")
                        process_info.master_addr = redis_address
                        os.mknod(master_flag_path)
            # All tasks wait until the master is up before starting raylets.
            tc.barrier()
            if not process_info:  # Start raylets.
                lock_path = os.path.join(base_path, "raylet_start.lock")
                with filelock.FileLock(lock_path):
                    print("partition id is : {}".format(tc.partitionId()))
                    process_info = self._start_ray_node(
                        command=RayServiceFuncGenerator._get_raylet_command(
                            redis_address=redis_address,
                            ray_exec=self.ray_exec,
                            password=self.password,
                            ray_node_cpu_cores=self.ray_node_cpu_cores,
                            labels=self.labels,
                            object_store_memory=self.object_store_memory,
                            extra_params=self.extra_params),
                        tag="raylet")
                    kill_redundant_log_monitors(redis_address=redis_address)
            if os.path.exists(master_flag_path):
                os.remove(master_flag_path)
            yield process_info

        return _start_ray_services
class RayContext(object):
    # Singleton: the most recently constructed RayContext.
    _active_ray_context = None

    def __init__(self, sc, redis_port=None, password="123456", object_store_memory=None,
                 verbose=False, env=None, extra_params=None,
                 num_ray_nodes=None, ray_node_cpu_cores=None):
        """
        The RayContext would initiate a ray cluster on top of the configuration of SparkContext.
        After creating RayContext, call the init method to set up the cluster.

        - For Spark local mode: The total available cores for Ray is equal to the number of
        Spark local cores.
        - For Spark cluster mode: The number of raylets to be created is equal to the number of
        Spark executors. The number of cores allocated for each raylet is equal to the number of
        cores for each Spark executor.
        You are allowed to specify num_ray_nodes and ray_node_cpu_cores for configurations
        to start raylets.

        :param sc: An instance of SparkContext.
        :param redis_port: redis port for the "head" node.
        The value would be randomly picked if not specified.
        :param password: Password for the redis. Default to be "123456" if not specified.
        :param object_store_memory: The memory size for ray object_store in string.
        This can be specified in bytes(b), kilobytes(k), megabytes(m) or gigabytes(g).
        For example, 50b, 100k, 250m, 30g.
        :param verbose: True for more logs when starting ray. Default is False.
        :param env: The environment variable dict for running ray processes. Default is None.
        :param extra_params: The key value dict for extra options to launch ray.
        For example, extra_params={"temp-dir": "/tmp/ray/"}
        :param num_ray_nodes: The number of raylets to start across the cluster.
        For Spark local mode, you don't need to specify this value.
        For Spark cluster mode, it is default to be the number of Spark executors. If
        spark.executor.instances can't be detected in your SparkContext, you need to explicitly
        specify this. It is recommended that num_ray_nodes is not larger than the number of
        Spark executors to make sure there are enough resources in your cluster.
        :param ray_node_cpu_cores: The number of available cores for each raylet.
        For Spark local mode, it is default to be the number of Spark local cores.
        For Spark cluster mode, it is default to be the number of cores for each Spark executor. If
        spark.executor.cores or spark.cores.max can't be detected in your SparkContext, you need to
        explicitly specify this. It is recommended that ray_node_cpu_cores is not larger than the
        number of cores for each Spark executor to make sure there are enough resources in your
        cluster.
        """
        assert sc is not None, "sc cannot be None, please create a SparkContext first"
        self.sc = sc
        self.initialized = False
        self.is_local = is_local(sc)
        self.verbose = verbose
        self.redis_password = password
        self.object_store_memory = resource_to_bytes(object_store_memory)
        self.ray_processesMonitor = None
        self.env = env
        self.extra_params = extra_params
        self._address_info = None
        if self.is_local:
            self.num_ray_nodes = 1
            spark_cores = self._get_spark_local_cores()
            if ray_node_cpu_cores:
                ray_node_cpu_cores = int(ray_node_cpu_cores)
                if ray_node_cpu_cores > spark_cores:
                    warnings.warn("ray_node_cpu_cores is larger than available Spark cores, "
                                  "make sure there are enough resources on your machine")
                self.ray_node_cpu_cores = ray_node_cpu_cores
            else:
                self.ray_node_cpu_cores = spark_cores
        # For Spark local mode, directly call ray.init() and ray.shutdown().
        # ray.shutdown() would clear up all the ray related processes.
        # Ray Manager is only needed for Spark cluster mode to monitor ray processes.
        else:
            # Derive cores per raylet from Spark conf unless given explicitly.
            if self.sc.getConf().contains("spark.executor.cores"):
                executor_cores = int(self.sc.getConf().get("spark.executor.cores"))
            else:
                executor_cores = None
            if ray_node_cpu_cores:
                ray_node_cpu_cores = int(ray_node_cpu_cores)
                if executor_cores and ray_node_cpu_cores > executor_cores:
                    warnings.warn("ray_node_cpu_cores is larger than Spark executor cores, "
                                  "make sure there are enough resources on your cluster")
                self.ray_node_cpu_cores = ray_node_cpu_cores
            elif executor_cores:
                self.ray_node_cpu_cores = executor_cores
            else:
                raise Exception("spark.executor.cores not detected in the SparkContext, "
                                "you need to manually specify num_ray_nodes and ray_node_cpu_cores "
                                "for RayContext to start ray services")
            # Derive the number of raylets from Spark conf unless given explicitly.
            if self.sc.getConf().contains("spark.executor.instances"):
                num_executors = int(self.sc.getConf().get("spark.executor.instances"))
            elif self.sc.getConf().contains("spark.cores.max"):
                # Standalone mode: estimate executor count from the total core cap.
                import math
                num_executors = math.floor(
                    int(self.sc.getConf().get("spark.cores.max")) / self.ray_node_cpu_cores)
            else:
                num_executors = None
            if num_ray_nodes:
                num_ray_nodes = int(num_ray_nodes)
                if num_executors and num_ray_nodes > num_executors:
                    warnings.warn("num_ray_nodes is larger than the number of Spark executors, "
                                  "make sure there are enough resources on your cluster")
                self.num_ray_nodes = num_ray_nodes
            elif num_executors:
                self.num_ray_nodes = num_executors
            else:
                raise Exception("spark.executor.cores not detected in the SparkContext, "
                                "you need to manually specify num_ray_nodes and ray_node_cpu_cores "
                                "for RayContext to start ray services")

        from zoo.util.utils import detect_python_location
        self.python_loc = os.environ.get("PYSPARK_PYTHON", detect_python_location())
        self.redis_port = random.randint(10000, 65535) if not redis_port else int(redis_port)
        self.ray_service = RayServiceFuncGenerator(
            python_loc=self.python_loc,
            redis_port=self.redis_port,
            ray_node_cpu_cores=self.ray_node_cpu_cores,
            password=self.redis_password,
            object_store_memory=self.object_store_memory,
            verbose=self.verbose,
            env=self.env,
            extra_params=self.extra_params)
        RayContext._active_ray_context = self

    @classmethod
    def get(cls, initialize=True):
        """Return the active RayContext, calling init() first when initialize is True."""
        if RayContext._active_ray_context:
            ray_ctx = RayContext._active_ray_context
            if initialize and not ray_ctx.initialized:
                ray_ctx.init()
            return ray_ctx
        else:
            raise Exception("No active RayContext. Please create a RayContext and init it first")

    def _gather_cluster_ips(self):
        """
        Get the ips of all Spark executors in the cluster. The first ip returned would be the
        ray master.
        """
        def info_fn(iter):
            from zoo.util.utils import get_node_ip
            yield get_node_ip()

        # One barrier task per ray node, each reporting its own host ip.
        ips = self.sc.range(0, self.num_ray_nodes,
                            numSlices=self.num_ray_nodes).barrier().mapPartitions(info_fn).collect()
        return ips

    def stop(self):
        """Disconnect the driver from ray and stop ray processes on the cluster."""
        if not self.initialized:
            print("The Ray cluster has not been launched.")
            return
        import ray
        ray.shutdown()
        if not self.is_local:
            if not self.ray_processesMonitor:
                print("Please start the runner first before closing it")
            else:
                self.ray_processesMonitor.clean_fn()
        self.initialized = False

    def purge(self):
        """
        Invoke ray stop to clean ray processes.
        """
        if not self.initialized:
            print("The Ray cluster has not been launched.")
            return
        if self.is_local:
            import ray
            ray.shutdown()
        else:
            # Run `ray stop` once per ray node via a barrier job.
            self.sc.range(0,
                          self.num_ray_nodes,
                          numSlices=self.num_ray_nodes).barrier().mapPartitions(
                self.ray_service.gen_stop()).collect()
        self.initialized = False

    def _get_spark_local_cores(self):
        # Parse the core count from a local master like "local[4]" or "local[*]".
        local_symbol = re.match(r"local\[(.*)\]", self.sc.master).group(1)
        if local_symbol == "*":
            return multiprocessing.cpu_count()
        else:
            return int(local_symbol)

    def init(self, driver_cores=0):
        """
        Initiate the ray cluster.

        :param driver_cores: The number of cores for the raylet on driver for Spark cluster mode.
        Default is 0 and in this case the local driver wouldn't have any ray workload.

        :return The dictionary of address information about the ray cluster.
        Information contains node_ip_address, redis_address, object_store_address,
        raylet_socket_name, webui_url and session_dir.
        """
        if self.initialized:
            print("The Ray cluster has been launched.")
        else:
            if self.is_local:
                if self.env:
                    os.environ.update(self.env)
                import ray
                self._address_info = ray.init(num_cpus=self.ray_node_cpu_cores,
                                              object_store_memory=self.object_store_memory,
                                              resources=self.extra_params)
            else:
                # Cluster mode: pick the master ip, start the JVM gateway on
                # executors, launch ray remotely, then attach the driver.
                self.cluster_ips = self._gather_cluster_ips()
                from bigdl.util.common import init_executor_gateway
                init_executor_gateway(self.sc)
                print("JavaGatewayServer has been successfully launched on executors")
                self._start_cluster()
                self._address_info = self._start_driver(num_cores=driver_cores)

            print(self._address_info)
            # Keep only one ray log_monitor per host to avoid duplicated driver logs.
            kill_redundant_log_monitors(self._address_info["redis_address"])
            self.initialized = True
        return self._address_info

    @property
    def address_info(self):
        """Address info dict of the launched cluster; raises when init() has not run."""
        if self._address_info:
            return self._address_info
        else:
            raise Exception("The Ray cluster has not been launched yet. Please call init first")

    def _start_cluster(self):
        """Launch ray master and raylets on the executors via a barrier job."""
        print("Start to launch ray on cluster")
        ray_rdd = self.sc.range(0, self.num_ray_nodes,
                                numSlices=self.num_ray_nodes)
        # The first ip would be used to launch ray master.
        process_infos = ray_rdd.barrier().mapPartitions(
            self.ray_service.gen_ray_start(self.cluster_ips[0])).collect()

        self.ray_processesMonitor = ProcessMonitor(process_infos, self.sc, ray_rdd, self,
                                                   verbose=self.verbose)
        self.redis_address = self.ray_processesMonitor.master.master_addr
        return self

    def _start_restricted_worker(self, num_cores, node_ip_address):
        """Start a local raylet with num_cores cpus so the driver can join the cluster."""
        extra_param = {"node-ip-address": node_ip_address}
        if self.extra_params is not None:
            extra_param.update(self.extra_params)
        command = RayServiceFuncGenerator._get_raylet_command(
            redis_address=self.redis_address,
            ray_exec="ray",
            password=self.redis_password,
            ray_node_cpu_cores=num_cores,
            object_store_memory=self.object_store_memory,
            extra_params=extra_param)
        modified_env = self.ray_service._prepare_env()
        print("Executing command: {}".format(command))
        process_info = session_execute(command=command, env=modified_env,
                                       tag="raylet", fail_fast=True)
        # Ensure this raylet is killed when the driver process exits.
        ProcessMonitor.register_shutdown_hook(pgid=process_info.pgid)

    def _start_driver(self, num_cores=0):
        """Connect the driver to the cluster and return ray.init's address info."""
        print("Start to launch ray driver on local")
        import ray.services
        node_ip = ray.services.get_node_ip_address(self.redis_address)
        self._start_restricted_worker(num_cores=num_cores,
                                      node_ip_address=node_ip)
        ray.shutdown()
        return ray.init(address=self.redis_address,
                        redis_password=self.ray_service.password,
                        node_ip_address=node_ip)
|
# -*- coding: utf-8 -*-
import shlex
import logging
import subprocess
try:
from exceptions import RuntimeError
except ImportError:
# Python3 doesn't require this anymore
pass
def asbool(val):
    """Interpret *val* as a boolean flag ("t", "true", "y", "yes", "on", "1" are truthy)."""
    if isinstance(val, bool):
        return val
    if val is None:
        return False
    return str(val).strip().lower() in ("t", "true", "y", "yes", "on", "1")
def aslist(val):
    """Coerce *val* into a list: None -> [], list -> itself,
    anything else -> comma-split string with all spaces removed."""
    if isinstance(val, list):
        return val
    if val is None:
        return []
    cleaned = str(val).replace(' ', '')
    return cleaned.split(',')
def execute(cmd, wait=True):
    """Run *cmd* (a shell-style command string) in a subprocess.

    The child inherits the current environment with LC_ALL forced to "C" so
    that command output is not localized and can be parsed reliably
    (same fix as issue #543).

    Args:
        cmd (str): command line; split into argv with shlex (no shell).
        wait (bool): when True, block until exit and return decoded output.

    Returns:
        str or None: combined stdout/stderr when wait=True, otherwise None.

    Raises:
        RuntimeError: if the command exits with a non-zero status.
    """
    import os
    logging.info("executing command '{}'".format(cmd))
    args = shlex.split(cmd)
    my_env = os.environ.copy()
    my_env['LC_ALL'] = "C"
    proc = subprocess.Popen(args, env=my_env, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    rv = None
    if wait:
        out, _ = proc.communicate()
        if proc.returncode != 0:
            raise RuntimeError("{} exited with {}".format(cmd, proc.returncode))
        # Popen returns bytes; decode when possible (Python 3).
        if hasattr(out, "decode"):
            rv = out.decode("utf-8", "ignore")
        else:
            rv = out
    logging.info(u"command returned '{}'".format("" if not rv else rv))
    return rv
def bytefmt(num, fmt="{:.2f}"):
    """
    format a value of bytes to a more human readable pattern
    example: 15 * 1024 becomes 15KiB

    Args:
        num (int): bytes
        fmt (string): format

    Return: string
    """
    for prefix in ("", "Ki", "Mi", "Gi"):
        if num < 1024.0:
            return "{}{}B".format(fmt, prefix).format(num)
        num /= 1024.0
    # Anything past the Gi step is still reported in GiB.
    return "{}GiB".format(fmt).format(num * 1024.0)
def durationfmt(duration, shorten=False, suffix=False):
    """Format seconds as mm:ss, or hh:mm:ss (hh:mm when shorten), optionally
    appending an "m"/"h" unit when suffix is True."""
    total = int(duration)
    hours, remainder = divmod(total, 3600)
    minutes, seconds = divmod(remainder, 60)
    if hours > 0:
        unit = "h"
        if shorten:
            formatted = "{:02d}:{:02d}".format(hours, minutes)
        else:
            formatted = "{:02d}:{:02d}:{:02d}".format(hours, minutes, seconds)
    else:
        unit = "m"
        formatted = "{:02d}:{:02d}".format(minutes, seconds)
    return formatted + (unit if suffix else "")
def which(program):
    """Locate *program*: an explicit path is accepted if executable, a bare
    name is searched on PATH plus the sbin directories. Returns None if not found."""
    import os

    def is_exe(fpath):
        return os.path.isfile(fpath) and os.access(fpath, os.X_OK)

    directory, _ = os.path.split(program)
    if directory:
        if is_exe(program):
            return program
    else:
        search_path = os.environ["PATH"].split(os.pathsep)
        search_path += ["/sbin", "/usr/sbin/", "/usr/local/sbin"]
        for candidate_dir in search_path:
            candidate = os.path.join(candidate_dir, program)
            if is_exe(candidate):
                return candidate
    return None
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
Fixing #543
# -*- coding: utf-8 -*-
import shlex
import logging
import subprocess
import os
try:
from exceptions import RuntimeError
except ImportError:
# Python3 doesn't require this anymore
pass
def asbool(val):
    """Interpret *val* as a boolean flag."""
    if val is None:
        return False
    if isinstance(val, bool):
        return val
    normalized = str(val).strip().lower()
    # Accept the usual truthy spellings; everything else is False.
    return normalized in ("t", "true", "y", "yes", "on", "1")
def aslist(val):
    """Coerce *val* into a list (comma-separated string, spaces stripped)."""
    if val is None:
        return []
    return val if isinstance(val, list) else str(val).replace(' ', '').split(',')
def execute(cmd, wait=True):
    """Run *cmd* (a shell-style command string) in a subprocess.

    LC_ALL is forced to "C" so command output is not localized (fix for #543).

    Args:
        cmd (str): command line; split into argv with shlex (no shell).
        wait (bool): when True, block until exit and return decoded output.

    Returns:
        str or None: combined stdout/stderr when wait=True, otherwise None.

    Raises:
        RuntimeError: if the command exits with a non-zero status.
    """
    logging.info("executing command '{}'".format(cmd))
    args = shlex.split(cmd)
    my_env = os.environ.copy()
    my_env['LC_ALL'] = "C"
    proc = subprocess.Popen(args, env=my_env, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    rv = None
    if wait:
        out, _ = proc.communicate()
        if proc.returncode != 0:
            raise RuntimeError("{} exited with {}".format(cmd, proc.returncode))
        # Popen returns bytes; decode when possible (Python 3).
        if hasattr(out, "decode"):
            rv = out.decode("utf-8", "ignore")
        else:
            rv = out
    logging.info(u"command returned '{}'".format("" if not rv else rv))
    return rv
def bytefmt(num, fmt="{:.2f}"):
    """
    format a value of bytes to a more human readable pattern
    example: 15 * 1024 becomes 15KiB

    Args:
        num (int): bytes
        fmt (string): format

    Return: string
    """
    value = num
    for unit in ("", "Ki", "Mi", "Gi"):
        if value < 1024.0:
            return "{}{}B".format(fmt, unit).format(value)
        value = value / 1024.0
    # Values beyond the Gi step are still rendered in GiB.
    return "{}GiB".format(fmt).format(value * 1024.0)
def durationfmt(duration, shorten=False, suffix=False):
    """Render seconds as mm:ss, or hh:mm:ss (hh:mm when shorten), with an
    optional "m"/"h" unit when suffix is True."""
    seconds_total = int(duration)
    minutes, seconds = divmod(seconds_total, 60)
    hours, minutes = divmod(minutes, 60)
    if hours <= 0:
        return "{:02d}:{:02d}{}".format(minutes, seconds, "m" if suffix else "")
    if shorten:
        body = "{:02d}:{:02d}".format(hours, minutes)
    else:
        body = "{:02d}:{:02d}:{:02d}".format(hours, minutes, seconds)
    return body + ("h" if suffix else "")
def which(program):
    """Return the full path to *program* if executable, else None.
    Bare names are searched on PATH plus the sbin directories."""
    import os

    def _executable(path):
        return os.path.isfile(path) and os.access(path, os.X_OK)

    head, _tail = os.path.split(program)
    if head:
        # An explicit path was given: accept it only if it is executable.
        return program if _executable(program) else None
    candidates = os.environ["PATH"].split(os.pathsep)
    candidates += ["/sbin", "/usr/sbin/", "/usr/local/sbin"]
    for folder in candidates:
        full = os.path.join(folder, program)
        if _executable(full):
            return full
    return None
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
import csv

# Rows to export; all dicts share the same keys.
toCSV = [{'name': 'bob', 'age': 25, 'weight': 200},
         {'name': 'jim', 'age': 31, 'weight': 180}]
keys = toCSV[0].keys()

# Text mode with newline='' is required for the csv module on Python 3;
# the original 'wb' mode makes DictWriter raise (it writes str, not bytes)
# and on Python 2 'wb' vs newline handling is what newline='' replaces.
with open('people.csv', 'w', newline='') as output_file:
    dict_writer = csv.DictWriter(output_file, keys)
    dict_writer.writeheader()
    dict_writer.writerows(toCSV)
Remove unused script
|
import subprocess
import numpy as np
from osgeo import gdal
import utilities
import glob
import os
import sys
import shutil
currentdir = os.path.dirname(os.path.abspath(__file__))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
import get_extent
def process_ba(global_grid_hv):
    """For one MODIS grid cell (e.g. "h10v02"), download the raw burned-area
    hdf granules for each year 2000-2015 from s3, stack them into a single
    per-year raster, upload it, and clean up the intermediates."""
    for year in range (2000, 2016):
        output_dir = '{0}/{1}/raw/'.format(global_grid_hv, year)
        os.makedirs(output_dir)
        include = '*A{0}*{1}*'.format(year, global_grid_hv)
        # Pull the raw hdf granules for this tile/year from s3.
        cmd = ['aws', 's3', 'cp', 's3://gfw-files/sam/carbon_budget/burn_raw/', output_dir, '--recursive', '--exclude', "*", '--include', include]
        subprocess.check_call(cmd)
        hdf_files = glob.glob(output_dir+"*hdf")
        if len(hdf_files) > 0:
            array_list = []
            for hdf in hdf_files:
                # convert each hdf to a tif
                tif = utilities.hdf_to_tif(hdf)
                array = utilities.raster_to_array(tif)
                array_list.append(array)
            # stack arrays, get 1 raster for the year and tile
            stacked_year_array = utilities.stack_arrays(array_list)
            max_stacked_year_array = stacked_year_array.max(0)
            # convert stacked month arrays to 1 raster for the year
            rasters = glob.glob("burndate_{0}*_{1}.tif".format(year, global_grid_hv))
            template_raster = rasters[0]
            year_folder ='{0}/{1}/stacked/'.format(global_grid_hv, year)
            if not os.path.exists(year_folder):
                os.makedirs(year_folder)
            stacked_year_raster = utilities.array_to_raster(global_grid_hv, year, max_stacked_year_array, template_raster, year_folder)
            proj_com_tif = utilities.set_proj(stacked_year_raster)
            # after year raster is stacked, write it to text for vrt creation
            # NOTE(review): mode 'w' truncates, so only the last processed year
            # survives in year_list.txt -- confirm append ('a') wasn't intended.
            with open('year_list.txt', 'w') as list_of_ba_years:
                list_of_ba_years.write(proj_com_tif + "\n")
            # upload to somewhere on s3
            cmd = ['aws', 's3', 'cp', proj_com_tif, 's3://gfw-files/sam/carbon_budget/burn_year/']
            subprocess.check_call(cmd)
            # remove files
            shutil.rmtree(year_folder)
            shutil.rmtree(output_dir)
            burndate_name = "burndate_{0}*_{1}.tif".format(year, global_grid_hv)
            burndate_day_tif = glob.glob(burndate_name)
            for tif in burndate_day_tif:
                os.remove(tif)
        else:
            # No granules for this tile/year; nothing to do.
            pass
def clip_year_tiles(year):
    """WIP: build a global VRT for one year of burned-area tifs and clip it
    to 10-degree Hansen tile extents, uploading results to s3."""
    # download all hv tifs for this year
    include = '{0}_*_wgs84_comp.tif'.format(year)
    year_tifs_folder = "{}_year_tifs".format(year)
    # NOTE(review): this download command is built but never executed -- a
    # subprocess.check_call(cmd) appears to be missing before cmd is reassigned.
    cmd = ['aws', 's3', 'cp', 's3://gfw-files/sam/carbon_budget/burn_year/', year_tifs_folder, '--recursive', '--exclude', "*", '--include', include]
    vrt_name = "global_vrt_{}.vrt".format(year)
    file_path = "ba_{0}/*{0}*comp.tif".format(year)  # NOTE(review): unused
    # change this to build vrt based on directory year_tifs_folder
    cmd = ['aws', 's3', 'cp', ]  # NOTE(review): incomplete, immediately overwritten
    cmd = ['gdalbuildvrt', '-input_file_list', 'year_list.txt', vrt_name]
    subprocess.check_call(cmd)
    # clip vrt to hansen tile extent
    tile_list = ['10N_110E']
    for tile_id in tile_list:
        # download hansen tile
        # get coords of hansen tile
        ymax, xmin, ymin, xmax = utilities.coords(tile_id)
        clipped_raster = "ba_{0}_{1}.tif".format(year, tile_id)
        cmd = ['gdal_translate', '-ot', 'Byte', '-co', 'COMPRESS=LZW', '-a_nodata', '0',
               vrt_name, clipped_raster, '-tr', '.00025', '.00025', '-projwin', str(xmin), str(ymax), str(xmax), str(ymin)]
        subprocess.check_call(cmd)
        cmd = ['aws', 's3', 'mv', clipped_raster, 's3://gfw-files/sam/carbon_budget/burn_year_10degtiles/']
        subprocess.check_call(cmd)
    # rm viles
    os.remove('year_list.txt')
    # NOTE(review): global_grid_hv is not defined in this function (nor at
    # module scope) -- the cleanup below would raise NameError if reached.
    comp_tifs = glob.glob('ba_{0}/{0}_{1}_wgs84_comp.tif'.format(year, global_grid_hv))
    for tif in comp_tifs:
        os.remove(tif)
    day_tifs = glob.glob("burndate_*{0}.tif".format(global_grid_hv))
    for daytif in day_tifs:
        os.remove(daytif)
Write and upload the clipped burn year tile
import subprocess
import numpy as np
from osgeo import gdal
import utilities
import glob
import os
import sys
import shutil
currentdir = os.path.dirname(os.path.abspath(__file__))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
import get_extent
def process_ba(global_grid_hv):
    """For one MODIS grid cell (e.g. "h10v02"), download the raw burned-area
    hdf granules for each year 2000-2015 from s3, stack them into a single
    per-year raster, upload it, and clean up the intermediates."""
    for year in range (2000, 2016):
        output_dir = '{0}/{1}/raw/'.format(global_grid_hv, year)
        os.makedirs(output_dir)
        include = '*A{0}*{1}*'.format(year, global_grid_hv)
        # Pull the raw hdf granules for this tile/year from s3.
        cmd = ['aws', 's3', 'cp', 's3://gfw-files/sam/carbon_budget/burn_raw/', output_dir, '--recursive', '--exclude', "*", '--include', include]
        subprocess.check_call(cmd)
        hdf_files = glob.glob(output_dir+"*hdf")
        if len(hdf_files) > 0:
            array_list = []
            for hdf in hdf_files:
                # convert each hdf to a tif
                tif = utilities.hdf_to_tif(hdf)
                array = utilities.raster_to_array(tif)
                array_list.append(array)
            # stack arrays, get 1 raster for the year and tile
            stacked_year_array = utilities.stack_arrays(array_list)
            max_stacked_year_array = stacked_year_array.max(0)
            # convert stacked month arrays to 1 raster for the year
            rasters = glob.glob("burndate_{0}*_{1}.tif".format(year, global_grid_hv))
            template_raster = rasters[0]
            year_folder ='{0}/{1}/stacked/'.format(global_grid_hv, year)
            if not os.path.exists(year_folder):
                os.makedirs(year_folder)
            stacked_year_raster = utilities.array_to_raster(global_grid_hv, year, max_stacked_year_array, template_raster, year_folder)
            proj_com_tif = utilities.set_proj(stacked_year_raster)
            # after year raster is stacked, write it to text for vrt creation
            # NOTE(review): mode 'w' truncates, so only the last processed year
            # survives in year_list.txt -- confirm append ('a') wasn't intended.
            with open('year_list.txt', 'w') as list_of_ba_years:
                list_of_ba_years.write(proj_com_tif + "\n")
            # upload to somewhere on s3
            cmd = ['aws', 's3', 'cp', proj_com_tif, 's3://gfw-files/sam/carbon_budget/burn_year/']
            subprocess.check_call(cmd)
            # remove files
            shutil.rmtree(year_folder)
            shutil.rmtree(output_dir)
            burndate_name = "burndate_{0}*_{1}.tif".format(year, global_grid_hv)
            burndate_day_tif = glob.glob(burndate_name)
            for tif in burndate_day_tif:
                os.remove(tif)
        else:
            # No granules for this tile/year; nothing to do.
            pass
def clip_year_tiles(year):
    """
    Mosaic one year's global-grid burned-area tifs and clip to Hansen tiles.

    Downloads the year's composited tifs from s3, builds a vrt mosaic, clips
    it to each 10x10-degree Hansen tile extent, recodes burned pixels to
    (year - 2000), uploads the results, and cleans up.

    :param year: the year to process, as an int or numeric string (e.g. '2000')
    """
    # download all hv tifs for this year
    include = '{0}_*_wgs84_comp.tif'.format(year)
    year_tifs_folder = "{}_year_tifs".format(year)
    if not os.path.exists(year_tifs_folder):
        os.mkdir(year_tifs_folder)
    cmd = ['aws', 's3', 'cp', 's3://gfw-files/sam/carbon_budget/burn_year/', year_tifs_folder,
           '--recursive', '--exclude', "*", '--include', include]
    subprocess.check_call(cmd)
    vrt_name = "global_vrt_{}.vrt".format(year)
    # gdalbuildvrt needs an explicit file list (it won't take folder/*.tif)
    with open('vrt_files.txt', 'w') as vrt_files:
        for tif in glob.glob(year_tifs_folder + "/*"):
            vrt_files.write(tif + "\n")
    cmd = ['gdalbuildvrt', '-input_file_list', 'vrt_files.txt', vrt_name]
    subprocess.check_call(cmd)
    # clip vrt to each hansen tile extent
    tile_list = ['00N_130E']
    for tile_id in tile_list:
        # get coords of hansen tile
        ymax, xmin, ymin, xmax = utilities.coords(tile_id)
        clipped_raster = "ba_{0}_{1}_clipped.tif".format(year, tile_id)
        cmd = ['gdal_translate', '-ot', 'Byte', '-co', 'COMPRESS=LZW', '-a_nodata', '0',
               vrt_name, clipped_raster, '-tr', '.00025', '.00025',
               '-projwin', str(xmin), str(ymax), str(xmax), str(ymin)]
        subprocess.check_call(cmd)
        # recode pixel values: burned pixels become (year - 2000)
        calc = '--calc={}*(A>0)'.format(int(year) - 2000)
        recoded_output = "ba_{0}_{1}.tif".format(year, tile_id)
        outfile = '--outfile={}'.format(recoded_output)
        cmd = ['gdal_calc.py', '-A', clipped_raster, calc, outfile, '--NoDataValue=0', '--co', 'COMPRESS=LZW']
        subprocess.check_call(cmd)
        # upload file
        cmd = ['aws', 's3', 'mv', recoded_output, 's3://gfw-files/sam/carbon_budget/burn_year_10degtiles/']
        subprocess.check_call(cmd)
        # per-tile intermediate
        os.remove(clipped_raster)
    # Clean up shared intermediates once, after all tiles are done (the
    # original deleted them inside the loop, which would crash on a second
    # tile), and use shutil.rmtree instead of shelling out to `rm -r`.
    os.remove('vrt_files.txt')
    shutil.rmtree(year_tifs_folder)
clip_year_tiles('2000')
|
from pcas import Driver, SimpleServer
import time

if __name__ == '__main__':
    # Stand up a CA (Channel Access) server and register every PV declared
    # in the local db module (pvdb maps PV name -> PV info dict).
    driver = Driver()
    server = SimpleServer()
    from db import pvdb, prefix
    for pvname in pvdb:
        info = pvdb[pvname]
        pv = server.createPV(prefix, pvname, info, driver)
        driver.registerPV(pv)
    # Main loop: alternate between serving CA requests and yielding the GIL.
    while True:
        # process CA transactions
        server.process(0.01)
        # give other thread a chance
        time.sleep(0.01)
Add a shebang line so the script can be executed directly.
#!/usr/bin/env python
from pcas import Driver, SimpleServer
import time

if __name__ == '__main__':
    # Stand up a CA (Channel Access) server and register every PV declared
    # in the local db module (pvdb maps PV name -> PV info dict).
    driver = Driver()
    server = SimpleServer()
    from db import pvdb, prefix
    for pvname in pvdb:
        info = pvdb[pvname]
        pv = server.createPV(prefix, pvname, info, driver)
        driver.registerPV(pv)
    # Main loop: alternate between serving CA requests and yielding the GIL.
    while True:
        # process CA transactions
        server.process(0.01)
        # give other thread a chance
        time.sleep(0.01)
|
from django.conf.urls import patterns, include, url
import settings
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Examples:
url(r'^', include('core.urls')),
#url(r'^login/$', 'django.contrib.auth.views.login', {'template_name': 'core/login.html'}, name='login'),
#url(r'^logout/$', 'django.contrib.auth.views.logout', {'next_page': '/'}, name='logout'),
#url(r'^register/$', 'core.views.register', name='register'),
# Uncomment the admin/doc line below to enable admin documentation:
url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls)),
)
'''if not settings.DEBUG:
urlpatterns += patterns('',
(r'^static/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.STATIC_ROOT}),
)'''
Removed the commented-out static-serve URL pattern.
from django.conf.urls import patterns, include, url
import settings

# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()

# NOTE(review): patterns() was deprecated in Django 1.8 and removed in 1.10 —
# fine on the Django version this project targets, but it will block upgrades.
urlpatterns = patterns('',
    # Examples:
    url(r'^', include('core.urls')),
    #url(r'^login/$', 'django.contrib.auth.views.login', {'template_name': 'core/login.html'}, name='login'),
    #url(r'^logout/$', 'django.contrib.auth.views.logout', {'next_page': '/'}, name='logout'),
    #url(r'^register/$', 'core.views.register', name='register'),

    # Uncomment the admin/doc line below to enable admin documentation:
    url(r'^admin/doc/', include('django.contrib.admindocs.urls')),

    # Uncomment the next line to enable the admin:
    url(r'^admin/', include(admin.site.urls)),
) |
from django import forms
from django.contrib import admin
from django.contrib.gis.db.models import PointField
from django.contrib.gis.forms import OSMWidget
from django.contrib.postgres.aggregates import StringAgg
from django.contrib.postgres.search import SearchQuery, SearchRank
from django.db.models import Count, Q, F, Exists, OuterRef, CharField
from django.db.models.functions import Cast
from django.urls import reverse
from django.utils.html import format_html
from sql_util.utils import SubqueryCount
from bustimes.models import Route
from . import models
@admin.register(models.AdminArea)
class AdminAreaAdmin(admin.ModelAdmin):
    """Admin for administrative areas."""
    list_display = ('name', 'id', 'atco_code', 'region_id')
    list_filter = ('region_id',)
    search_fields = ('atco_code',)


class StopCodeInline(admin.TabularInline):
    # Edit a stop's alternative codes inline on the StopPoint page.
    model = models.StopCode
    raw_id_fields = ['source']


@admin.register(models.StopPoint)
class StopPointAdmin(admin.ModelAdmin):
    """Admin for stops, with full-text search on the stop's locality."""
    list_display = ['atco_code', 'naptan_code', 'locality', 'admin_area', '__str__']
    list_select_related = ['locality', 'admin_area']
    list_filter = ['stop_type', 'service__region', 'admin_area']
    raw_id_fields = ['places', 'admin_area']
    search_fields = ['atco_code']
    ordering = ['atco_code']
    formfield_overrides = {
        PointField: {'widget': OSMWidget}  # map widget for the stop location
    }
    inlines = [StopCodeInline]
    # Skip the exact result count on the changelist (expensive on a big table).
    show_full_result_count = False

    def get_search_results(self, request, queryset, search_term):
        """Rank stops by full-text relevance of their locality."""
        if not search_term:
            return super().get_search_results(request, queryset, search_term)
        query = SearchQuery(search_term, search_type="websearch", config="english")
        rank = SearchRank(F('locality__search_vector'), query)
        query = Q(locality__search_vector=query)
        # A single word may also be an exact ATCO code.
        if ' ' not in search_term:
            query |= Q(atco_code=search_term)
        queryset = queryset.annotate(rank=rank).filter(query).order_by("-rank")
        # Second element False: results need no distinct().
        return queryset, False


@admin.register(models.StopCode)
class StopCodeAdmin(admin.ModelAdmin):
    list_display = ['stop', 'code', 'source']
    raw_id_fields = ['stop', 'source']


class OperatorCodeInline(admin.TabularInline):
    model = models.OperatorCode


class OperatorAdminForm(forms.ModelForm):
    class Meta:
        # Multi-line text boxes for these free-text fields.
        widgets = {
            'address': forms.Textarea,
            'twitter': forms.Textarea,
        }
@admin.register(models.Operator)
class OperatorAdmin(admin.ModelAdmin):
    """Admin for operators, with linked service/vehicle counts."""
    form = OperatorAdminForm
    list_display = ['name', 'operator_codes', 'id', 'vehicle_mode', 'parent', 'region_id',
                    'services', 'vehicles', 'twitter']
    list_filter = ('region', 'vehicle_mode', 'payment_methods', 'parent')
    search_fields = ('id', 'name')
    raw_id_fields = ('region', 'regions', 'siblings', 'colour')
    inlines = [OperatorCodeInline]
    readonly_fields = ['search_vector']
    prepopulated_fields = {"slug": ("name",)}
    autocomplete_fields = ('licences',)

    def get_queryset(self, request):
        queryset = super().get_queryset(request)
        # Annotate the (expensive) counts on the changelist only, not on
        # autocomplete or detail views.
        if 'changelist' in request.resolver_match.view_name:
            return queryset.annotate(
                services=SubqueryCount('service', filter=Q(service__current=True)),
                vehicles=SubqueryCount('vehicle')
            ).prefetch_related('operatorcode_set')
        return queryset

    @admin.display(ordering='services')
    def services(self, obj):
        # Link to this operator's services in the service changelist.
        url = reverse('admin:busstops_service_changelist')
        return format_html('<a href="{}?operator__id__exact={}">{}</a>', url, obj.id, obj.services)

    @admin.display(ordering='vehicles')
    def vehicles(self, obj):
        # Link to this operator's vehicles in the vehicle changelist.
        url = reverse('admin:vehicles_vehicle_changelist')
        return format_html('<a href="{}?operator__id__exact={}">{}</a>', url, obj.id, obj.vehicles)

    def get_search_results(self, request, queryset, search_term):
        queryset, use_distinct = super().get_search_results(request, queryset, search_term)
        # Autocomplete widgets should only offer operators with current services.
        if request.path.endswith('/autocomplete/'):
            queryset = queryset.filter(Exists(models.Service.objects.filter(operator=OuterRef('pk'), current=True)))
        return queryset, use_distinct

    @staticmethod
    def payment(obj):
        # Comma-separated payment methods (for display).
        return ', '.join(str(code) for code in obj.payment_methods.all())

    @staticmethod
    def operator_codes(obj):
        # Comma-separated operator codes (for the changelist column).
        return ', '.join(str(code) for code in obj.operatorcode_set.all())


class ServiceCodeInline(admin.TabularInline):
    model = models.ServiceCode


class RouteInline(admin.TabularInline):
    # Timetable routes attached to the service.
    model = Route
    show_change_link = True
    fields = ['source', 'code', 'service_code']
    raw_id_fields = ['source']


class FromServiceLinkInline(admin.TabularInline):
    model = models.ServiceLink
    fk_name = 'from_service'
    autocomplete_fields = ['to_service']


class ToServiceLinkInline(FromServiceLinkInline):
    # Same inline, anchored on the other end of the link.
    fk_name = 'to_service'
    autocomplete_fields = ['from_service']
@admin.register(models.Service)
class ServiceAdmin(admin.ModelAdmin):
    """Admin for services; autocomplete uses full-text search."""
    list_display = ('__str__', 'service_code', 'mode', 'region_id',
                    'current', 'show_timetable', 'timetable_wrong', 'colour', 'line_brand')
    list_filter = ('current', 'show_timetable', 'timetable_wrong', 'mode', 'region',
                   ('source', admin.RelatedOnlyFieldListFilter),
                   ('operator', admin.RelatedOnlyFieldListFilter))
    search_fields = ('service_code', 'line_name', 'line_brand', 'description')
    raw_id_fields = ('operator', 'stops', 'colour', 'source')
    inlines = [ServiceCodeInline, RouteInline, FromServiceLinkInline, ToServiceLinkInline]
    readonly_fields = ['search_vector']
    list_editable = ['colour', 'line_brand']
    list_select_related = ['colour']

    def get_search_results(self, request, queryset, search_term):
        # Autocomplete: only current services, ranked by full-text relevance
        # (or an exact service_code match).
        if search_term and request.path.endswith('/autocomplete/'):
            queryset = queryset.filter(current=True)
            query = SearchQuery(search_term, search_type="websearch", config="english")
            rank = SearchRank(F('search_vector'), query)
            queryset = (
                queryset.annotate(rank=rank)
                .filter(Q(search_vector=query) | Q(service_code=search_term))
                .order_by("-rank")
            )
            return queryset, False
        # Everything else gets the default search behaviour.
        return super().get_search_results(request, queryset, search_term)


@admin.register(models.ServiceLink)
class ServiceLinkAdmin(admin.ModelAdmin):
    save_as = True
    list_display = ('from_service', 'from_service__current', 'to_service', 'to_service__current', 'how')
    list_filter = ('from_service__current', 'to_service__current', 'from_service__source', 'to_service__source')
    autocomplete_fields = ('from_service', 'to_service')

    # Changelist columns showing whether each end of the link is current.
    @staticmethod
    def from_service__current(obj):
        return obj.from_service.current

    @staticmethod
    def to_service__current(obj):
        return obj.to_service.current


@admin.register(models.Locality)
class LocalityAdmin(admin.ModelAdmin):
    list_display = ('id', 'name', 'slug')
    search_fields = ('id', 'name')
    raw_id_fields = ('adjacent',)
    list_filter = ('admin_area', 'admin_area__region')


@admin.register(models.OperatorCode)
class OperatorCodeAdmin(admin.ModelAdmin):
    save_as = True
    list_display = ('id', 'operator', 'source', 'code')
    list_filter = [
        ('source', admin.RelatedOnlyFieldListFilter)
    ]
    search_fields = ('code',)
    raw_id_fields = ('operator',)


@admin.register(models.ServiceCode)
class ServiceCodeAdmin(admin.ModelAdmin):
    list_display = ['id', 'service', 'scheme', 'code']
    list_filter = [
        'scheme',
        'service__current',
        ('service__operator', admin.RelatedOnlyFieldListFilter),
        'service__stops__admin_area'
    ]
    search_fields = ['code', 'service__line_name', 'service__description']
    autocomplete_fields = ['service']


@admin.register(models.ServiceColour)
class ServiceColourAdmin(admin.ModelAdmin):
    list_display = ['preview', 'foreground', 'background', 'services']
    search_fields = ['name']
    list_filter = [
        ('service__operator', admin.EmptyFieldListFilter)
    ]

    def get_queryset(self, request):
        queryset = super().get_queryset(request)
        # Count of current services per colour, changelist only.
        if 'changelist' in request.resolver_match.view_name:
            queryset = queryset.annotate(services=Count('service', filter=Q(service__current=True)))
        return queryset

    def services(self, obj):
        return obj.services
@admin.register(models.Place)
class PlaceAdmin(admin.ModelAdmin):
    list_filter = ('source',)
    search_fields = ('name',)


@admin.register(models.DataSource)
class DataSourceAdmin(admin.ModelAdmin):
    """Admin for data sources, with linked route/service/journey counts."""
    search_fields = ('name', 'url')
    list_display = ('name', 'url', 'sha1', 'datetime', 'settings', 'routes', 'services', 'journeys')
    list_filter = (
        ('route', admin.EmptyFieldListFilter),
        ('service', admin.EmptyFieldListFilter),
        ('vehiclejourney', admin.EmptyFieldListFilter)
    )
    actions = ['delete_routes', 'remove_datetimes']
    # Skip the exact result count on the changelist (expensive).
    show_full_result_count = False

    def get_queryset(self, request):
        queryset = super().get_queryset(request)
        # Annotate linked-object counts on the changelist only.
        if 'changelist' in request.resolver_match.view_name:
            return queryset.annotate(
                routes=SubqueryCount('route'),
                services=SubqueryCount('service', filter=Q(current=True)),
                journeys=SubqueryCount('vehiclejourney'),
                # NOTE(review): this prefetch looks copied from OperatorAdmin —
                # confirm the DataSource changelist actually uses operatorcode_set.
            ).prefetch_related('operatorcode_set')
        return queryset

    def routes(self, obj):
        # Link to this source's routes in the route changelist.
        url = reverse('admin:bustimes_route_changelist')
        return format_html('<a href="{}?source__id__exact={}">{}</a>', url, obj.id, obj.routes)
    routes.admin_order_field = 'routes'

    def services(self, obj):
        url = reverse('admin:busstops_service_changelist')
        return format_html('<a href="{}?source__id__exact={}">{}</a>', url, obj.id, obj.services)
    services.admin_order_field = 'services'

    def journeys(self, obj):
        url = reverse('admin:vehicles_vehiclejourney_changelist')
        return format_html('<a href="{}?source__id__exact={}">{}</a>', url, obj.id, obj.journeys)
    journeys.admin_order_field = 'journeys'

    def delete_routes(self, request, queryset):
        # Bulk action: delete all routes belonging to the selected sources.
        result = Route.objects.filter(source__in=queryset).delete()
        self.message_user(request, result)

    def remove_datetimes(self, request, queryset):
        # Bulk action: clear the datetime field on the selected sources.
        result = queryset.order_by().update(datetime=None)
        self.message_user(request, result)


@admin.register(models.SIRISource)
class SIRISourceAdmin(admin.ModelAdmin):
    list_display = ('name', 'url', 'requestor_ref', 'areas', 'get_poorly')

    def get_queryset(self, request):
        queryset = super().get_queryset(request)
        # Comma-separated admin area codes, changelist only.
        if 'changelist' in request.resolver_match.view_name:
            return queryset.annotate(
                areas=StringAgg(Cast('admin_areas__atco_code', output_field=CharField()), ', ')
            )
        return queryset

    @staticmethod
    def areas(obj):
        return obj.areas


class PaymentMethodOperatorInline(admin.TabularInline):
    # Through-model inline linking payment methods to operators.
    model = models.PaymentMethod.operator_set.through
    autocomplete_fields = ['operator']


@admin.register(models.PaymentMethod)
class PaymentMethodAdmin(admin.ModelAdmin):
    list_display = ('name', 'url', 'operators')
    inlines = [PaymentMethodOperatorInline]

    def get_queryset(self, request):
        queryset = super().get_queryset(request)
        # Comma-separated related operators (aggregated as text), changelist only.
        if 'changelist' in request.resolver_match.view_name:
            return queryset.annotate(operators=StringAgg('operator', ', ', distinct=True))
        return queryset

    @staticmethod
    def operators(obj):
        return obj.operators


# Simple models that need no customised admin.
admin.site.register(models.Region)
admin.site.register(models.District)
admin.site.register(models.StopArea)
Use an Exists subquery, which is quicker than counting every journey.
from django import forms
from django.contrib import admin
from django.contrib.gis.db.models import PointField
from django.contrib.gis.forms import OSMWidget
from django.contrib.postgres.aggregates import StringAgg
from django.contrib.postgres.search import SearchQuery, SearchRank
from django.db.models import Count, Q, F, Exists, OuterRef, CharField
from django.db.models.functions import Cast
from django.urls import reverse
from django.utils.html import format_html
from sql_util.utils import SubqueryCount
from bustimes.models import Route
from vehicles.models import VehicleJourney
from . import models
@admin.register(models.AdminArea)
class AdminAreaAdmin(admin.ModelAdmin):
    """Admin for administrative areas."""
    list_display = ('name', 'id', 'atco_code', 'region_id')
    list_filter = ('region_id',)
    search_fields = ('atco_code',)


class StopCodeInline(admin.TabularInline):
    # Edit a stop's alternative codes inline on the StopPoint page.
    model = models.StopCode
    raw_id_fields = ['source']


@admin.register(models.StopPoint)
class StopPointAdmin(admin.ModelAdmin):
    """Admin for stops, with full-text search on the stop's locality."""
    list_display = ['atco_code', 'naptan_code', 'locality', 'admin_area', '__str__']
    list_select_related = ['locality', 'admin_area']
    list_filter = ['stop_type', 'service__region', 'admin_area']
    raw_id_fields = ['places', 'admin_area']
    search_fields = ['atco_code']
    ordering = ['atco_code']
    formfield_overrides = {
        PointField: {'widget': OSMWidget}  # map widget for the stop location
    }
    inlines = [StopCodeInline]
    # Skip the exact result count on the changelist (expensive on a big table).
    show_full_result_count = False

    def get_search_results(self, request, queryset, search_term):
        """Rank stops by full-text relevance of their locality."""
        if not search_term:
            return super().get_search_results(request, queryset, search_term)
        query = SearchQuery(search_term, search_type="websearch", config="english")
        rank = SearchRank(F('locality__search_vector'), query)
        query = Q(locality__search_vector=query)
        # A single word may also be an exact ATCO code.
        if ' ' not in search_term:
            query |= Q(atco_code=search_term)
        queryset = queryset.annotate(rank=rank).filter(query).order_by("-rank")
        # Second element False: results need no distinct().
        return queryset, False


@admin.register(models.StopCode)
class StopCodeAdmin(admin.ModelAdmin):
    list_display = ['stop', 'code', 'source']
    raw_id_fields = ['stop', 'source']


class OperatorCodeInline(admin.TabularInline):
    model = models.OperatorCode


class OperatorAdminForm(forms.ModelForm):
    class Meta:
        # Multi-line text boxes for these free-text fields.
        widgets = {
            'address': forms.Textarea,
            'twitter': forms.Textarea,
        }
@admin.register(models.Operator)
class OperatorAdmin(admin.ModelAdmin):
    """Admin for operators, with linked service/vehicle counts."""
    form = OperatorAdminForm
    list_display = ['name', 'operator_codes', 'id', 'vehicle_mode', 'parent', 'region_id',
                    'services', 'vehicles', 'twitter']
    list_filter = ('region', 'vehicle_mode', 'payment_methods', 'parent')
    search_fields = ('id', 'name')
    raw_id_fields = ('region', 'regions', 'siblings', 'colour')
    inlines = [OperatorCodeInline]
    readonly_fields = ['search_vector']
    prepopulated_fields = {"slug": ("name",)}
    autocomplete_fields = ('licences',)

    def get_queryset(self, request):
        queryset = super().get_queryset(request)
        # Annotate the (expensive) counts on the changelist only, not on
        # autocomplete or detail views.
        if 'changelist' in request.resolver_match.view_name:
            return queryset.annotate(
                services=SubqueryCount('service', filter=Q(service__current=True)),
                vehicles=SubqueryCount('vehicle')
            ).prefetch_related('operatorcode_set')
        return queryset

    @admin.display(ordering='services')
    def services(self, obj):
        # Link to this operator's services in the service changelist.
        url = reverse('admin:busstops_service_changelist')
        return format_html('<a href="{}?operator__id__exact={}">{}</a>', url, obj.id, obj.services)

    @admin.display(ordering='vehicles')
    def vehicles(self, obj):
        # Link to this operator's vehicles in the vehicle changelist.
        url = reverse('admin:vehicles_vehicle_changelist')
        return format_html('<a href="{}?operator__id__exact={}">{}</a>', url, obj.id, obj.vehicles)

    def get_search_results(self, request, queryset, search_term):
        queryset, use_distinct = super().get_search_results(request, queryset, search_term)
        # Autocomplete widgets should only offer operators with current services.
        if request.path.endswith('/autocomplete/'):
            queryset = queryset.filter(Exists(models.Service.objects.filter(operator=OuterRef('pk'), current=True)))
        return queryset, use_distinct

    @staticmethod
    def payment(obj):
        # Comma-separated payment methods (for display).
        return ', '.join(str(code) for code in obj.payment_methods.all())

    @staticmethod
    def operator_codes(obj):
        # Comma-separated operator codes (for the changelist column).
        return ', '.join(str(code) for code in obj.operatorcode_set.all())


class ServiceCodeInline(admin.TabularInline):
    model = models.ServiceCode


class RouteInline(admin.TabularInline):
    # Timetable routes attached to the service.
    model = Route
    show_change_link = True
    fields = ['source', 'code', 'service_code']
    raw_id_fields = ['source']


class FromServiceLinkInline(admin.TabularInline):
    model = models.ServiceLink
    fk_name = 'from_service'
    autocomplete_fields = ['to_service']


class ToServiceLinkInline(FromServiceLinkInline):
    # Same inline, anchored on the other end of the link.
    fk_name = 'to_service'
    autocomplete_fields = ['from_service']
@admin.register(models.Service)
class ServiceAdmin(admin.ModelAdmin):
    """Admin for services; autocomplete uses full-text search."""
    list_display = ('__str__', 'service_code', 'mode', 'region_id',
                    'current', 'show_timetable', 'timetable_wrong', 'colour', 'line_brand')
    list_filter = ('current', 'show_timetable', 'timetable_wrong', 'mode', 'region',
                   ('source', admin.RelatedOnlyFieldListFilter),
                   ('operator', admin.RelatedOnlyFieldListFilter))
    search_fields = ('service_code', 'line_name', 'line_brand', 'description')
    raw_id_fields = ('operator', 'stops', 'colour', 'source')
    inlines = [ServiceCodeInline, RouteInline, FromServiceLinkInline, ToServiceLinkInline]
    readonly_fields = ['search_vector']
    list_editable = ['colour', 'line_brand']
    list_select_related = ['colour']

    def get_search_results(self, request, queryset, search_term):
        # Autocomplete: only current services, ranked by full-text relevance
        # (or an exact service_code match).
        if search_term and request.path.endswith('/autocomplete/'):
            queryset = queryset.filter(current=True)
            query = SearchQuery(search_term, search_type="websearch", config="english")
            rank = SearchRank(F('search_vector'), query)
            queryset = (
                queryset.annotate(rank=rank)
                .filter(Q(search_vector=query) | Q(service_code=search_term))
                .order_by("-rank")
            )
            return queryset, False
        # Everything else gets the default search behaviour.
        return super().get_search_results(request, queryset, search_term)


@admin.register(models.ServiceLink)
class ServiceLinkAdmin(admin.ModelAdmin):
    save_as = True
    list_display = ('from_service', 'from_service__current', 'to_service', 'to_service__current', 'how')
    list_filter = ('from_service__current', 'to_service__current', 'from_service__source', 'to_service__source')
    autocomplete_fields = ('from_service', 'to_service')

    # Changelist columns showing whether each end of the link is current.
    @staticmethod
    def from_service__current(obj):
        return obj.from_service.current

    @staticmethod
    def to_service__current(obj):
        return obj.to_service.current


@admin.register(models.Locality)
class LocalityAdmin(admin.ModelAdmin):
    list_display = ('id', 'name', 'slug')
    search_fields = ('id', 'name')
    raw_id_fields = ('adjacent',)
    list_filter = ('admin_area', 'admin_area__region')


@admin.register(models.OperatorCode)
class OperatorCodeAdmin(admin.ModelAdmin):
    save_as = True
    list_display = ('id', 'operator', 'source', 'code')
    list_filter = [
        ('source', admin.RelatedOnlyFieldListFilter)
    ]
    search_fields = ('code',)
    raw_id_fields = ('operator',)


@admin.register(models.ServiceCode)
class ServiceCodeAdmin(admin.ModelAdmin):
    list_display = ['id', 'service', 'scheme', 'code']
    list_filter = [
        'scheme',
        'service__current',
        ('service__operator', admin.RelatedOnlyFieldListFilter),
        'service__stops__admin_area'
    ]
    search_fields = ['code', 'service__line_name', 'service__description']
    autocomplete_fields = ['service']


@admin.register(models.ServiceColour)
class ServiceColourAdmin(admin.ModelAdmin):
    list_display = ['preview', 'foreground', 'background', 'services']
    search_fields = ['name']
    list_filter = [
        ('service__operator', admin.EmptyFieldListFilter)
    ]

    def get_queryset(self, request):
        queryset = super().get_queryset(request)
        # Count of current services per colour, changelist only.
        if 'changelist' in request.resolver_match.view_name:
            queryset = queryset.annotate(services=Count('service', filter=Q(service__current=True)))
        return queryset

    def services(self, obj):
        return obj.services
@admin.register(models.Place)
class PlaceAdmin(admin.ModelAdmin):
    list_filter = ('source',)
    search_fields = ('name',)


@admin.register(models.DataSource)
class DataSourceAdmin(admin.ModelAdmin):
    """Admin for data sources, with linked route/service/journey columns."""
    search_fields = ('name', 'url')
    list_display = ('name', 'url', 'sha1', 'datetime', 'settings', 'routes', 'services', 'journeys')
    list_filter = (
        ('route', admin.EmptyFieldListFilter),
        ('service', admin.EmptyFieldListFilter),
        ('vehiclejourney', admin.EmptyFieldListFilter)
    )
    actions = ['delete_routes', 'remove_datetimes']
    # Skip the exact result count on the changelist (expensive).
    show_full_result_count = False

    def get_queryset(self, request):
        queryset = super().get_queryset(request)
        # Annotate linked-object info on the changelist only.
        if 'changelist' in request.resolver_match.view_name:
            return queryset.annotate(
                routes=SubqueryCount('route'),
                services=SubqueryCount('service', filter=Q(current=True)),
                # Exists() is much cheaper than counting every journey, but it
                # yields True/False — NOTE(review): the journeys() column below
                # now renders a boolean rather than a count; confirm intended.
                journeys=Exists(VehicleJourney.objects.filter(source=OuterRef('id'))),
                # NOTE(review): this prefetch looks copied from OperatorAdmin —
                # confirm the DataSource changelist actually uses operatorcode_set.
            ).prefetch_related('operatorcode_set')
        return queryset

    def routes(self, obj):
        # Link to this source's routes in the route changelist.
        url = reverse('admin:bustimes_route_changelist')
        return format_html('<a href="{}?source__id__exact={}">{}</a>', url, obj.id, obj.routes)
    routes.admin_order_field = 'routes'

    def services(self, obj):
        url = reverse('admin:busstops_service_changelist')
        return format_html('<a href="{}?source__id__exact={}">{}</a>', url, obj.id, obj.services)
    services.admin_order_field = 'services'

    def journeys(self, obj):
        url = reverse('admin:vehicles_vehiclejourney_changelist')
        return format_html('<a href="{}?source__id__exact={}">{}</a>', url, obj.id, obj.journeys)
    journeys.admin_order_field = 'journeys'

    def delete_routes(self, request, queryset):
        # Bulk action: delete all routes belonging to the selected sources.
        result = Route.objects.filter(source__in=queryset).delete()
        self.message_user(request, result)

    def remove_datetimes(self, request, queryset):
        # Bulk action: clear the datetime field on the selected sources.
        result = queryset.order_by().update(datetime=None)
        self.message_user(request, result)


@admin.register(models.SIRISource)
class SIRISourceAdmin(admin.ModelAdmin):
    list_display = ('name', 'url', 'requestor_ref', 'areas', 'get_poorly')

    def get_queryset(self, request):
        queryset = super().get_queryset(request)
        # Comma-separated admin area codes, changelist only.
        if 'changelist' in request.resolver_match.view_name:
            return queryset.annotate(
                areas=StringAgg(Cast('admin_areas__atco_code', output_field=CharField()), ', ')
            )
        return queryset

    @staticmethod
    def areas(obj):
        return obj.areas


class PaymentMethodOperatorInline(admin.TabularInline):
    # Through-model inline linking payment methods to operators.
    model = models.PaymentMethod.operator_set.through
    autocomplete_fields = ['operator']


@admin.register(models.PaymentMethod)
class PaymentMethodAdmin(admin.ModelAdmin):
    list_display = ('name', 'url', 'operators')
    inlines = [PaymentMethodOperatorInline]

    def get_queryset(self, request):
        queryset = super().get_queryset(request)
        # Comma-separated related operators (aggregated as text), changelist only.
        if 'changelist' in request.resolver_match.view_name:
            return queryset.annotate(operators=StringAgg('operator', ', ', distinct=True))
        return queryset

    @staticmethod
    def operators(obj):
        return obj.operators


# Simple models that need no customised admin.
admin.site.register(models.Region)
admin.site.register(models.District)
admin.site.register(models.StopArea)
|
from datetime import timedelta
from django.conf import settings
from django.contrib import messages
from django.core.cache import cache
from django.http import HttpRequest
from django.utils import translation
from functools import wraps
from intpacker import pack_int
from itertools import chain
import hashlib
import inspect
import re
import string
import time
# A memcached limit.
MAX_KEY_LENGTH = 250

# String containing all invalid characters to strip from memcached keys.
# Contains all control characters from C0 (0x00-0x20 and 0x7F),
# and C1 (0x80-0x9F) as defined by the ISO-2022 and ECMA-48 standards.
#
# References:
# http://code.sixapart.com/svn/memcached/trunk/server/doc/protocol.txt
# http://www.unicode.org/charts/PDF/U0000.pdf
# http://www.unicode.org/charts/PDF/U0080.pdf
# NOTE(review): xrange and string.maketrans below are Python 2 only — this
# module predates Python 3.
_CONTROL_CODE_CHARS = ''.join(chr(i) for i in chain(xrange(0x20 + 1),
                                                    [0x7f],
                                                    xrange(0x80, 0x9f + 1)))

# String containing all ASCII characters, used for string#translate which
# is an efficient way of deleting characters from a string.
_ALL_CHARS = string.maketrans('', '')
def _format_key_arg(arg):
    '''
    Selectively formats args passed to `make_key`. Defaults to serializing
    into a Unicode string and then encoding in UTF-8.
    '''
    # Python 2 idiom: coerce any object to a UTF-8 byte string.
    to_string = lambda x: unicode(x).encode('utf8')
    if isinstance(arg, dict):
        # `str` is wasteful for dicts, for our case here.
        s = ','.join([to_string(key) + ':' + to_string(val)
                      for key,val in arg.items()])
    else:
        s = to_string(arg)
    # Strip control characters and spaces (which memcached won't allow).
    # (Python 2 str.translate(table, deletechars) two-argument form.)
    return s.translate(_ALL_CHARS, _CONTROL_CODE_CHARS)
def make_key(*args):
    '''
    This does a couple things to cleanly make a key out of the given arguments:
        1. Removes any control code characters and spaces [1] (which are
           illegal in memcached keys [2].)
        2. After serializing all arguments and joining them into one string, if
           the resulting length is > MAX_KEY_LENGTH bytes (250 by default,
           which is the memcached protocol limit), it generates a hash out of
           the key instead.

    It's possible the resulting key would be empty, so choose your args
    carefully to avoid this.

    TODO a further refinement of this would be to hash only the smallest part
    necessary to get it under the limit. Don't hash an entire key just for
    being 1 char too long. This would improve readability.

    [1] http://www.unicode.org/charts/PDF/U0000.pdf
        http://www.unicode.org/charts/PDF/U0080.pdf
    [2] http://code.sixapart.com/svn/memcached/trunk/server/doc/protocol.txt
    '''
    # None args are dropped rather than serialized.
    key = '.'.join(map(_format_key_arg, filter(lambda x: x is not None, args)))
    # If our key is too long, hash the part after the prefix,
    # and truncate as needed. cache.make_key() measures the FULL key,
    # including Django's version/prefix decoration.
    if len(cache.make_key(key)) > MAX_KEY_LENGTH:
        prefix = cache.make_key('')
        # Just to be safe... we should be able to have a key >= 1 char long :)
        if len(prefix) >= MAX_KEY_LENGTH:
            raise Exception('Your cache key prefixes are too long.')
        key = hashlib.md5(key).hexdigest()[:MAX_KEY_LENGTH - len(prefix)]
    return key
def _make_keys_from_function(func, *args, **kwargs):
    '''
    Add a bunch of hopefully uniquely identifying parameters to a list to be
    passed to `make_key`. It's pretty crafty in finding distinguishing params
    to use, but it is slightly conservative so as to not overrun the memcached
    key length limit, which results in a non-human-readable hash for a key.

    Returns a list of key parts; the caller joins them via `make_key`.
    '''
    keys = ['cached_func', func.__name__]
    # This works on both functions and class methods.
    signature_args = inspect.getargspec(func).args
    if (inspect.ismethod(func)
            or (signature_args and signature_args[0] in ['self', 'cls'])):
        # Method, probably.
        #
        # If ismethod returns True, it's definitely a method. Otherwise,
        # we have to guess based on the first arg of the function's signature.
        # This is the best guess we can have in Python, because the way a
        # function is passed to a decorator inside a class definition, the
        # decorated function is as yet neither a bound nor unbound method. It's
        # just a regular function. So we must guess from its args.
        #
        # A guess is good enough, since it only means that we add some extra
        # fields from the first arg. If we're wrong, the key is more
        # conservative (more detailed) than need be. We could wrongly call it a
        # function when it's actually a method, but only if they're doing
        # unsightly things like naming the "self" or "cls" arg something else.
        self = args[0]
        keys.append(self.__class__.__name__)
        if hasattr(self, 'pk'):  # django model? `pk` is a great differentiator!
            keys.append(self.pk)
        keys.extend(args[1:])
    else:
        # Function.
        keys.extend(args)
    # NOTE: kwargs.values() without the keys — two different kwargs with the
    # same value serialize identically.
    keys.extend(kwargs.values())
    # To be extra safe! (unreadable, so at end of key.)
    # If this results in any collisions, it actually won't make a difference.
    # It's fine to memoize functions that collide on this as if
    # they are one, since they're identical if their codeblock hash is the same.
    keys.append(pack_int(func.__code__.__hash__()))
    return keys
def _timedelta_to_seconds(t):
'''
Returns an int.
Tries to use Python 2.7's timedelta#total_seconds, if available.
'''
try:
return int(t.total_seconds())
except AttributeError:
return int(t.microseconds + (t.seconds + t.days * 86400))
def _make_namespace_key(namespace):
'''
Returns a likely-to-be-unique value that can be incremented with
`cache.incr`.
'''
# Use (an overly-cautious) time-since-epoch modulo decade, in nanoseconds.
decade_s = 3600 * 24 * 365 * 10 # decade in seconds.
return int((time.time() % decade_s) * 1e9)
def _get_namespace_key(namespace):
    '''
    Gets (or sets if uninitialized) the key prefix for the given namespace. The
    return value is used to prefix any keys that belong to the namespace.
    '''
    ns_key = cache.get(namespace)
    if not ns_key:
        # First use (or the namespace key was evicted): seed a fresh value.
        ns_key = _make_namespace_key(namespace)
        cache.set(namespace, ns_key)
    # Compact the key before returning it to save space when using it.
    return pack_int(ns_key)
def _process_namespace_name(namespace):
    '''
    A namespace can be any serializable object or list of objects, not just a
    string. This serializes the namespace name by passing it to `make_key`.
    '''
    if not isinstance(namespace, str):
        # Treat it as an iterable of name parts first...
        try:
            return make_key(*namespace)
        except TypeError:
            pass  # ...not iterable after all; fall through.
    # Strings (which must not be exploded into characters) and plain
    # scalar values are serialized as a single argument.
    return make_key(namespace)
def invalidate_namespace(namespace):
    '''
    If the namespace is already invalid (i.e. the namespace key has been
    deleted from the cache), this does nothing.

    This operation is atomic as long as the cache backend's `incr` is too.
    It is an O(1) operation, independent of the number of keys in a namespace.
    '''
    namespace = _process_namespace_name(namespace)
    try:
        # Bumping the namespace key changes the prefix of every key in the
        # namespace, orphaning the old entries.
        cache.incr(namespace)
    except ValueError:
        # The namespace is already invalid, since its key is gone.
        pass
def _make_key(keys, namespace, func, args, kwargs):
    '''
    Returns the cache key to use for the decorated function. Calls and replaces
    any callable items in `keys` with their return values before sending `keys`
    over to `make_key`. Does the same for a callable `namespace`.
    '''
    keys = keys or _make_keys_from_function(func, *args, **kwargs)
    def call_if_callable(key_arg):
        # Callable key parts are invoked with the decorated function's args.
        if callable(key_arg):
            return key_arg(*args, **kwargs)
        return key_arg
    # NOTE: relies on Python 2's map() returning a list (appended to below).
    keys = map(call_if_callable, keys)
    namespace = call_if_callable(namespace)
    if namespace:
        namespace = _process_namespace_name(namespace)
        # The namespace's current generation value becomes part of the key,
        # so bumping the namespace invalidates every key in it.
        keys.append(_get_namespace_key(namespace))
    return make_key(*keys)
def _set_cache(key, val, timeout):
    '''
    Wrapper around cache.set that also accepts timedelta timeouts.
    '''
    seconds = timeout
    if isinstance(seconds, timedelta):
        seconds = _timedelta_to_seconds(seconds)
    if seconds and seconds < 0:
        raise Exception('Cache timeout value must not be negative.')
    cache.set(key, val, timeout=seconds)
def _add_delete_cache_member(func, keys=None, namespace=None):
    '''
    Attaches a `delete_cache` function to `func`. Call it with the same
    arguments as `func` so the matching key can be derived and evicted; if
    the decorator was given explicit `keys`, it needs no arguments at all.
    '''
    def delete_cache(*args, **kwargs):
        cache.delete(_make_key(keys, namespace, func, args, kwargs))
    func.delete_cache = delete_cache
def cached_function(timeout=None, keys=None, namespace=None):
    '''
    Decorator that memoizes a function (or method) in the Django cache and
    attaches a `delete_cache` member for explicit invalidation.

    All kwargs are optional:

    `timeout` -- seconds as an int, or a timedelta (or None).

    `keys` -- iterable used to build the cache key. Each item is serialized,
    stripped of illegal characters and joined into a single key string.
    Items may be callables: they are invoked with the decorated function's
    own args and kwargs, and their return values are serialized into the
    key. When omitted, a deterministic and hopefully human-readable key is
    derived from the function itself.

    `namespace` -- groups keys so they can all be invalidated at once with
    `invalidate_namespace`, which is especially helpful for worker processes
    that don't know which keys are already set. It may be a string (or
    anything serializable) or a function; a function is called with the
    decorated function's args and must be deterministic. Its return value
    is serialized with `make_key`, so dynamic namespaces (e.g. per-user)
    are possible.
    '''
    def decorator(func):
        _add_delete_cache_member(func, keys=keys, namespace=namespace)

        @wraps(func)
        def wrapped(*args, **kwargs):
            key = _make_key(keys, namespace, func, args, kwargs)
            result = cache.get(key)
            if result is None:
                # Cache miss. (A None return value is stored but will look
                # like a miss on the next call, so it is recomputed.)
                result = func(*args, **kwargs)
                _set_cache(key, result, timeout)
            return result
        return wrapped
    return decorator
def _can_cache_request(request):
    '''
    A request is cacheable only when it is GET/HEAD and the Django messages
    framework has nothing queued for this user.
    '''
    pending = messages.get_messages(request)
    if len(pending) != 0:
        return False
    return request.method in ('GET', 'HEAD')
def _can_cache_response(response):
# Only set the cache if the HTTP response code is 200.
return (response.status_code != 200
or 'no-cache' in response.get('Cache-Control', '')
or 'no-cache' in response.get('Pragma', ''))
def cached_view(timeout=None, keys=None, namespace=None, add_user_to_key=False):
    '''
    Use this instead of `cached_function` for caching views. See
    `cached_function` for documentation on how to use this.

    Handles HttpRequest objects intelligently when auto-generating the
    cache key.

    Only caches GET and HEAD requests which have an HTTP 200 response code.
    Doesn't cache responses which have "Cache-Control: no-cache" or
    "Pragma: no-cache" in the headers.

    If `add_user_to_key` is True, the key will be prefixed with the user's ID,
    if logged in.
    '''
    def decorator(func):
        _add_delete_cache_member(func, keys=keys, namespace=namespace)
        @wraps(func)
        def wrapped(request, *args, **kwargs):
            # Uncacheable requests (non-GET/HEAD, pending messages) pass
            # straight through to the view.
            if not _can_cache_request(request):
                return func(request, *args, **kwargs)
            _keys = keys
            # Default keys.
            if not _keys:
                # Don't naively add the `request` arg to the cache key.
                _keys = _make_keys_from_function(func, *args, **kwargs)
                # Only add specific parts of the `request` object to the key.
                _keys.extend(chain.from_iterable(request.GET.items()))
                _keys.append(request.method)
                # Add the current language.
                _keys.append(translation.get_language())
                # Current site, if available.
                _keys.append(getattr(settings, 'SITE_ID', None))
            try:
                if add_user_to_key and request.user.is_authenticated():
                    _keys.append(request.user.id)
            except AttributeError: # maybe "auth" isn't installed.
                pass
            # NOTE(review): `args` here excludes `request`, so any callable
            # entries in `keys` are invoked WITHOUT the request object --
            # confirm this is intended for callable view keys.
            key = _make_key(_keys, namespace, func, args, kwargs)
            val = cache.get(key)
            if val is None:
                val = func(request, *args, **kwargs)
                if _can_cache_response(val):
                    _set_cache(key, val, timeout)
            return val
        return wrapped
    return decorator
Refactor how keys are generated.
Fix callable keys for decorated views.
from datetime import timedelta
from django.conf import settings
from django.contrib import messages
from django.core.cache import cache
from django.http import HttpRequest
from django.utils import translation
from functools import wraps
from intpacker import pack_int
from itertools import chain
import hashlib
import inspect
import re
import string
import time
# A memcached limit.
MAX_KEY_LENGTH = 250

# String containing all invalid characters to strip from memcached keys.
# Contains all control characters from C0 (0x00-0x20 and 0x7F),
# and C1 (0x80-0x9F) as defined by the ISO-2022 and ECMA-48 standards.
#
# References:
# http://code.sixapart.com/svn/memcached/trunk/server/doc/protocol.txt
# http://www.unicode.org/charts/PDF/U0000.pdf
# http://www.unicode.org/charts/PDF/U0080.pdf
# NOTE: `xrange` and `string.maketrans` below are Python 2 only.
_CONTROL_CODE_CHARS = ''.join(chr(i) for i in chain(xrange(0x20 + 1),
                                                    [0x7f],
                                                    xrange(0x80, 0x9f + 1)))

# String containing all ASCII characters, used for string#translate which
# is an efficient way of deleting characters from a string.
_ALL_CHARS = string.maketrans('', '')
def _format_key_arg(arg):
    '''
    Serializes one `make_key` argument into a UTF-8 byte string with all
    control characters and spaces removed (memcached forbids them in keys).
    '''
    def encode(value):
        return unicode(value).encode('utf8')

    if isinstance(arg, dict):
        # Serialize dicts as comma-separated key:value pairs; `str` would
        # be wasteful for our purposes here.
        serialized = ','.join(encode(k) + ':' + encode(v)
                              for k, v in arg.items())
    else:
        serialized = encode(arg)
    # Python 2 str.translate(table, deletechars): drop the bad characters.
    return serialized.translate(_ALL_CHARS, _CONTROL_CODE_CHARS)
def make_key(*args):
    '''
    Builds a single clean cache key from the given arguments:

        1. Drops None arguments, serializes the rest and strips control
           codes and spaces [1] (illegal in memcached keys [2]).
        2. Joins the parts with '.'; if the resulting key (with the
           backend's own prefix) would exceed MAX_KEY_LENGTH (250, the
           memcached protocol limit), an MD5 hash is used instead,
           truncated to fit.

    The result can be empty, so choose your args carefully to avoid that.

    TODO: hash only the smallest part necessary to get under the limit
    instead of the whole key -- don't sacrifice readability for 1 char.

    [1] http://www.unicode.org/charts/PDF/U0000.pdf
        http://www.unicode.org/charts/PDF/U0080.pdf
    [2] http://code.sixapart.com/svn/memcached/trunk/server/doc/protocol.txt
    '''
    key = '.'.join(_format_key_arg(a) for a in args if a is not None)
    # If our key is too long, hash the part after the prefix and truncate.
    if len(cache.make_key(key)) > MAX_KEY_LENGTH:
        prefix_length = len(cache.make_key(''))
        # Just to be safe... we should be able to have a key >= 1 char long :)
        if prefix_length >= MAX_KEY_LENGTH:
            raise Exception('Your cache key prefixes are too long.')
        key = hashlib.md5(key).hexdigest()[:MAX_KEY_LENGTH - prefix_length]
    return key
def _make_keys_from_function(func, *args, **kwargs):
    '''
    Derives a list of hopefully uniquely-identifying key parts for `func`
    and the given call arguments, to be handed to `make_key`. Slightly
    conservative so the key usually stays under the memcached length limit
    (overrunning it degrades the key to an unreadable hash).
    '''
    keys = ['cached_func', func.__name__]
    # Works on both functions and class methods.
    signature_args = inspect.getargspec(func).args
    looks_like_method = (
        inspect.ismethod(func)
        or (signature_args and signature_args[0] in ('self', 'cls')))
    if looks_like_method:
        # If ismethod returns True it's definitely a method; otherwise we
        # guess from the first signature arg. At decoration time inside a
        # class body the function is neither bound nor unbound yet -- it's
        # just a plain function -- so guessing from its args is the best
        # Python allows. A wrong guess only makes the key more detailed
        # than necessary, or treats a method as a function if someone named
        # "self"/"cls" something unsightly.
        instance = args[0]
        keys.append(instance.__class__.__name__)
        if hasattr(instance, 'pk'):  # django model? `pk` is a great differentiator!
            keys.append(instance.pk)
        keys.extend(args[1:])
    else:
        # Plain function.
        keys.extend(args)
    keys.extend(kwargs.values())
    # Extra safety net (unreadable, so it goes last). Collisions here are
    # harmless: functions with identical code-block hashes may share a
    # cache entry as if they were one.
    keys.append(pack_int(func.__code__.__hash__()))
    return keys
def _timedelta_to_seconds(t):
'''
Returns an int.
Tries to use Python 2.7's timedelta#total_seconds, if available.
'''
try:
return int(t.total_seconds())
except AttributeError:
return int(t.microseconds + (t.seconds + t.days * 86400))
def _make_namespace_key(namespace):
'''
Returns a likely-to-be-unique value that can be incremented with
`cache.incr`.
'''
# Use (an overly-cautious) time-since-epoch modulo decade, in nanoseconds.
decade_s = 3600 * 24 * 365 * 10 # decade in seconds.
return int((time.time() % decade_s) * 1e9)
def _get_namespace_key(namespace):
    '''
    Fetches the key prefix for `namespace` from the cache, creating and
    storing one on first use. Returned in packed form so that keys carrying
    the prefix stay short.
    '''
    value = cache.get(namespace)
    if not value:
        value = _make_namespace_key(namespace)
        cache.set(namespace, value)
    return pack_int(value)
def _process_namespace_name(namespace):
    '''
    Turns a namespace name (a string, any serializable object, or an
    iterable of serializable objects) into a key string via `make_key`.
    '''
    # A string is technically iterable, but should be serialized whole.
    if not isinstance(namespace, str):
        try:
            return make_key(*namespace)
        except TypeError:
            # Not actually iterable; fall through and serialize as-is.
            pass
    return make_key(namespace)
def invalidate_namespace(namespace):
    '''
    Invalidates every cached key under `namespace` by bumping its counter;
    an O(1) operation regardless of how many keys the namespace holds.
    Does nothing when the namespace key has already been dropped from the
    cache, and is atomic whenever the backend's `incr` is atomic.
    '''
    counter_key = _process_namespace_name(namespace)
    try:
        cache.incr(counter_key)
    except ValueError:
        # Counter key is gone: the namespace is already invalid.
        pass
def _make_key(keys, namespace, func_args, func_kwargs):
    '''
    Builds the cache key for a decorated function from the prepared `keys`.
    Callable key parts (and a callable `namespace`) are first invoked with
    the decorated function's own args; everything is then serialized by
    `make_key`.
    '''
    def resolve(part):
        if callable(part):
            part = part(*func_args, **func_kwargs)
        return part

    resolved = [resolve(part) for part in keys]
    ns = resolve(namespace)
    if ns:
        resolved.append(_get_namespace_key(_process_namespace_name(ns)))
    return make_key(*resolved)
def _set_cache(key, val, timeout):
    '''
    cache.set wrapper so our decorators may pass timedelta timeouts.
    '''
    seconds = (_timedelta_to_seconds(timeout)
               if isinstance(timeout, timedelta) else timeout)
    if seconds and seconds < 0:
        raise Exception('Cache timeout value must not be negative.')
    cache.set(key, val, timeout=seconds)
def _add_delete_cache_member(func, keys=None, namespace=None):
    '''
    Attaches a `delete_cache` member to `func` that evicts the cached
    value. Invoke it with the same arguments as `func` so the same key is
    derived; with explicit `keys` it needs no arguments at all.
    '''
    def delete_cache(*args, **kwargs):
        key_parts = keys or _make_keys_from_function(func, *args, **kwargs)
        cache.delete(_make_key(key_parts, namespace, args, kwargs))
    func.delete_cache = delete_cache
def cached_function(timeout=None, keys=None, namespace=None):
    '''
    Decorator that caches a function's (or method's) return value in the
    Django cache and attaches a `delete_cache` member for manual eviction.

    All kwargs are optional:

    `timeout` -- seconds as an int, or a timedelta (or None).

    `keys` -- iterable used to build the cache key. Each item is
    serialized, cleaned of illegal characters and joined into one key
    string. Items may be callables: they are invoked with the decorated
    function's own args and kwargs, and their return values are serialized
    into the key. When omitted, a deterministic and hopefully
    human-readable key is derived from the function itself.

    `namespace` -- groups keys so they can all be invalidated at once via
    `invalidate_namespace`, which is especially helpful for worker
    processes that don't know which keys are already set. It may be a
    string (or anything serializable) or a function; a function is called
    with the decorated function's args and *must* be deterministic. Its
    return value is serialized with `make_key`, so dynamic namespaces
    (e.g. per-user) are possible.
    '''
    def decorator(func):
        _add_delete_cache_member(func, keys=keys, namespace=namespace)

        @wraps(func)
        def wrapped(*args, **kwargs):
            key_parts = keys or _make_keys_from_function(func, *args, **kwargs)
            key = _make_key(key_parts, namespace, args, kwargs)
            result = cache.get(key)
            if result is None:
                # Cache miss. (A None return value is stored but will look
                # like a miss next call, so it is recomputed every time.)
                result = func(*args, **kwargs)
                _set_cache(key, result, timeout)
            return result
        return wrapped
    return decorator
def _can_cache_request(request):
    '''
    Cacheable requests are GET/HEAD requests with no Django messages
    pending for the user.
    '''
    if len(messages.get_messages(request)) != 0:
        return False
    return request.method in ('GET', 'HEAD')
def _can_cache_response(response):
# Only set the cache if the HTTP response code is 200.
return (response.status_code != 200
or 'no-cache' in response.get('Cache-Control', '')
or 'no-cache' in response.get('Pragma', ''))
def cached_view(timeout=None, keys=None, namespace=None, add_user_to_key=False):
    '''
    Use this instead of `cached_function` for caching views. See
    `cached_function` for documentation on how to use this.

    Handles HttpRequest objects intelligently when auto-generating the
    cache key: the request itself is never serialized into the key; only
    its GET params, method, current language and site are.

    Only caches GET and HEAD requests which have an HTTP 200 response code.
    Doesn't cache responses which have "Cache-Control: no-cache" or
    "Pragma: no-cache" in the headers.

    If `add_user_to_key` is True, the key will be prefixed with the user's
    ID, if logged in.
    '''
    def decorator(func):
        _add_delete_cache_member(func, keys=keys, namespace=namespace)
        @wraps(func)
        def wrapped(request, *args, **kwargs):
            # Uncacheable requests (non-GET/HEAD, pending messages) pass
            # straight through to the view.
            if not _can_cache_request(request):
                return func(request, *args, **kwargs)
            _keys = keys
            # Default keys.
            if not _keys:
                # Don't naively add the `request` arg to the cache key.
                _keys = _make_keys_from_function(func, *args, **kwargs)
                # Only add specific parts of the `request` object to the key.
                _keys.extend(chain.from_iterable(request.GET.items()))
                _keys.append(request.method)
                # Add the current language.
                _keys.append(translation.get_language())
                # Current site, if available.
                _keys.append(getattr(settings, 'SITE_ID', None))
            try:
                if add_user_to_key and request.user.is_authenticated():
                    _keys.append(request.user.id)
            except AttributeError: # maybe "auth" isn't installed.
                pass
            # Add `request` to `args` since _make_key wants all func args
            # in it (callable keys receive the request as their first arg).
            key = _make_key(_keys, namespace, (request,) + args, kwargs)
            val = cache.get(key)
            if val is None:
                # BUG FIX: `request` was previously passed twice here
                # (`func(request, request, *args, **kwargs)`), shifting
                # every positional arg and breaking all cache misses.
                val = func(request, *args, **kwargs)
                if _can_cache_response(val):
                    _set_cache(key, val, timeout)
            return val
        return wrapped
    return decorator
|
# -*- coding: utf-8 -*-
import sys
import json
import threading
import six
from funcy import select_keys, cached_property, once, once_per, monkey, wraps, walk, chain
from funcy.py2 import mapcat, map
from .cross import pickle, md5
import django
from django.utils.encoding import smart_str, force_text
from django.core.exceptions import ImproperlyConfigured
from django.db.models import Manager, Model
from django.db.models.query import QuerySet
from django.db.models.sql.datastructures import EmptyResultSet
from django.db.models.signals import pre_save, post_save, post_delete, m2m_changed
from .conf import model_profile, settings, ALL_OPS
from .utils import monkey_mix, stamp_fields, func_cache_key, cached_view_fab, family_has_profile
from .redis import redis_client, handle_connection_failure, load_script
from .tree import dnfs
from .invalidation import invalidate_obj, invalidate_dict, no_invalidation
from .transaction import transaction_states
from .signals import cache_read
# Public API of this module.
__all__ = ('cached_as', 'cached_view_as', 'install_cacheops')

# Process-local memoization for simple .get() calls (see QuerySetMixin.get
# and its local_get handling). Entries are never invalidated -- they live
# for the lifetime of the process.
_local_get_cache = {}
@handle_connection_failure
def cache_thing(cache_key, data, cond_dnfs, timeout, dbs=()):
    """
    Writes data to cache and creates appropriate invalidators.

    `data` is pickled and stored under `cache_key` with the given `timeout`
    by the Redis-side 'cache_thing' script; `cond_dnfs` (the invalidation
    conditions, JSON-serialized) is passed along so the script can register
    invalidators. Skips writing entirely when any of `dbs` is inside a
    dirty transaction, since the data may not be durable yet.
    """
    # Could have changed after last check, sometimes superficially
    if transaction_states.is_dirty(dbs):
        return
    load_script('cache_thing', settings.CACHEOPS_LRU)(
        keys=[cache_key],
        args=[
            pickle.dumps(data, -1),
            json.dumps(cond_dnfs, default=str),
            timeout
        ]
    )
def cached_as(*samples, **kwargs):
    """
    Caches results of a function and invalidates them same way as given queryset(s).
    NOTE: Ignores queryset cached ops settings, always caches.

    Accepted kwargs:
        timeout  -- cache timeout; defaults to the minimum timeout among the
                    sample querysets' cache profiles.
        extra    -- extra value mixed into the cache key.
        key_func -- callable building the cache key; defaults to
                    `func_cache_key`.
        lock     -- dog-pile lock flag; defaults to True if any sample's
                    profile requires it.
    """
    timeout = kwargs.pop('timeout', None)
    extra = kwargs.pop('extra', None)
    key_func = kwargs.pop('key_func', func_cache_key)
    lock = kwargs.pop('lock', None)
    if not samples:
        raise TypeError('Pass a queryset, a model or an object to cache like')
    if kwargs:
        raise TypeError('Unexpected keyword arguments %s' % ', '.join(kwargs))
    # If we unexpectedly get list instead of queryset return identity decorator.
    # Paginator could do this when page.object_list is empty.
    if len(samples) == 1 and isinstance(samples[0], list):
        return lambda func: func
    def _get_queryset(sample):
        # Normalize each sample (model instance, model class or queryset)
        # into a queryset we can derive invalidation conditions from.
        if isinstance(sample, Model):
            queryset = sample.__class__.objects.filter(pk=sample.pk)
        elif isinstance(sample, type) and issubclass(sample, Model):
            queryset = sample.objects.all()
        else:
            queryset = sample
        queryset._require_cacheprofile()
        return queryset
    # NOTE: this `map` is funcy.py2's list-returning map -- `querysets` is
    # iterated several times below, so a lazy iterator would not work here.
    querysets = map(_get_queryset, samples)
    dbs = {qs.db for qs in querysets}
    cond_dnfs = mapcat(dnfs, querysets)
    key_extra = [qs._cache_key() for qs in querysets]
    key_extra.append(extra)
    if not timeout: # TODO: switch to is None on major release
        timeout = min(qs._cacheprofile['timeout'] for qs in querysets)
    if lock is None:
        lock = any(qs._cacheprofile['lock'] for qs in querysets)
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            # Bypass caching entirely when disabled or inside a dirty
            # transaction on any involved database.
            if not settings.CACHEOPS_ENABLED or transaction_states.is_dirty(dbs):
                return func(*args, **kwargs)
            cache_key = 'as:' + key_func(func, args, kwargs, key_extra)
            with redis_client.getting(cache_key, lock=lock) as cache_data:
                cache_read.send(sender=None, func=func, hit=cache_data is not None)
                if cache_data is not None:
                    return pickle.loads(cache_data)
                else:
                    result = func(*args, **kwargs)
                    cache_thing(cache_key, result, cond_dnfs, timeout, dbs=dbs)
                    return result
        return wrapper
    return decorator
def cached_view_as(*samples, **kwargs):
    """View flavor of `cached_as`, built via `cached_view_fab`."""
    make_decorator = cached_view_fab(cached_as)
    return make_decorator(*samples, **kwargs)
class QuerySetMixin(object):
    """
    Monkey-mixed into Django's QuerySet (see install_cacheops / monkey_mix):
    adds cache controls (`cache`/`nocache`), cache-key computation, cached
    fetch/count/get/exists, and in-place cloning controls. `self._no_monkey`
    gives access to the original, un-patched QuerySet methods.
    """
    @cached_property
    def _cacheprofile(self):
        # Per-model cache settings; None means caching is not configured
        # for this model. Copied so per-queryset tweaks don't leak back.
        profile = model_profile(self.model)
        return profile.copy() if profile else None
    @cached_property
    def _cloning(self):
        # Countdown of clones still allowed; 0 makes refinements in-place.
        return 1000
    def _require_cacheprofile(self):
        # Raises unless caching is configured for this queryset's model.
        if self._cacheprofile is None:
            raise ImproperlyConfigured(
                'Cacheops is not enabled for %s.%s model.\n'
                'If you don\'t want to cache anything by default '
                'you can configure it with empty ops.'
                % (self.model._meta.app_label, self.model._meta.model_name))
    def _cache_key(self):
        """
        Compute a cache key for this queryset
        """
        md = md5()
        md.update('%s.%s' % (self.__class__.__module__, self.__class__.__name__))
        # Vary cache key for proxy models
        md.update('%s.%s' % (self.model.__module__, self.model.__name__))
        # Protect from field list changes in model
        md.update(stamp_fields(self.model))
        # Use query SQL as part of a key
        try:
            sql, params = self.query.get_compiler(self.db).as_sql()
            try:
                sql_str = sql % params
            except UnicodeDecodeError:
                sql_str = sql % walk(force_text, params)
            md.update(smart_str(sql_str))
        except EmptyResultSet:
            pass
        # If query results differ depending on database
        if self._cacheprofile and not self._cacheprofile['db_agnostic']:
            md.update(self.db)
        # Thing only appeared in Django 1.9
        it_class = getattr(self, '_iterable_class', None)
        if it_class:
            md.update('%s.%s' % (it_class.__module__, it_class.__name__))
        # 'flat' attribute changes results formatting for values_list() in Django 1.8 and earlier
        if hasattr(self, 'flat'):
            md.update(str(self.flat))
        return 'q:%s' % md.hexdigest()
    def _cache_results(self, cache_key, results):
        # Derive invalidation conditions from this queryset and store both.
        cond_dnfs = dnfs(self)
        cache_thing(cache_key, results, cond_dnfs, self._cacheprofile['timeout'], dbs=[self.db])
    def cache(self, ops=None, timeout=None, lock=None):
        """
        Enables caching for given ops
            ops        - a subset of {'get', 'fetch', 'count', 'exists'},
                         ops caching to be turned on, all enabled by default
            timeout    - override default cache timeout
            lock       - use lock to prevent dog-pile effect
        NOTE: you actually can disable caching by omiting corresponding ops,
              .cache(ops=[]) disables caching for this queryset.
        """
        self._require_cacheprofile()
        if ops is None or ops == 'all':
            ops = ALL_OPS
        if isinstance(ops, str):
            ops = {ops}
        self._cacheprofile['ops'] = set(ops)
        if timeout is not None:
            self._cacheprofile['timeout'] = timeout
        if lock is not None:
            self._cacheprofile['lock'] = lock
        return self
    def nocache(self):
        """
        Convinience method, turns off caching for this queryset
        """
        # cache profile not present means caching is not enabled for this model
        if self._cacheprofile is None:
            return self
        else:
            return self.cache(ops=[])
    def cloning(self, cloning=1000):
        # Sets how many more times this queryset may clone itself.
        self._cloning = cloning
        return self
    def inplace(self):
        # Further refinements mutate this queryset instead of cloning it.
        return self.cloning(0)
    # Clone signatures changed in Django 1.9; patch the matching shape.
    if django.VERSION >= (1, 9):
        def _clone(self, **kwargs):
            if self._cloning:
                return self.clone(**kwargs)
            else:
                # In-place mode: mutate self rather than producing a copy.
                self.__dict__.update(kwargs)
                return self
        def clone(self, **kwargs):
            # NOTE: need to copy profile so that clone changes won't affect this queryset
            if '_cacheprofile' in self.__dict__ and self._cacheprofile:
                kwargs.setdefault('_cacheprofile', self._cacheprofile.copy())
            clone = self._no_monkey._clone(self, **kwargs)
            clone._cloning = self._cloning - 1 if self._cloning else 0
            return clone
    else:
        def _clone(self, klass=None, setup=False, **kwargs):
            if self._cloning:
                return self.clone(klass, setup, **kwargs)
            elif klass is not None:
                # HACK: monkey patch self.query.clone for single call
                #       to return itself instead of cloning
                original_query_clone = self.query.clone
                def query_clone():
                    self.query.clone = original_query_clone
                    return self.query
                self.query.clone = query_clone
                return self.clone(klass, setup, **kwargs)
            else:
                # In-place mode: mutate self rather than producing a copy.
                self.__dict__.update(kwargs)
                return self
        def clone(self, klass=None, setup=False, **kwargs):
            # NOTE: need to copy profile so that clone changes won't affect this queryset
            if '_cacheprofile' in self.__dict__ and self._cacheprofile:
                kwargs.setdefault('_cacheprofile', self._cacheprofile.copy())
            clone = self._no_monkey._clone(self, klass, setup, **kwargs)
            clone._cloning = self._cloning - 1 if self._cloning else 0
            return clone
    def _fetch_all(self):
        # If already fetched, cache not enabled, within write or in dirty transaction then fall back
        if self._result_cache \
                or not settings.CACHEOPS_ENABLED \
                or not self._cacheprofile or 'fetch' not in self._cacheprofile['ops'] \
                or self._for_write \
                or transaction_states[self.db].is_dirty():
            return self._no_monkey._fetch_all(self)
        cache_key = self._cache_key()
        lock = self._cacheprofile['lock']
        with redis_client.getting(cache_key, lock=lock) as cache_data:
            cache_read.send(sender=self.model, func=None, hit=cache_data is not None)
            if cache_data is not None:
                self._result_cache = pickle.loads(cache_data)
            else:
                self._result_cache = list(self.iterator())
                self._cache_results(cache_key, self._result_cache)
        return self._no_monkey._fetch_all(self)
    def count(self):
        if self._cacheprofile and 'count' in self._cacheprofile['ops']:
            # Optmization borrowed from overriden method:
            # if queryset cache is already filled just return its len
            if self._result_cache is not None:
                return len(self._result_cache)
            return cached_as(self)(lambda: self._no_monkey.count(self))()
        else:
            return self._no_monkey.count(self)
    def get(self, *args, **kwargs):
        # .get() uses the same ._fetch_all() method to fetch data,
        # so here we add 'fetch' to ops
        if self._cacheprofile and 'get' in self._cacheprofile['ops']:
            # NOTE: local_get=True enables caching of simple gets in local memory,
            #       which is very fast, but not invalidated.
            # Don't bother with Q-objects, select_related and previous filters,
            # simple gets - thats what we are really up to here.
            if self._cacheprofile['local_get'] \
                    and not args \
                    and not self.query.select_related \
                    and not self.query.where.children:
                # NOTE: We use simpler way to generate a cache key to cut costs.
                #       Some day it could produce same key for diffrent requests.
                key = (self.__class__, self.model) + tuple(sorted(kwargs.items()))
                try:
                    return _local_get_cache[key]
                except KeyError:
                    _local_get_cache[key] = self._no_monkey.get(self, *args, **kwargs)
                    return _local_get_cache[key]
                except TypeError:
                    # If some arg is unhashable we can't save it to dict key,
                    # we just skip local cache in that case
                    pass
            if 'fetch' in self._cacheprofile['ops']:
                qs = self
            else:
                qs = self._clone().cache()
        else:
            qs = self
        return qs._no_monkey.get(qs, *args, **kwargs)
    def exists(self):
        if self._cacheprofile and 'exists' in self._cacheprofile['ops']:
            # If the queryset is already materialized, answer from it.
            if self._result_cache is not None:
                return bool(self._result_cache)
            return cached_as(self)(lambda: self._no_monkey.exists(self))()
        else:
            return self._no_monkey.exists(self)
    def bulk_create(self, objs, batch_size=None):
        # Delegate to the stock implementation, then invalidate each row.
        objs = self._no_monkey.bulk_create(self, objs, batch_size=batch_size)
        if family_has_profile(self.model):
            for obj in objs:
                invalidate_obj(obj)
        return objs
    def invalidated_update(self, **kwargs):
        # Materialize the rows before updating so both the pre-update and
        # post-update versions can be invalidated.
        clone = self._clone().nocache()
        clone._for_write = True # affects routing
        objects = list(clone)
        rows = clone.update(**kwargs) # Also drops queryset cache
        for obj in chain(objects, clone):
            invalidate_obj(obj)
        return rows
def connect_first(signal, receiver, sender):
    """
    Connects `receiver` to `signal` so that it runs BEFORE any receivers
    that were already connected.
    """
    existing = signal.receivers
    signal.receivers = []
    signal.connect(receiver, sender=sender, weak=False)
    signal.receivers.extend(existing)
# We need to stash old object before Model.save() to invalidate on its properties.
# Filled by ManagerMixin._pre_save and drained by ManagerMixin._post_save,
# keyed by (model class, pk); thread-local so concurrent saves don't clash.
_old_objs = threading.local()
class ManagerMixin(object):
    """
    Monkey-mixed into Django's Manager (see install_cacheops / monkey_mix):
    hooks model save/delete signals for cache invalidation and proxies the
    queryset cache controls. `self._no_monkey` gives access to the original,
    un-patched Manager methods.
    """
    @once_per('cls')
    def _install_cacheops(self, cls):
        # Wire invalidation signal handlers for `cls`, exactly once per class.
        if family_has_profile(cls):
            # Set up signals
            connect_first(pre_save, self._pre_save, sender=cls)
            connect_first(post_save, self._post_save, sender=cls)
            connect_first(post_delete, self._post_delete, sender=cls)
            # Install auto-created models as their module attributes to make them picklable
            module = sys.modules[cls.__module__]
            if not hasattr(module, cls.__name__):
                setattr(module, cls.__name__, cls)
    def contribute_to_class(self, cls, name):
        self._no_monkey.contribute_to_class(self, cls, name)
        # Django migrations create lots of fake models, just skip them
        # NOTE: we make it here rather then inside _install_cacheops()
        #       because we don't want @once_per() to hold refs to all of them.
        if cls.__module__ != '__fake__':
            self._install_cacheops(cls)
    def _pre_save(self, sender, instance, **kwargs):
        # Stash the about-to-be-overwritten row so _post_save can invalidate
        # caches keyed on its old field values.
        if not (instance.pk is None or instance._state.adding or no_invalidation.active):
            try:
                _old_objs.__dict__[sender, instance.pk] = sender.objects.get(pk=instance.pk)
            except sender.DoesNotExist:
                pass
    def _post_save(self, sender, instance, **kwargs):
        if not settings.CACHEOPS_ENABLED:
            return
        # Invoke invalidations for both old and new versions of saved object
        old = _old_objs.__dict__.pop((sender, instance.pk), None)
        if old:
            invalidate_obj(old)
        invalidate_obj(instance)
        # We run invalidations but skip caching if we are dirty
        if transaction_states[instance._state.db].is_dirty():
            return
        # NOTE: it's possible for this to be a subclass, e.g. proxy, without cacheprofile,
        #       but its base having one. Or vice versa.
        #       We still need to invalidate in this case, but cache on save better be skipped.
        cacheprofile = model_profile(instance.__class__)
        if not cacheprofile:
            return
        # Enabled cache_on_save makes us write saved object to cache.
        # Later it can be retrieved with .get(<cache_on_save_field>=<value>)
        # <cache_on_save_field> is pk unless specified.
        # This sweet trick saves a db request and helps with slave lag.
        cache_on_save = cacheprofile.get('cache_on_save')
        if cache_on_save:
            # HACK: We get this object "from field" so it can contain
            #       some undesirable attributes or other objects attached.
            #       RelatedField accessors do that, for example.
            #
            #       So we strip down any _*_cache attrs before saving
            #       and later reassign them
            unwanted_dict = select_keys(r'^_.*_cache$', instance.__dict__)
            for k in unwanted_dict:
                del instance.__dict__[k]
            key = 'pk' if cache_on_save is True else cache_on_save
            cond = {key: getattr(instance, key)}
            qs = sender.objects.inplace().filter(**cond).order_by()
            qs._cache_results(qs._cache_key(), [instance])
            # Reverting stripped attributes
            instance.__dict__.update(unwanted_dict)
    def _post_delete(self, sender, instance, **kwargs):
        """
        Invalidation upon object deletion.
        """
        # NOTE: this will behave wrong if someone changed object fields
        #       before deletion (why anyone will do that?)
        invalidate_obj(instance)
    # Convenience proxies to the queryset-level cache controls.
    def inplace(self):
        return self.get_queryset().inplace()
    def cache(self, *args, **kwargs):
        return self.get_queryset().cache(*args, **kwargs)
    def nocache(self):
        return self.get_queryset().nocache()
    def invalidated_update(self, **kwargs):
        return self.get_queryset().inplace().invalidated_update(**kwargs)
def invalidate_m2m(sender=None, instance=None, model=None, action=None, pk_set=None, reverse=None,
                   **kwargs):
    """
    Invoke invalidation on m2m changes.

    Connected to Django's m2m_changed signal; invalidates cached rows of the
    auto-created through model for each affected (instance, related) pair.
    """
    # Skip this machinery for explicit through tables,
    # since post_save and post_delete events are triggered for them
    if not sender._meta.auto_created:
        return
    # Act only on phases where the affected rows are still determinable
    if action not in ('pre_clear', 'post_add', 'pre_remove'):
        return

    # NOTE: .rel moved to .remote_field in Django 1.9
    if django.VERSION >= (1, 9):
        get_remote = lambda f: f.remote_field
    else:
        get_remote = lambda f: f.rel

    # Find the m2m field whose through model is the signaling sender
    m2m = next(m2m for m2m in instance._meta.many_to_many + model._meta.many_to_many
               if get_remote(m2m).through == sender)
    instance_column, model_column = m2m.m2m_column_name(), m2m.m2m_reverse_name()
    if reverse:
        instance_column, model_column = model_column, instance_column

    # TODO: optimize several invalidate_objs/dicts at once
    if action == 'pre_clear':
        # pk_set is not provided on clear, so fetch the rows being removed
        objects = sender.objects.filter(**{instance_column: instance.pk})
        for obj in objects:
            invalidate_obj(obj)
    elif action in ('post_add', 'pre_remove'):
        # NOTE: we don't need to query through objects here,
        # cause we already know all their meaningful attributes.
        for pk in pk_set:
            invalidate_dict(sender, {
                instance_column: instance.pk,
                model_column: pk
            })
@once
def install_cacheops():
    """
    Installs cacheops by numerous monkey patches
    """
    monkey_mix(Manager, ManagerMixin)
    monkey_mix(QuerySet, QuerySetMixin)
    # cached_property descriptors must be set on the class itself to work
    QuerySet._cacheprofile = QuerySetMixin._cacheprofile
    QuerySet._cloning = QuerySetMixin._cloning

    # Use app registry to introspect used apps
    from django.apps import apps

    # Install profile and signal handlers for any earlier created models
    for model in apps.get_models(include_auto_created=True):
        model._default_manager._install_cacheops(model)

    # Turn off caching in admin
    if apps.is_installed('django.contrib.admin'):
        from django.contrib.admin.options import ModelAdmin

        @monkey(ModelAdmin)
        def get_queryset(self, request):
            return get_queryset.original(self, request).nocache()

    # Bind m2m changed handler
    m2m_changed.connect(invalidate_m2m)

    # Make buffers/memoryviews pickleable to serialize binary field data
    if six.PY2:
        import copy_reg
        copy_reg.pickle(buffer, lambda b: (buffer, (bytes(b),)))  # noqa
    if six.PY3:
        import copyreg
        copyreg.pickle(memoryview, lambda b: (memoryview, (bytes(b),)))
Do not always use chunked cursors in Django 1.11

Previously ._fetch_all() called .iterator() to fill the cache, while
Django 1.11 uses different cursors for .iterator() and ._fetch_all().
# -*- coding: utf-8 -*-
import sys
import json
import threading
import six
from funcy import select_keys, cached_property, once, once_per, monkey, wraps, walk, chain
from funcy.py2 import mapcat, map
from .cross import pickle, md5
import django
from django.utils.encoding import smart_str, force_text
from django.core.exceptions import ImproperlyConfigured
from django.db.models import Manager, Model
from django.db.models.query import QuerySet
from django.db.models.sql.datastructures import EmptyResultSet
from django.db.models.signals import pre_save, post_save, post_delete, m2m_changed
from .conf import model_profile, settings, ALL_OPS
from .utils import monkey_mix, stamp_fields, func_cache_key, cached_view_fab, family_has_profile
from .redis import redis_client, handle_connection_failure, load_script
from .tree import dnfs
from .invalidation import invalidate_obj, invalidate_dict, no_invalidation
from .transaction import transaction_states
from .signals import cache_read
__all__ = ('cached_as', 'cached_view_as', 'install_cacheops')
_local_get_cache = {}
@handle_connection_failure
def cache_thing(cache_key, data, cond_dnfs, timeout, dbs=()):
    """
    Store data in cache and register the matching invalidators.
    """
    # The transaction may have become dirty since the caller's check,
    # sometimes superficially — bail out rather than cache stale data
    if transaction_states.is_dirty(dbs):
        return

    script = load_script('cache_thing', settings.CACHEOPS_LRU)
    script(
        keys=[cache_key],
        args=[
            pickle.dumps(data, -1),
            json.dumps(cond_dnfs, default=str),
            timeout,
        ],
    )
def cached_as(*samples, **kwargs):
    """
    Caches results of a function and invalidates them same way as given queryset(s).
    NOTE: Ignores queryset cached ops settings, always caches.

    Samples may be querysets, model classes or model instances.
    Options: timeout, extra (mixed into key), key_func, lock.
    """
    timeout = kwargs.pop('timeout', None)
    extra = kwargs.pop('extra', None)
    key_func = kwargs.pop('key_func', func_cache_key)
    lock = kwargs.pop('lock', None)
    if not samples:
        raise TypeError('Pass a queryset, a model or an object to cache like')
    if kwargs:
        raise TypeError('Unexpected keyword arguments %s' % ', '.join(kwargs))

    # If we unexpectedly get list instead of queryset return identity decorator.
    # Paginator could do this when page.object_list is empty.
    if len(samples) == 1 and isinstance(samples[0], list):
        return lambda func: func

    def _get_queryset(sample):
        # Normalize instances and model classes into querysets
        if isinstance(sample, Model):
            queryset = sample.__class__.objects.filter(pk=sample.pk)
        elif isinstance(sample, type) and issubclass(sample, Model):
            queryset = sample.objects.all()
        else:
            queryset = sample

        queryset._require_cacheprofile()

        return queryset

    querysets = map(_get_queryset, samples)
    dbs = {qs.db for qs in querysets}
    cond_dnfs = mapcat(dnfs, querysets)
    key_extra = [qs._cache_key() for qs in querysets]
    key_extra.append(extra)
    if not timeout:  # TODO: switch to is None on major release
        timeout = min(qs._cacheprofile['timeout'] for qs in querysets)
    if lock is None:
        lock = any(qs._cacheprofile['lock'] for qs in querysets)

    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            # Bypass caching entirely when disabled or inside a dirty transaction
            if not settings.CACHEOPS_ENABLED or transaction_states.is_dirty(dbs):
                return func(*args, **kwargs)

            cache_key = 'as:' + key_func(func, args, kwargs, key_extra)

            with redis_client.getting(cache_key, lock=lock) as cache_data:
                cache_read.send(sender=None, func=func, hit=cache_data is not None)
                if cache_data is not None:
                    return pickle.loads(cache_data)
                else:
                    result = func(*args, **kwargs)
                    cache_thing(cache_key, result, cond_dnfs, timeout, dbs=dbs)
                    return result

        return wrapper
    return decorator
def cached_view_as(*samples, **kwargs):
    """View-level counterpart of cached_as(): keys on the request URI."""
    view_decorator_factory = cached_view_fab(cached_as)
    return view_decorator_factory(*samples, **kwargs)
class QuerySetMixin(object):
    """Mixin monkey-patched into Django's QuerySet to add cacheops behavior."""

    @cached_property
    def _cacheprofile(self):
        # Per-queryset copy so .cache()/.nocache() tweaks stay local
        profile = model_profile(self.model)
        return profile.copy() if profile else None

    @cached_property
    def _cloning(self):
        # Countdown of clones left before going in-place; 1000 is effectively unlimited
        return 1000

    def _require_cacheprofile(self):
        """Raise ImproperlyConfigured unless caching is set up for this model."""
        if self._cacheprofile is None:
            raise ImproperlyConfigured(
                'Cacheops is not enabled for %s.%s model.\n'
                'If you don\'t want to cache anything by default '
                'you can configure it with empty ops.'
                % (self.model._meta.app_label, self.model._meta.model_name))

    def _cache_key(self):
        """
        Compute a cache key for this queryset
        """
        md = md5()
        md.update('%s.%s' % (self.__class__.__module__, self.__class__.__name__))
        # Vary cache key for proxy models
        md.update('%s.%s' % (self.model.__module__, self.model.__name__))
        # Protect from field list changes in model
        md.update(stamp_fields(self.model))
        # Use query SQL as part of a key
        try:
            sql, params = self.query.get_compiler(self.db).as_sql()
            try:
                sql_str = sql % params
            except UnicodeDecodeError:
                sql_str = sql % walk(force_text, params)
            md.update(smart_str(sql_str))
        except EmptyResultSet:
            pass
        # If query results differ depending on database
        if self._cacheprofile and not self._cacheprofile['db_agnostic']:
            md.update(self.db)
        # Thing only appeared in Django 1.9
        it_class = getattr(self, '_iterable_class', None)
        if it_class:
            md.update('%s.%s' % (it_class.__module__, it_class.__name__))
        # 'flat' attribute changes results formatting for values_list() in Django 1.8 and earlier
        if hasattr(self, 'flat'):
            md.update(str(self.flat))

        return 'q:%s' % md.hexdigest()

    def _cache_results(self, cache_key, results):
        # Write results along with invalidation conditions derived from this query
        cond_dnfs = dnfs(self)
        cache_thing(cache_key, results, cond_dnfs, self._cacheprofile['timeout'], dbs=[self.db])

    def cache(self, ops=None, timeout=None, lock=None):
        """
        Enables caching for given ops
            ops     - a subset of {'get', 'fetch', 'count', 'exists'},
                      ops caching to be turned on, all enabled by default
            timeout - override default cache timeout
            lock    - use lock to prevent dog-pile effect

        NOTE: you actually can disable caching by omitting corresponding ops,
              .cache(ops=[]) disables caching for this queryset.
        """
        self._require_cacheprofile()

        if ops is None or ops == 'all':
            ops = ALL_OPS
        if isinstance(ops, str):
            ops = {ops}
        self._cacheprofile['ops'] = set(ops)

        if timeout is not None:
            self._cacheprofile['timeout'] = timeout
        if lock is not None:
            self._cacheprofile['lock'] = lock

        return self

    def nocache(self):
        """
        Convenience method, turns off caching for this queryset
        """
        # cache profile not present means caching is not enabled for this model
        if self._cacheprofile is None:
            return self
        else:
            return self.cache(ops=[])

    def cloning(self, cloning=1000):
        # Reset the remaining-clones counter; 0 makes subsequent ops in-place
        self._cloning = cloning
        return self

    def inplace(self):
        """Return this queryset set to mutate in place instead of cloning."""
        return self.cloning(0)

    if django.VERSION >= (1, 9):
        def _clone(self, **kwargs):
            if self._cloning:
                return self.clone(**kwargs)
            else:
                # In-place mode: mutate self instead of producing a copy
                self.__dict__.update(kwargs)
                return self

        def clone(self, **kwargs):
            # NOTE: need to copy profile so that clone changes won't affect this queryset
            if '_cacheprofile' in self.__dict__ and self._cacheprofile:
                kwargs.setdefault('_cacheprofile', self._cacheprofile.copy())

            clone = self._no_monkey._clone(self, **kwargs)
            clone._cloning = self._cloning - 1 if self._cloning else 0
            return clone
    else:
        def _clone(self, klass=None, setup=False, **kwargs):
            if self._cloning:
                return self.clone(klass, setup, **kwargs)
            elif klass is not None:
                # HACK: monkey patch self.query.clone for single call
                #       to return itself instead of cloning
                original_query_clone = self.query.clone

                def query_clone():
                    self.query.clone = original_query_clone
                    return self.query

                self.query.clone = query_clone
                return self.clone(klass, setup, **kwargs)
            else:
                self.__dict__.update(kwargs)
                return self

        def clone(self, klass=None, setup=False, **kwargs):
            # NOTE: need to copy profile so that clone changes won't affect this queryset
            if '_cacheprofile' in self.__dict__ and self._cacheprofile:
                kwargs.setdefault('_cacheprofile', self._cacheprofile.copy())

            clone = self._no_monkey._clone(self, klass, setup, **kwargs)
            clone._cloning = self._cloning - 1 if self._cloning else 0
            return clone

    def _fetch_all(self):
        # If already fetched, cache not enabled, within write or in dirty transaction then fall back
        # NOTE(review): the truthiness check means an empty cached result ([]) does not
        # short-circuit here — confirm whether `is not None` was intended.
        if self._result_cache \
                or not settings.CACHEOPS_ENABLED \
                or not self._cacheprofile or 'fetch' not in self._cacheprofile['ops'] \
                or self._for_write \
                or transaction_states[self.db].is_dirty():
            return self._no_monkey._fetch_all(self)

        cache_key = self._cache_key()
        lock = self._cacheprofile['lock']
        with redis_client.getting(cache_key, lock=lock) as cache_data:
            cache_read.send(sender=self.model, func=None, hit=cache_data is not None)
            if cache_data is not None:
                self._result_cache = pickle.loads(cache_data)
            else:
                # _iterable_class appears in Django 1.9 and from Django 1.11
                # is used instead of an .iterator() call
                if hasattr(self, '_iterable_class'):
                    self._result_cache = list(self._iterable_class(self))
                else:
                    self._result_cache = list(self.iterator())
                self._cache_results(cache_key, self._result_cache)

        return self._no_monkey._fetch_all(self)

    def count(self):
        if self._cacheprofile and 'count' in self._cacheprofile['ops']:
            # Optimization borrowed from overridden method:
            # if queryset cache is already filled just return its len
            if self._result_cache is not None:
                return len(self._result_cache)
            return cached_as(self)(lambda: self._no_monkey.count(self))()
        else:
            return self._no_monkey.count(self)

    def get(self, *args, **kwargs):
        # .get() uses the same ._fetch_all() method to fetch data,
        # so here we add 'fetch' to ops
        if self._cacheprofile and 'get' in self._cacheprofile['ops']:
            # NOTE: local_get=True enables caching of simple gets in local memory,
            #       which is very fast, but not invalidated.
            # Don't bother with Q-objects, select_related and previous filters,
            # simple gets - that's what we are really up to here.
            if self._cacheprofile['local_get'] \
                    and not args \
                    and not self.query.select_related \
                    and not self.query.where.children:
                # NOTE: We use a simpler way to generate a cache key to cut costs.
                #       Some day it could produce same key for different requests.
                key = (self.__class__, self.model) + tuple(sorted(kwargs.items()))
                try:
                    return _local_get_cache[key]
                except KeyError:
                    _local_get_cache[key] = self._no_monkey.get(self, *args, **kwargs)
                    return _local_get_cache[key]
                except TypeError:
                    # If some arg is unhashable we can't save it to dict key,
                    # we just skip local cache in that case
                    pass

            if 'fetch' in self._cacheprofile['ops']:
                qs = self
            else:
                qs = self._clone().cache()
        else:
            qs = self

        return qs._no_monkey.get(qs, *args, **kwargs)

    def exists(self):
        if self._cacheprofile and 'exists' in self._cacheprofile['ops']:
            if self._result_cache is not None:
                return bool(self._result_cache)
            return cached_as(self)(lambda: self._no_monkey.exists(self))()
        else:
            return self._no_monkey.exists(self)

    def bulk_create(self, objs, batch_size=None):
        objs = self._no_monkey.bulk_create(self, objs, batch_size=batch_size)
        # Invalidate manually — bulk_create does not send post_save signals
        if family_has_profile(self.model):
            for obj in objs:
                invalidate_obj(obj)
        return objs

    def invalidated_update(self, **kwargs):
        clone = self._clone().nocache()
        clone._for_write = True  # affects routing

        # Fetch affected rows before the update to invalidate their old state
        objects = list(clone)
        rows = clone.update(**kwargs)  # Also drops queryset cache

        # Iterating `clone` again re-fetches rows in their updated state
        for obj in chain(objects, clone):
            invalidate_obj(obj)

        return rows
def connect_first(signal, receiver, sender):
    """Connect receiver to signal so it runs before all previously connected ones."""
    existing_receivers = signal.receivers
    signal.receivers = []
    signal.connect(receiver, sender=sender, weak=False)
    signal.receivers.extend(existing_receivers)
# We need to stash old object before Model.save() to invalidate on its properties.
# Thread-local keyed by (model class, pk); filled in _pre_save, drained in _post_save.
_old_objs = threading.local()
class ManagerMixin(object):
    """Mixin monkey-patched into Django's Manager to wire cacheops signals
    and expose caching shortcuts on the default manager."""

    @once_per('cls')
    def _install_cacheops(self, cls):
        """Connect invalidation signal handlers for cls (once per class)."""
        if family_has_profile(cls):
            # Set up signals
            connect_first(pre_save, self._pre_save, sender=cls)
            connect_first(post_save, self._post_save, sender=cls)
            connect_first(post_delete, self._post_delete, sender=cls)

            # Install auto-created models as their module attributes to make them picklable
            module = sys.modules[cls.__module__]
            if not hasattr(module, cls.__name__):
                setattr(module, cls.__name__, cls)

    def contribute_to_class(self, cls, name):
        self._no_monkey.contribute_to_class(self, cls, name)
        # Django migrations create lots of fake models, just skip them
        # NOTE: we make it here rather than inside _install_cacheops()
        #       because we don't want @once_per() to hold refs to all of them.
        if cls.__module__ != '__fake__':
            self._install_cacheops(cls)

    def _pre_save(self, sender, instance, **kwargs):
        # Stash the row being overwritten so _post_save can invalidate its old state
        if not (instance.pk is None or instance._state.adding or no_invalidation.active):
            try:
                _old_objs.__dict__[sender, instance.pk] = sender.objects.get(pk=instance.pk)
            except sender.DoesNotExist:
                pass

    def _post_save(self, sender, instance, **kwargs):
        """Invalidate caches for the saved object and optionally cache it (cache_on_save)."""
        if not settings.CACHEOPS_ENABLED:
            return

        # Invoke invalidations for both old and new versions of saved object
        old = _old_objs.__dict__.pop((sender, instance.pk), None)
        if old:
            invalidate_obj(old)
        invalidate_obj(instance)

        # We run invalidations but skip caching if we are dirty
        if transaction_states[instance._state.db].is_dirty():
            return

        # NOTE: it's possible for this to be a subclass, e.g. proxy, without cacheprofile,
        # but its base having one. Or vice versa.
        # We still need to invalidate in this case, but cache on save better be skipped.
        cacheprofile = model_profile(instance.__class__)
        if not cacheprofile:
            return

        # Enabled cache_on_save makes us write saved object to cache.
        # Later it can be retrieved with .get(<cache_on_save_field>=<value>)
        # <cache_on_save_field> is pk unless specified.
        # This sweet trick saves a db request and helps with slave lag.
        cache_on_save = cacheprofile.get('cache_on_save')
        if cache_on_save:
            # HACK: We get this object "from field" so it can contain
            #       some undesirable attributes or other objects attached.
            #       RelatedField accessors do that, for example.
            #
            #       So we strip down any _*_cache attrs before saving
            #       and later reassign them
            unwanted_dict = select_keys(r'^_.*_cache$', instance.__dict__)
            for k in unwanted_dict:
                del instance.__dict__[k]

            key = 'pk' if cache_on_save is True else cache_on_save
            cond = {key: getattr(instance, key)}
            qs = sender.objects.inplace().filter(**cond).order_by()
            qs._cache_results(qs._cache_key(), [instance])

            # Reverting stripped attributes
            instance.__dict__.update(unwanted_dict)

    def _post_delete(self, sender, instance, **kwargs):
        """
        Invalidation upon object deletion.
        """
        # NOTE: this will behave wrong if someone changed object fields
        #       before deletion (why would anyone do that?)
        invalidate_obj(instance)

    def inplace(self):
        """Shortcut: queryset with cloning disabled."""
        return self.get_queryset().inplace()

    def cache(self, *args, **kwargs):
        """Shortcut: queryset with caching enabled for given ops."""
        return self.get_queryset().cache(*args, **kwargs)

    def nocache(self):
        """Shortcut: queryset with caching disabled."""
        return self.get_queryset().nocache()

    def invalidated_update(self, **kwargs):
        """Shortcut: UPDATE that also invalidates affected cached objects."""
        return self.get_queryset().inplace().invalidated_update(**kwargs)
def invalidate_m2m(sender=None, instance=None, model=None, action=None, pk_set=None, reverse=None,
                   **kwargs):
    """
    Invoke invalidation on m2m changes.

    Connected to Django's m2m_changed signal; invalidates cached rows of the
    auto-created through model for each affected (instance, related) pair.
    """
    # Skip this machinery for explicit through tables,
    # since post_save and post_delete events are triggered for them
    if not sender._meta.auto_created:
        return
    # Act only on phases where the affected rows are still determinable
    if action not in ('pre_clear', 'post_add', 'pre_remove'):
        return

    # NOTE: .rel moved to .remote_field in Django 1.9
    if django.VERSION >= (1, 9):
        get_remote = lambda f: f.remote_field
    else:
        get_remote = lambda f: f.rel

    # Find the m2m field whose through model is the signaling sender
    m2m = next(m2m for m2m in instance._meta.many_to_many + model._meta.many_to_many
               if get_remote(m2m).through == sender)
    instance_column, model_column = m2m.m2m_column_name(), m2m.m2m_reverse_name()
    if reverse:
        instance_column, model_column = model_column, instance_column

    # TODO: optimize several invalidate_objs/dicts at once
    if action == 'pre_clear':
        # pk_set is not provided on clear, so fetch the rows being removed
        objects = sender.objects.filter(**{instance_column: instance.pk})
        for obj in objects:
            invalidate_obj(obj)
    elif action in ('post_add', 'pre_remove'):
        # NOTE: we don't need to query through objects here,
        # cause we already know all their meaningful attributes.
        for pk in pk_set:
            invalidate_dict(sender, {
                instance_column: instance.pk,
                model_column: pk
            })
@once
def install_cacheops():
    """
    Installs cacheops by numerous monkey patches
    """
    monkey_mix(Manager, ManagerMixin)
    monkey_mix(QuerySet, QuerySetMixin)
    # cached_property descriptors must be set on the class itself to work
    QuerySet._cacheprofile = QuerySetMixin._cacheprofile
    QuerySet._cloning = QuerySetMixin._cloning

    # Use app registry to introspect used apps
    from django.apps import apps

    # Install profile and signal handlers for any earlier created models
    for model in apps.get_models(include_auto_created=True):
        model._default_manager._install_cacheops(model)

    # Turn off caching in admin
    if apps.is_installed('django.contrib.admin'):
        from django.contrib.admin.options import ModelAdmin

        @monkey(ModelAdmin)
        def get_queryset(self, request):
            return get_queryset.original(self, request).nocache()

    # Bind m2m changed handler
    m2m_changed.connect(invalidate_m2m)

    # Make buffers/memoryviews pickleable to serialize binary field data
    if six.PY2:
        import copy_reg
        copy_reg.pickle(buffer, lambda b: (buffer, (bytes(b),)))  # noqa
    if six.PY3:
        import copyreg
        copyreg.pickle(memoryview, lambda b: (memoryview, (bytes(b),)))
|
# Copyright (c) 2009 gocept gmbh & co. kg
# See also LICENSE.txt
import zope.viewlet.interfaces
class IEditorContentViewletManager(zope.viewlet.interfaces.IViewletManager):
    """Viewlets which compose an area.

    Marker interface for the editor-content viewlet manager.
    """
class IEditBarViewletManager(zope.viewlet.interfaces.IViewletManager):
    """Viewlets which compose an edit bar."""
class IFormTabsViewletManager(zope.viewlet.interfaces.IViewletManager):
    """Viewlets for creating the tabs in the form area."""
Remove the unused viewlet manager
# Copyright (c) 2009 gocept gmbh & co. kg
# See also LICENSE.txt
import zope.viewlet.interfaces
class IEditorContentViewletManager(zope.viewlet.interfaces.IViewletManager):
    """Viewlets which compose an area.

    Marker interface for the editor-content viewlet manager.
    """
class IEditBarViewletManager(zope.viewlet.interfaces.IViewletManager):
    """Viewlets which compose an edit bar."""
|
# -*- coding: utf-8 -*-
from operator import concat
from itertools import product
from functools import wraps, reduce
import inspect
import six
# Use Python 2 map here for now
from funcy.py2 import memoize, map, cat
from .cross import json, md5hex
import django
from django.db import models
from django.db.models.query import QuerySet
from django.db.models.sql import AND, OR
from django.db.models.sql.query import Query, ExtraWhere
from django.db.models.sql.where import EverythingNode, NothingNode
from django.db.models.sql.expressions import SQLEvaluator
# A new thing in Django 1.6
try:
from django.db.models.sql.where import SubqueryConstraint
except ImportError:
class SubqueryConstraint(object):
pass
# A new things in Django 1.7
try:
from django.db.models.lookups import Lookup, Exact, In, IsNull
except ImportError:
class Lookup(object):
pass
from django.http import HttpRequest
from .conf import redis_client
LONG_DISJUNCTION = 8
def non_proxy(model):
    """Walk up from a proxy model to its concrete base; return model itself otherwise."""
    current = model
    while current._meta.proxy:
        # Every proxy model has exactly one non-abstract parent model
        current = next(base for base in current.__bases__
                       if issubclass(base, models.Model) and not base._meta.abstract)
    return current
# Model name accessor was renamed from _meta.module_name to _meta.model_name in Django 1.6
if django.VERSION < (1, 6):
    def get_model_name(model):
        """Return the model's name from its _meta options (pre-1.6 spelling)."""
        return model._meta.module_name
else:
    def get_model_name(model):
        """Return the model's name from its _meta options."""
        return model._meta.model_name
class MonkeyProxy(object):
    """Namespace object holding pre-patch copies of methods for monkey_mix().

    Inherits the copies collected on any base classes that were mixed earlier.
    """
    def __init__(self, cls):
        for base in cls.__bases__:
            if hasattr(base, '_no_monkey'):
                self.__dict__.update(base._no_monkey.__dict__)
def monkey_mix(cls, mixin, methods=None):
    """
    Mixes a mixin into existing class.
    Does not use actual multi-inheritance mixins, just monkey patches methods.
    Mixin methods can call copies of original ones stored in `_no_monkey` proxy:

        class SomeMixin(object):
            def do_smth(self, arg):
                ... do smth else before
                self._no_monkey.do_smth(self, arg)
                ... do smth else after
    """
    assert '_no_monkey' not in cls.__dict__, 'Multiple monkey mix not supported'
    cls._no_monkey = MonkeyProxy(cls)

    if methods is None:
        # NOTE: there is no such thing as an unbound method in Python 3, it uses naked
        #       functions, so we use some six based altering here
        isboundmethod = inspect.isfunction if six.PY3 else inspect.ismethod
        methods = inspect.getmembers(mixin, isboundmethod)
    else:
        methods = [(m, getattr(mixin, m)) for m in methods]

    for name, method in methods:
        if hasattr(cls, name):
            # Keep a copy of the original so mixin methods can delegate to it
            setattr(cls._no_monkey, name, getattr(cls, name))
        # NOTE: remember, there are no bound methods in Python 3
        setattr(cls, name, six.get_unbound_function(method))
# NOTE: we don't serialize these fields since their values could be very long
# and one should not filter by their equality anyway.
NOT_SERIALIZED_FIELDS = (
    models.FileField,
    models.TextField,  # One should not filter by long text equality
)
# BinaryField is not present in older Django versions, hence the guard
if hasattr(models, 'BinaryField'):
    NOT_SERIALIZED_FIELDS += (models.BinaryField,)  # Not possible to filter by it
def dnfs(qs):
    """
    Converts query condition tree into a DNF of eq conds.
    Separately for each alias.

    Any negations, conditions with lookups other than __exact or __in,
    conditions on joined models and subrequests are ignored.
    __in is converted into = or = or = ...

    Returns a list of (table_name, dnf) pairs, where dnf is a list of
    conjunctions, each a sorted list of (attname, value) pairs.
    """
    SOME = object()  # sentinel for "some condition we can't serialize"
    SOME_COND = (None, None, SOME, True)

    def negate(term):
        return (term[0], term[1], term[2], not term[3])

    def _dnf(where):
        """
        Constructs DNF of where tree consisting of terms in form:
            (alias, attribute, value, negation)
        meaning `alias.attribute = value`
         or `not alias.attribute = value` if negation is False

        Any conditions other than eq are dropped.
        """
        # Lookups appeared in Django 1.7
        if isinstance(where, Lookup):
            attname = where.lhs.target.attname

            # TODO: check if all of these are possible
            if isinstance(where.rhs, (QuerySet, Query, SQLEvaluator)):
                return [[SOME_COND]]
            # TODO: deal with transforms, aggregates and such in lhs
            elif isinstance(where, Exact):
                if isinstance(where.lhs.target, NOT_SERIALIZED_FIELDS):
                    return [[SOME_COND]]
                else:
                    return [[(where.lhs.alias, attname, where.rhs, True)]]
            elif isinstance(where, IsNull):
                return [[(where.lhs.alias, attname, None, where.rhs)]]
            elif isinstance(where, In) and len(where.rhs) < LONG_DISJUNCTION:
                # Expand short __in lookups into a disjunction of equalities
                return [[(where.lhs.alias, attname, v, True)] for v in where.rhs]
            else:
                return [[SOME_COND]]
        # Django 1.6 and earlier used tuples to encode conditions
        elif isinstance(where, tuple):
            constraint, lookup, annotation, value = where
            attname = attname_of(model, constraint.col)
            if isinstance(value, (QuerySet, Query, SQLEvaluator)):
                return [[SOME_COND]]
            elif lookup == 'exact':
                # TODO: check for non-serialized for both exact and in
                if isinstance(constraint.field, NOT_SERIALIZED_FIELDS):
                    return [[SOME_COND]]
                else:
                    return [[(constraint.alias, attname, value, True)]]
            elif lookup == 'isnull':
                return [[(constraint.alias, attname, None, value)]]
            elif lookup == 'in' and len(value) < LONG_DISJUNCTION:
                return [[(constraint.alias, attname, v, True)] for v in value]
            else:
                return [[SOME_COND]]
        elif isinstance(where, EverythingNode):
            return [[]]
        elif isinstance(where, NothingNode):
            return []
        elif isinstance(where, (ExtraWhere, SubqueryConstraint)):
            return [[SOME_COND]]
        elif len(where) == 0:
            return [[]]
        else:
            # NOTE: funcy.py2 map returns a list here, so len() below is valid
            chilren_dnfs = map(_dnf, where.children)

            if len(chilren_dnfs) == 0:
                return [[]]
            elif len(chilren_dnfs) == 1:
                result = chilren_dnfs[0]
            else:
                # Just unite children joined with OR
                if where.connector == OR:
                    result = cat(chilren_dnfs)
                # Use Cartesian product to AND children
                else:
                    result = map(cat, product(*chilren_dnfs))

            # Negating and expanding brackets
            if where.negated:
                result = [map(negate, p) for p in product(*result)]

            return result

    def clean_conj(conj, for_alias):
        # "SOME" conds, negated conds and conds for other aliases should be stripped
        return [(attname, value) for alias, attname, value, negation in conj
                if value is not SOME and negation and alias == for_alias]

    def clean_dnf(tree, for_alias):
        cleaned = [clean_conj(conj, for_alias) for conj in tree]
        # Any empty conjunction eats up the rest
        # NOTE: a more elaborate DNF reduction is not really needed,
        #       just keep your querysets sane.
        if not all(cleaned):
            return [[]]
        # To keep all schemes the same we sort conjunctions
        return map(sorted, cleaned)

    def table_for(alias):
        if alias == main_alias:
            return model._meta.db_table
        else:
            return qs.query.alias_map[alias][0]

    where = qs.query.where
    model = qs.model
    main_alias = model._meta.db_table

    dnf = _dnf(where)

    # Gather all table aliases mentioned in the DNF, always including the main table
    aliases = set(alias for conj in dnf
                  for alias, _, _, _ in conj
                  if alias)
    aliases.add(main_alias)

    return [(table_for(alias), clean_dnf(dnf, alias)) for alias in aliases]
def attname_of(model, col, cache={}):
    """Map a db column name to the model field attname, falling back to col itself.

    NOTE: the mutable default is an intentional per-process memo cache.
    """
    try:
        mapping = cache[model]
    except KeyError:
        mapping = cache.setdefault(
            model, dict((f.db_column, f.attname) for f in model._meta.fields))
    return mapping.get(col, col)
@memoize
def stamp_fields(model):
    """
    Returns serialized description of model fields.
    """
    field_descriptions = [(f.name, f.attname, f.db_column, f.__class__)
                          for f in model._meta.fields]
    return md5hex(str(field_descriptions))
### Cache keys calculation
def func_cache_key(func, args, kwargs, extra=None):
"""
Calculate cache key based on func and arguments
"""
factors = [func.__module__, func.__name__, func.__code__.co_firstlineno, args, kwargs, extra]
return md5hex(json.dumps(factors, sort_keys=True, default=str))
def view_cache_key(func, args, kwargs, extra=None):
    """
    Calculate cache key for a view func.

    The request argument is not reliably serializable, so its absolute URI
    is mixed into `extra` instead and the request itself is dropped.
    """
    request_uri = args[0].build_absolute_uri()
    return 'v:' + func_cache_key(func, args[1:], kwargs, extra=(request_uri, extra))
def cached_view_fab(_cached):
    """
    Build a view-level caching decorator factory from a function-level one.

    The produced decorator keys on the request URI (via view_cache_key) and
    only caches safe methods (GET/HEAD); other methods call the view directly.
    """
    def cached_view(*dargs, **dkwargs):
        def decorator(func):
            # FIX: the underlying decorator (see cached_as) accepts `key_func`;
            # the old `_get_key` name made it raise "Unexpected keyword arguments".
            dkwargs['key_func'] = view_cache_key
            cached_func = _cached(*dargs, **dkwargs)(func)

            @wraps(func)
            def wrapper(request, *args, **kwargs):
                assert isinstance(request, HttpRequest), \
                    "A view should be passed with HttpRequest as first argument"

                # Never cache mutating requests
                if request.method not in ('GET', 'HEAD'):
                    return func(request, *args, **kwargs)

                return cached_func(request, *args, **kwargs)
            return wrapper
        return decorator
    return cached_view
### Lua script loader

import os.path


@memoize
def load_script(name):
    """Read lua/<name>.lua next to this module and register it with redis."""
    # TODO: strip comments
    script_path = os.path.join(os.path.dirname(__file__), 'lua/%s.lua' % name)
    with open(script_path) as script_file:
        source = script_file.read()
    return redis_client.register_script(source)
### Whitespace handling for template tags

import re
from django.utils.safestring import mark_safe

NEWLINE_BETWEEN_TAGS = mark_safe('>\n<')
SPACE_BETWEEN_TAGS = mark_safe('> <')


def carefully_strip_whitespace(text):
    """Collapse inter-tag whitespace while keeping single separators."""
    # Whitespace spanning a line break collapses to a single newline
    text = re.sub(r'>\s*\n\s*<', NEWLINE_BETWEEN_TAGS, text)
    # Runs of two or more spaces between tags collapse to one space
    text = re.sub(r'>\s{2,}<', SPACE_BETWEEN_TAGS, text)
    return text
Remove an excessive comment and a stray newline
# -*- coding: utf-8 -*-
from operator import concat
from itertools import product
from functools import wraps, reduce
import inspect
import six
# Use Python 2 map here for now
from funcy.py2 import memoize, map, cat
from .cross import json, md5hex
import django
from django.db import models
from django.db.models.query import QuerySet
from django.db.models.sql import AND, OR
from django.db.models.sql.query import Query, ExtraWhere
from django.db.models.sql.where import EverythingNode, NothingNode
from django.db.models.sql.expressions import SQLEvaluator
# A new thing in Django 1.6
try:
from django.db.models.sql.where import SubqueryConstraint
except ImportError:
class SubqueryConstraint(object):
pass
# A new things in Django 1.7
try:
from django.db.models.lookups import Lookup, Exact, In, IsNull
except ImportError:
class Lookup(object):
pass
from django.http import HttpRequest
from .conf import redis_client
LONG_DISJUNCTION = 8
def non_proxy(model):
    """Walk up from a proxy model to its concrete base; return model itself otherwise."""
    current = model
    while current._meta.proxy:
        # Every proxy model has exactly one non-abstract parent model
        current = next(base for base in current.__bases__
                       if issubclass(base, models.Model) and not base._meta.abstract)
    return current
# Model name accessor was renamed from _meta.module_name to _meta.model_name in Django 1.6
if django.VERSION < (1, 6):
    def get_model_name(model):
        """Return the model's name from its _meta options (pre-1.6 spelling)."""
        return model._meta.module_name
else:
    def get_model_name(model):
        """Return the model's name from its _meta options."""
        return model._meta.model_name
class MonkeyProxy(object):
    """Namespace object holding pre-patch copies of methods for monkey_mix().

    Inherits the copies collected on any base classes that were mixed earlier.
    """
    def __init__(self, cls):
        for base in cls.__bases__:
            if hasattr(base, '_no_monkey'):
                self.__dict__.update(base._no_monkey.__dict__)
def monkey_mix(cls, mixin, methods=None):
    """
    Mixes a mixin into existing class.
    Does not use actual multi-inheritance mixins, just monkey patches methods.
    Mixin methods can call copies of original ones stored in `_no_monkey` proxy:

        class SomeMixin(object):
            def do_smth(self, arg):
                ... do smth else before
                self._no_monkey.do_smth(self, arg)
                ... do smth else after
    """
    assert '_no_monkey' not in cls.__dict__, 'Multiple monkey mix not supported'
    cls._no_monkey = MonkeyProxy(cls)

    if methods is None:
        # NOTE: there is no such thing as an unbound method in Python 3, it uses naked
        #       functions, so we use some six based altering here
        isboundmethod = inspect.isfunction if six.PY3 else inspect.ismethod
        methods = inspect.getmembers(mixin, isboundmethod)
    else:
        methods = [(m, getattr(mixin, m)) for m in methods]

    for name, method in methods:
        if hasattr(cls, name):
            # Keep a copy of the original so mixin methods can delegate to it
            setattr(cls._no_monkey, name, getattr(cls, name))
        # NOTE: remember, there are no bound methods in Python 3
        setattr(cls, name, six.get_unbound_function(method))
# NOTE: we don't serialize these fields since their values could be very long
# and one should not filter by their equality anyway.
NOT_SERIALIZED_FIELDS = (
    models.FileField,
    models.TextField,  # One should not filter by long text equality
)
# BinaryField is not present in older Django versions, hence the guard
if hasattr(models, 'BinaryField'):
    NOT_SERIALIZED_FIELDS += (models.BinaryField,)
def dnfs(qs):
    """
    Converts query condition tree into a DNF of eq conds.
    Separately for each alias.

    Any negations, conditions with lookups other than __exact or __in,
    conditions on joined models and subrequests are ignored.
    __in is converted into = or = or = ...

    Returns a list of (db_table, dnf) pairs, where dnf is a list of
    conjunctions and each conjunction a sorted list of (attname, value).

    BUGFIX: on Python 3 ``map()`` returns a one-shot iterator, which broke
    the ``len()`` checks and list reuse below; all ``map()`` calls were
    replaced with list comprehensions (identical behavior on Python 2).
    """
    SOME = object()
    # Placeholder for a condition we cannot (or choose not to) serialize.
    SOME_COND = (None, None, SOME, True)

    def negate(term):
        return (term[0], term[1], term[2], not term[3])

    def _dnf(where):
        """
        Constructs DNF of where tree consisting of terms in form:
            (alias, attribute, value, negation)
        meaning `alias.attribute = value`
         or `not alias.attribute = value` if negation is False

        Any conditions other then eq are dropped.
        """
        # Lookups appeared in Django 1.7
        if isinstance(where, Lookup):
            attname = where.lhs.target.attname

            # TODO: check if all of these are possible
            if isinstance(where.rhs, (QuerySet, Query, SQLEvaluator)):
                return [[SOME_COND]]
            # TODO: deal with transforms, aggregates and such in lhs
            elif isinstance(where, Exact):
                if isinstance(where.lhs.target, NOT_SERIALIZED_FIELDS):
                    return [[SOME_COND]]
                else:
                    return [[(where.lhs.alias, attname, where.rhs, True)]]
            elif isinstance(where, IsNull):
                return [[(where.lhs.alias, attname, None, where.rhs)]]
            elif isinstance(where, In) and len(where.rhs) < LONG_DISJUNCTION:
                return [[(where.lhs.alias, attname, v, True)] for v in where.rhs]
            else:
                return [[SOME_COND]]
        # Django 1.6 and earlier used tuples to encode conditions
        elif isinstance(where, tuple):
            constraint, lookup, annotation, value = where
            attname = attname_of(model, constraint.col)

            if isinstance(value, (QuerySet, Query, SQLEvaluator)):
                return [[SOME_COND]]
            elif lookup == 'exact':
                # TODO: check for non-serialized for both exact and in
                if isinstance(constraint.field, NOT_SERIALIZED_FIELDS):
                    return [[SOME_COND]]
                else:
                    return [[(constraint.alias, attname, value, True)]]
            elif lookup == 'isnull':
                return [[(constraint.alias, attname, None, value)]]
            elif lookup == 'in' and len(value) < LONG_DISJUNCTION:
                return [[(constraint.alias, attname, v, True)] for v in value]
            else:
                return [[SOME_COND]]
        elif isinstance(where, EverythingNode):
            return [[]]
        elif isinstance(where, NothingNode):
            return []
        elif isinstance(where, (ExtraWhere, SubqueryConstraint)):
            return [[SOME_COND]]
        elif len(where) == 0:
            return [[]]
        else:
            # Materialize as a list: on Python 3 map() yields a one-shot
            # iterator and the len() checks below would raise TypeError.
            children_dnfs = [_dnf(child) for child in where.children]

            if len(children_dnfs) == 0:
                return [[]]
            elif len(children_dnfs) == 1:
                result = children_dnfs[0]
            else:
                # Just unite children joined with OR
                if where.connector == OR:
                    result = cat(children_dnfs)
                # Use Cartesian product to AND children
                else:
                    result = [cat(p) for p in product(*children_dnfs)]

            # Negating and expanding brackets
            if where.negated:
                result = [[negate(term) for term in p] for p in product(*result)]

            return result

    def clean_conj(conj, for_alias):
        # "SOME" conds, negated conds and conds for other aliases should be stripped
        return [(attname, value) for alias, attname, value, negation in conj
                if value is not SOME and negation and alias == for_alias]

    def clean_dnf(tree, for_alias):
        cleaned = [clean_conj(conj, for_alias) for conj in tree]
        # Any empty conjunction eats up the rest
        # NOTE: a more elaborate DNF reduction is not really needed,
        #       just keep your querysets sane.
        if not all(cleaned):
            return [[]]
        # To keep all schemes the same we sort conjunctions
        # (list comprehension, not map(): the result must be a real list)
        return [sorted(conj) for conj in cleaned]

    def table_for(alias):
        if alias == main_alias:
            return model._meta.db_table
        else:
            return qs.query.alias_map[alias][0]

    where = qs.query.where
    model = qs.model
    main_alias = model._meta.db_table

    dnf = _dnf(where)

    # Collect all aliases that actually appear in the DNF plus the main one.
    aliases = set(alias for conj in dnf
                        for alias, _, _, _ in conj
                        if alias)
    aliases.add(main_alias)

    return [(table_for(alias), clean_dnf(dnf, alias)) for alias in aliases]
def attname_of(model, col, cache={}):
    """Translate a db column name into the model attribute name.

    Falls back to returning ``col`` unchanged when no field maps to it.
    The mutable-default ``cache`` dict deliberately persists across calls,
    memoizing one column->attname mapping per model.
    """
    try:
        mapping = cache[model]
    except KeyError:
        mapping = cache[model] = {f.db_column: f.attname for f in model._meta.fields}
    return mapping.get(col, col)
@memoize
def stamp_fields(model):
    """Return an md5 hex digest describing the model's field layout.

    Memoized per model; the stamp changes whenever any field is added,
    renamed, re-columned or changes its class.
    """
    descriptors = [(f.name, f.attname, f.db_column, f.__class__)
                   for f in model._meta.fields]
    return md5hex(str(descriptors))
### Cache keys calculation
def func_cache_key(func, args, kwargs, extra=None):
    """Build a cache key from a function's identity and its call arguments.

    The key factors in module, name and first line number (to tell apart
    same-named functions), the args/kwargs, and optional extra data.
    Non-JSON-serializable factors degrade to their str() form.
    """
    identity = [func.__module__, func.__name__, func.__code__.co_firstlineno]
    factors = identity + [args, kwargs, extra]
    return md5hex(json.dumps(factors, sort_keys=True, default=str))
def view_cache_key(func, args, kwargs, extra=None):
    """Calculate a cache key for a view func.

    The request object (first positional argument) is not properly
    serializable, so its absolute URI stands in for it; the remaining
    arguments are keyed as usual via func_cache_key.
    """
    request, rest = args[0], args[1:]
    uri = request.build_absolute_uri()
    return 'v:' + func_cache_key(func, rest, kwargs, extra=(uri, extra))
def cached_view_fab(_cached):
    """Adapt a caching decorator factory `_cached` for use on Django views.

    Returns a `cached_view(...)` decorator factory that keys the cache by
    request URI (via `view_cache_key`) and bypasses caching entirely for
    non-GET/HEAD requests.
    """
    def cached_view(*dargs, **dkwargs):
        def decorator(func):
            # Force URI-based key calculation for views.
            dkwargs['_get_key'] = view_cache_key
            cached_func = _cached(*dargs, **dkwargs)(func)
            @wraps(func)
            def wrapper(request, *args, **kwargs):
                assert isinstance(request, HttpRequest), \
                    "A view should be passed with HttpRequest as first argument"
                # Never cache mutating requests.
                if request.method not in ('GET', 'HEAD'):
                    return func(request, *args, **kwargs)
                return cached_func(request, *args, **kwargs)
            return wrapper
        return decorator
    return cached_view
### Lua script loader
import os.path
@memoize
def load_script(name):
    """Read `lua/<name>.lua` (relative to this file) and register it with redis.

    Memoized, so each script is read from disk and registered only once
    per process.
    """
    # TODO: strip comments
    filename = os.path.join(os.path.dirname(__file__), 'lua/%s.lua' % name)
    with open(filename) as f:
        code = f.read()
    return redis_client.register_script(code)
### Whitespace handling for template tags
import re
from django.utils.safestring import mark_safe
# Replacement snippets are mark_safe'd so templates can emit them verbatim.
NEWLINE_BETWEEN_TAGS = mark_safe('>\n<')
SPACE_BETWEEN_TAGS = mark_safe('> <')

def carefully_strip_whitespace(text):
    """Collapse whitespace between adjacent tags without gluing them together.

    Whitespace runs containing a newline become a single newline; any other
    run of two or more spaces between tags becomes a single space.
    """
    collapsed = re.sub(r'>\s*\n\s*<', NEWLINE_BETWEEN_TAGS, text)
    return re.sub(r'>\s{2,}<', SPACE_BETWEEN_TAGS, collapsed)
|
from genderize import Genderize, GenderizeException
import csv
import sys
import os.path
import time
import argparse
import logging
import jpyhelper as jpyh
'''
Copyright (c) 2017 Jonathan Holtmann, MIT License
https://github.com/jholtmann/genderize_csv
'''
def genderize(args):
    """Bulk-genderize a CSV of names via the genderize.io API.

    Reads names from ``args.input``, queries the API in chunks of 10 and
    writes ``names,gender,probability,count`` rows to a timestamped variant
    of ``args.output``.  In ``--auto`` mode only unique names are queried;
    the full list is then completed from the collected responses.
    """
    print(args)

    # File/logging initialization: log.txt is created next to this script.
    dir_path = os.path.dirname(os.path.realpath(__file__))
    logging.basicConfig(filename=dir_path + os.sep + "log.txt", level=logging.DEBUG,
        format='%(asctime)s %(levelname)s %(name)s %(message)s')
    logger=logging.getLogger(__name__)

    # Timestamp the output name so repeated runs never clobber each other.
    ofilename, ofile_extension = os.path.splitext(args.output)
    ofile = ofilename + "_" + time.strftime("%Y%m%d-%H%M%S") + ".csv"
    ifile = args.input

    if os.path.isabs(ifile):
        print("\n--- Input file: " + ifile)
    else:
        print("\n--- Input file: " + dir_path + os.sep + ifile)

    if os.path.isabs(ofile):
        print("--- Output file: " + ofile)
    else:
        print("--- Output file: " + dir_path + os.sep + ofile + "\n")

    # File integrity checking
    if not os.path.exists(ifile):
        print("--- Input file does not exist. Exiting.\n")
        sys.exit()

    # NOTE(review): for a bare relative output name os.path.dirname(ofile)
    # is '' and os.path.exists('') is False, so it is rejected here —
    # confirm this is intended.
    if not os.path.exists(os.path.dirname(ofile)):
        print("--- Error! Invalid output file path. Exiting.\n")
        sys.exit()

    # Some set up stuff: allow arbitrarily large CSV fields.
    csv.field_size_limit(sys.maxsize)

    # Initialize API key ("NO_API" sentinel means anonymous requests).
    if not args.key == "NO_API":
        print("--- API key: " + args.key + "\n")
        genderize = Genderize(
            user_agent='GenderizeDocs/0.0',
            api_key=args.key)
        key_present = True
    else:
        print("--- No API key provided.\n")
        key_present = False

    #Open ifile
    with open(ifile, 'r', encoding="utf8") as csvfile:
        readCSV = csv.reader(csvfile, delimiter=',')
        names = []
        for row in readCSV: #Read CSV into names list
            names.append(row)

        if args.noheader == False:
            names.pop(0) #Remove header

        # Flatten rows into a single flat list of names.
        o_names = list()
        for l in names:
            for b in l:
                o_names.append(b)

        # genderize.io accepts at most 10 names per request -> chunk the list.
        if args.auto == True:
            uniq_names = list(set(o_names))
            chunks = list(jpyh.splitlist(uniq_names, 10));
            print("--- Read CSV with " + str(len(names)) + " names. " + str(len(uniq_names)) + " unique.")
        else:
            chunks = list(jpyh.splitlist(names, 10));
            print("--- Read CSV with " + str(len(names)) + " names")
        print("--- Processed into " + str(len(chunks)) + " chunks")

        if jpyh.query_yes_no("\n---! Ready to send to Genderdize. Proceed?") == False:
            print("Exiting...\n")
            sys.exit()

        if os.path.isfile(ofile):
            if jpyh.query_yes_no("---! Output file exists, overwrite?") == False:
                print("Exiting...\n")
                sys.exit()
            print("\n")

        # In auto mode raw responses go to a temp file first; the final file
        # is written by the completion pass below.
        # NOTE(review): the .tmp file is never removed afterwards — confirm.
        if args.auto == True:
            ofile = ofile + ".tmp"

        response_time = [];  # per-chunk request durations, for the ETA estimate
        gender_responses = list()
        with open(ofile, 'w', newline='', encoding="utf8") as f:
            writer = csv.writer(f)
            writer.writerow(list(["names", "gender", "probability", "count"]))
            chunks_len = len(chunks)
            stopped = False
            for index, chunk in enumerate(chunks):
                if stopped:
                    break
                success = False
                while not success:
                    try:
                        start = time.time()
                        if key_present:
                            dataset = genderize.get(chunk)
                        else:
                            dataset = Genderize().get(chunk)
                        gender_responses.append(dataset)
                        success = True
                    except GenderizeException as e:
                        #print("\n" + str(e))
                        logger.error(e)

                        # Error handling: offer a retry on 502-style replies;
                        # anything else is reported and stops the run.
                        if "response not in JSON format" in str(e) and args.catch == True:
                            if jpyh.query_yes_no("\n---!! 502 detected, try again?") == True:
                                success = False
                                continue
                        elif "Invalid API key" in str(e) and args.catch == True:
                            print("\n---!! Error, invalid API key! Check log file for details.\n")
                        else:
                            print("\n---!! GenderizeException - You probably exceeded the request limit, please add or purchase a API key. Check log file for details.\n")
                        stopped = True
                        break

                    response_time.append(time.time() - start)
                    print("Processed chunk " + str(index + 1) + " of " + str(chunks_len) + " -- Time remaining (est.): " + \
                        str( round( (sum(response_time) / len(response_time) * (chunks_len - index - 1)), 3)) + "s")

                    for data in dataset:
                        writer.writerow(data.values())
                    break

        if args.auto == True:
            print("\nCompleting identical names...\n")

            #AUTOCOMPLETE NAMES
            # Create master dict: name -> [gender, probability, count] from
            # every collected API response.
            gender_dict = dict()
            for response in gender_responses:
                for d in response:
                    gender_dict[d.get("name")] = [d.get("gender"), d.get("probability"), d.get("count")]

            # Dropping the ".tmp" extension restores the real output filename.
            filename, file_extension = os.path.splitext(ofile)
            with open(filename, 'w', newline='', encoding="utf8") as f:
                writer = csv.writer(f)
                writer.writerow(list(["names", "gender", "probability", "count"]))
                for name in o_names:
                    # NOTE(review): gender_dict.get(name) is None when a chunk
                    # failed before covering this name -> TypeError below.
                    data = gender_dict.get(name)
                    writer.writerow([name, data[0], data[1], data[2]])

        print("Done!\n")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Bulk genderize.io script')
    required = parser.add_argument_group('required arguments')
    required.add_argument('-i','--input', help='Input file name', required=True)
    required.add_argument('-o','--output', help='Output file name', required=True)
    parser.add_argument('-k','--key', help='API key', required=False, default="NO_API")
    # NOTE(review): action='store_true' combined with default=True makes
    # -c/--catch a no-op (args.catch is always True) — confirm whether the
    # default should be False so the flag actually toggles catching.
    parser.add_argument('-c','--catch', help='Try to gracefully handle errors', required=False, action='store_true', default=True)
    parser.add_argument('-a','--auto', help='Automatically complete gender for identical names', required=False, action='store_true', default=False)
    parser.add_argument('-nh','--noheader', help='Input has no header row', required=False, action='store_true', default=False)

    genderize(parser.parse_args())
# Implemented auto mode  (stray commit-message line from version control, kept as a comment)
from genderize import Genderize, GenderizeException
import csv
import sys
import os.path
import time
import argparse
import logging
import jpyhelper as jpyh
'''
Copyright (c) 2017 Jonathan Holtmann, MIT License
https://github.com/jholtmann/genderize_csv
'''
def genderize(args):
    """Bulk-genderize a CSV of names via the genderize.io API.

    Reads names from ``args.input``, queries the API in chunks of 10 and
    writes ``names,gender,probability,count`` rows to a timestamped variant
    of ``args.output``.  In ``--auto`` mode only unique names are queried;
    the full list is then completed from the collected responses.
    """
    print(args)

    # File/logging initialization: log.txt is created next to this script.
    dir_path = os.path.dirname(os.path.realpath(__file__))
    logging.basicConfig(filename=dir_path + os.sep + "log.txt", level=logging.DEBUG,
        format='%(asctime)s %(levelname)s %(name)s %(message)s')
    logger=logging.getLogger(__name__)

    # Timestamp the output name so repeated runs never clobber each other.
    ofilename, ofile_extension = os.path.splitext(args.output)
    ofile = ofilename + "_" + time.strftime("%Y%m%d-%H%M%S") + ".csv"
    ifile = args.input

    if os.path.isabs(ifile):
        print("\n--- Input file: " + ifile)
    else:
        print("\n--- Input file: " + dir_path + os.sep + ifile)

    if os.path.isabs(ofile):
        print("--- Output file: " + ofile)
    else:
        print("--- Output file: " + dir_path + os.sep + ofile + "\n")

    # File integrity checking
    if not os.path.exists(ifile):
        print("--- Input file does not exist. Exiting.\n")
        sys.exit()

    # NOTE(review): for a bare relative output name os.path.dirname(ofile)
    # is '' and os.path.exists('') is False, so it is rejected here —
    # confirm this is intended.
    if not os.path.exists(os.path.dirname(ofile)):
        print("--- Error! Invalid output file path. Exiting.\n")
        sys.exit()

    # Some set up stuff: allow arbitrarily large CSV fields.
    csv.field_size_limit(sys.maxsize)

    # Initialize API key ("NO_API" sentinel means anonymous requests).
    if not args.key == "NO_API":
        print("--- API key: " + args.key + "\n")
        genderize = Genderize(
            user_agent='GenderizeDocs/0.0',
            api_key=args.key)
        key_present = True
    else:
        print("--- No API key provided.\n")
        key_present = False

    #Open ifile
    with open(ifile, 'r', encoding="utf8") as csvfile:
        readCSV = csv.reader(csvfile, delimiter=',')
        names = []
        for row in readCSV: #Read CSV into names list
            names.append(row)

        if args.noheader == False:
            names.pop(0) #Remove header

        # Flatten rows into a single flat list of names.
        o_names = list()
        for l in names:
            for b in l:
                o_names.append(b)

        # genderize.io accepts at most 10 names per request -> chunk the list.
        if args.auto == True:
            uniq_names = list(set(o_names))
            chunks = list(jpyh.splitlist(uniq_names, 10));
            print("--- Read CSV with " + str(len(names)) + " names. " + str(len(uniq_names)) + " unique.")
        else:
            chunks = list(jpyh.splitlist(names, 10));
            print("--- Read CSV with " + str(len(names)) + " names")
        print("--- Processed into " + str(len(chunks)) + " chunks")

        if jpyh.query_yes_no("\n---! Ready to send to Genderdize. Proceed?") == False:
            print("Exiting...\n")
            sys.exit()

        if os.path.isfile(ofile):
            if jpyh.query_yes_no("---! Output file exists, overwrite?") == False:
                print("Exiting...\n")
                sys.exit()
            print("\n")

        # In auto mode raw responses go to a temp file first; the final file
        # is written by the completion pass below.
        # NOTE(review): the .tmp file is never removed afterwards — confirm.
        if args.auto == True:
            ofile = ofile + ".tmp"

        response_time = [];  # per-chunk request durations, for the ETA estimate
        gender_responses = list()
        with open(ofile, 'w', newline='', encoding="utf8") as f:
            writer = csv.writer(f)
            writer.writerow(list(["names", "gender", "probability", "count"]))
            chunks_len = len(chunks)
            stopped = False
            for index, chunk in enumerate(chunks):
                if stopped:
                    break
                success = False
                while not success:
                    try:
                        start = time.time()
                        if key_present:
                            dataset = genderize.get(chunk)
                        else:
                            dataset = Genderize().get(chunk)
                        gender_responses.append(dataset)
                        success = True
                    except GenderizeException as e:
                        #print("\n" + str(e))
                        logger.error(e)

                        # Error handling: offer a retry on 502-style replies;
                        # anything else is reported and stops the run.
                        if "response not in JSON format" in str(e) and args.catch == True:
                            if jpyh.query_yes_no("\n---!! 502 detected, try again?") == True:
                                success = False
                                continue
                        elif "Invalid API key" in str(e) and args.catch == True:
                            print("\n---!! Error, invalid API key! Check log file for details.\n")
                        else:
                            print("\n---!! GenderizeException - You probably exceeded the request limit, please add or purchase a API key. Check log file for details.\n")
                        stopped = True
                        break

                    response_time.append(time.time() - start)
                    print("Processed chunk " + str(index + 1) + " of " + str(chunks_len) + " -- Time remaining (est.): " + \
                        str( round( (sum(response_time) / len(response_time) * (chunks_len - index - 1)), 3)) + "s")

                    for data in dataset:
                        writer.writerow(data.values())
                    break

        if args.auto == True:
            print("\nCompleting identical names...\n")

            #AUTOCOMPLETE NAMES
            # Create master dict: name -> [gender, probability, count] from
            # every collected API response.
            gender_dict = dict()
            for response in gender_responses:
                for d in response:
                    gender_dict[d.get("name")] = [d.get("gender"), d.get("probability"), d.get("count")]

            # Dropping the ".tmp" extension restores the real output filename.
            filename, file_extension = os.path.splitext(ofile)
            with open(filename, 'w', newline='', encoding="utf8") as f:
                writer = csv.writer(f)
                writer.writerow(list(["names", "gender", "probability", "count"]))
                for name in o_names:
                    # NOTE(review): gender_dict.get(name) is None when a chunk
                    # failed before covering this name -> TypeError below.
                    data = gender_dict.get(name)
                    writer.writerow([name, data[0], data[1], data[2]])

        print("Done!\n")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Bulk genderize.io script')
    required = parser.add_argument_group('required arguments')
    required.add_argument('-i','--input', help='Input file name', required=True)
    required.add_argument('-o','--output', help='Output file name', required=True)
    parser.add_argument('-k','--key', help='API key', required=False, default="NO_API")
    # NOTE(review): action='store_true' combined with default=True makes
    # -c/--catch a no-op (args.catch is always True) — confirm whether the
    # default should be False so the flag actually toggles catching.
    parser.add_argument('-c','--catch', help='Try to handle errors gracefully', required=False, action='store_true', default=True)
    parser.add_argument('-a','--auto', help='Automatically complete gender for identical names', required=False, action='store_true', default=False)
    parser.add_argument('-nh','--noheader', help='Input has no header row', required=False, action='store_true', default=False)

    genderize(parser.parse_args())
|
# -*- coding: utf-8 -*-
##
##
## Example of a Learning Activity Tree
##
##
if __name__ == "__main__":
    import os
    from django.core.wsgi import get_wsgi_application
    # Boot Django so the ORM is usable from this standalone seeding script.
    # NOTE: Python 2 print statements — this script does not run on Python 3.
    print "####### DJANGO SETTINGS"
    os.environ['DJANGO_SETTINGS_MODULE'] = "protoboard.settings"
    application = get_wsgi_application()
    from activitytree.models import LearningStyleInventory, LearningActivity, Course, UserLearningActivity
    from django.contrib.auth.models import User
    from activitytree.interaction_handler import SimpleSequencing

    # Start from a clean slate: drop every existing LearningActivity.
    LearningActivity.objects.all().delete()
POO = LearningActivity( name = 'Prog OO en C#', slug = 'POO',
uri = "/activity/POO",
parent = None,
root = None,
flow = True,
forward_only = False,
choice = True,
choice_exit = False,
rollup_rule = "satisfied IF All satisfied",
rollup_objective = True,
rollup_progress = True,
is_container = True,
is_visible = True,
order_in_container = 0
)
POO.save()
description= u"""
<p> Que no te intimiden las palabras <code>class</code> , <code>abstract</code> , <code>override</code> o te dé miedo eso del
<strong> polimorfismo </strong> o te emociones con la <strong> herencia múltiple</strong>.</p>
<p> Ya deberías saber programación básica en algún lenguaje de programación. </p>"""
cursoPOO = Course(short_description=description, root=POO)
cursoPOO.save()
pretest = LearningActivity( name = 'Pretest', slug = 'Pretest',
uri = "/test/Pretest",
# lom = ,
parent = POO, root = POO,
pre_condition_rule = "",
post_condition_rule = "" ,
rollup_rule = "",
rollup_objective = True,
rollup_progress = True,
choice_exit = False,
is_container = False,
is_visible = False,
order_in_container = 1
)
pretest.save()
content = LearningActivity( name = 'Contenido', slug = 'Contenido',
uri = "/activity/Contenido",
# lom =
parent = POO, root = POO,
pre_condition_rule = "",
post_condition_rule = "",
flow = True,
forward_only = False,
choice = True,
choice_exit = False,
match_rule = "",
filter_rule = "",
rollup_rule = "satisfied IF Any satisfied",
rollup_objective = True,
rollup_progress = True,
is_container = True,
is_visible = True,
order_in_container = 2
)
content.save()
preliminar = LearningActivity( name = 'Comentario Preliminar', slug = 'Preliminar',
uri = "/activity/Preliminar",
# lom =
parent = content, root = POO,
pre_condition_rule = "",
post_condition_rule = "",
rollup_rule = "",
rollup_objective = True,
rollup_progress = True,
is_container = False,
is_visible = True,
order_in_container = 0
)
preliminar.save()
program_1 = LearningActivity( name = 'Ejercicio 1', slug = 'E1',
uri = "/program/1",
# lom =
parent = content, root = POO,
# pre_condition_rule = """self.recommendation_value = Text_Verbal.eval(self.user.learningstyleinventory.verbal,self.user.learningstyleinventory.visual)""" ,
pre_condition_rule = "",
post_condition_rule = "",
flow = True,
forward_only = True,
choice = False,
rollup_objective = True,
rollup_progress = True,
is_container = False,
is_visible = True, order_in_container = 1
)
program_1.save()
program_2 = LearningActivity( name = 'Ejercicio 2', slug = 'E2',
uri = "/program/2",
# lom =
parent = content, root = POO,
# pre_condition_rule = """self.recommendation_value = Text_Verbal.eval(self.user.learningstyleinventory.verbal,self.user.learningstyleinventory.visual)""" ,
pre_condition_rule = "",
post_condition_rule = "",
flow = True,
forward_only = True,
choice = False,
rollup_objective = True,
rollup_progress = True,
is_container = False,
is_visible = True, order_in_container = 2
)
program_2.save()
objetosyclases = LearningActivity( name = 'Objetos y Clases', slug = 'OBJETOS_CLASES',
uri = "/activity/Objetos_y_Clases",
# lom =
parent = content, root = POO,
# pre_condition_rule = """self.recommendation_value = Text_Verbal.eval(self.user.learningstyleinventory.verbal,self.user.learningstyleinventory.visual)""" ,
post_condition_rule = "",
flow = True,
forward_only = False,
choice = False,
rollup_rule = "",
rollup_objective = True,
rollup_progress = True,
is_container = True,
is_visible = False,
order_in_container = 1
)
objetosyclases.save()
objetos_y_clases_html = LearningActivity( name = 'Objetos y Clases HTML', slug = 'OBJETOS_CLASES_HTML',
uri = "/activity/Objetos_y_Clases_HTML",
# lom =
parent = objetosyclases, root = POO,
pre_condition_rule = "",
post_condition_rule = "",
flow = True,
forward_only = False,
choice = False,
rollup_objective = True,
rollup_progress = True,
is_container = False,
is_visible = True,
order_in_container = 0
)
objetos_y_clases_html.save()
objetos_y_clases_YouTube = LearningActivity( name = 'Objetos y Clases YouTube', slug = 'OBJETOS_CLASES_YouTube',
uri = "/activity/Objetos_y_Clases_YouTube",
# lom =
parent = objetosyclases, root = POO,
pre_condition_rule = "",
post_condition_rule = "",
flow = True,
forward_only = True,
choice = False,
rollup_objective = True,
rollup_progress = True,
is_container = False,
is_visible = True,
order_in_container = 1
)
objetos_y_clases_YouTube.save()
encapsulacion = LearningActivity( name = 'Encapsulacion', slug = 'Encapsulacion',
uri = "/activity/encapsulacion",
# lom =
parent = content, root = POO,
# pre_condition_rule = """self.recommendation_value = Text_Verbal.eval(self.user.learningstyleinventory.verbal,self.user.learningstyleinventory.visual)""" ,
pre_condition_rule = """self.pre_condition = 'disabled'""" ,
post_condition_rule = "",
flow = True,
forward_only = True,
choice = False,
rollup_rule = "",
rollup_objective = True,
rollup_progress = True,
is_container = True,
is_visible = True,
order_in_container = 4
)
encapsulacion.save()
encapsulacion_intro = LearningActivity( name = 'Encapsulacion Introduccion', slug = 'Encapsulacion_intro',
uri = "/activity/Encapsulacion_intro",
# lom =
parent = encapsulacion, root = POO,
pre_condition_rule = "",
post_condition_rule = "",
flow = True,
forward_only = True,
choice = False,
rollup_objective = True,
rollup_progress = True,
is_container = False,
is_visible = True,
order_in_container = 0
)
encapsulacion_intro.save()
encapsulacion_ejemplos = LearningActivity( name = 'Encapsulacion Ejemplos', slug = 'Encapsulacion_ejemplos',
uri = "/activity/Encapsulacion_Ejemplos",
# lom =
parent = encapsulacion, root = POO,
pre_condition_rule = "",
post_condition_rule = "",
flow = True,
forward_only = True,
choice = False,
rollup_objective = True,
rollup_progress = True,
is_container = False,
is_visible = True,
order_in_container = 1
)
encapsulacion_ejemplos.save()
herencia = LearningActivity( name = 'Herencia', slug = 'Herencia',
uri = "/activity/Herencia",
# lom =
parent = content, root = POO,
pre_condition_rule = "",
post_condition_rule = "",
flow = True,
forward_only = True,
choice = False,
rollup_objective = True,
rollup_progress = True,
is_container = False,
is_visible = True,
order_in_container = 7
)
herencia.save()
polimorfismo = LearningActivity( name = 'Polimorfismo', slug = 'polimorfismo',
uri = "/activity/Polimorfismo",
# lom =
parent = content, root = POO,
pre_condition_rule = "",
post_condition_rule = "",
flow = True,
forward_only = True,
choice = False,
rollup_objective = True,
rollup_progress = True,
is_container = False,
is_visible = True,
order_in_container = 14
)
polimorfismo.save()
posttest_root = LearningActivity( name = 'Post', slug = 'Post',
uri = "/activity/Post",
parent = POO,
root = POO,
flow = True,
forward_only = False,
choice = True,
choice_exit = False,
rollup_rule = "satisfied IF Any satisfied",
rollup_objective = True,
rollup_progress = True,
is_container = True,
is_visible = True,
order_in_container = 4
)
posttest_root.save()
posttest1 = LearningActivity( name = 'Posttest', slug = 'posttest',
uri = "/test/Posttest1",
# lom = ,
parent = posttest_root, root = POO,
pre_condition_rule = "",
post_condition_rule = "",
rollup_rule = "",
rollup_objective = True,
rollup_progress = True,
choice_exit = False,
is_container = False,
is_visible = True,
order_in_container = 23
)
posttest1.save()
posttest2 = LearningActivity( name = 'Posttest2', slug = 'posttest',
uri = "/test/Posttest2",
# lom = ,
parent = posttest_root, root = POO,
pre_condition_rule = "",
post_condition_rule = "",
rollup_rule = "",
rollup_objective = True,
rollup_progress = True,
choice_exit = False,
is_container = False,
is_visible = True,
order_in_container = 24
)
posttest2.save()
# posttest = LearningActivity( name = 'Posttest', slug = 'posttest',
# uri = "/test/Posttest",
# # lom = ,
# parent = POO, root = POO,
#
# pre_condition_rule = """if self.num_attempts == 0 :
# self.pre_condition = 'stopForwardTraversal' """,
# post_condition_rule = "",
#
# rollup_rule = "",
# rollup_objective = True,
# rollup_progress = True,
#
# choice_exit = False,
# is_container = False,
# is_visible = True,
# order_in_container = 23
# )
# posttest.save()
comentario_final = LearningActivity( name = 'Comentario_final', slug = 'comentario_final',
uri = "/activity/Comentario_final",
# lom =
parent = POO, root = POO,
pre_condition_rule = "",
post_condition_rule = "",
flow = True,
forward_only = True,
choice = False,
rollup_objective = True,
rollup_progress = True,
is_container = False,
is_visible = True,
order_in_container = 34
)
comentario_final.save()
    ##
    ##
    ##  Example of two Users
    ##
    ##
    # Recreate the two demo users from scratch on every run.
    User.objects.filter(username='ana').delete()
    User.objects.filter(username='paul').delete()

    j = User.objects.create_user('ana', 'lennon@thebeatles.com', '1234')
    j.is_active = True
    j.save()

    p = User.objects.create_user('paul', 'paul@thebeatles.com', '1234')
    p.is_active = True
    p.save()

    # Learning-style inventories (one score per modality) for each demo user.
    lsj=LearningStyleInventory(visual=12,verbal=11,aural=15,physical=9,logical=11,
        social=9, solitary=10, user = j)
    lsj.save()
    lsp=LearningStyleInventory(visual=12,verbal=11,aural=20,physical=9,logical=11,
        social=9, solitary=7, user = p)
    lsp.save()

    # Attach the POO activity tree to both users.
    s = SimpleSequencing()
    s.assignActivityTree(j,POO)
    s.assignActivityTree(p,POO)

    # NOTE(review): `estudiantes` is empty, so this bulk-provisioning loop is
    # currently dead code — confirm whether student rows were removed on purpose.
    estudiantes = [
    ]
    for e in estudiantes:
        User.objects.filter(username=e[0]).delete()
        u = User.objects.create_user(e[0],e[0], e[1])
        u.is_active = True
        u.save()
        lsu=LearningStyleInventory(visual=e[2],verbal=e[3],aural=e[4],physical=e[5],logical=e[6],
            social=e[7], solitary=e[8], user = u)
        lsu.save()
        ss = SimpleSequencing()
        ss.assignActivityTree(u,POO)
import os
# NOTE(review): second __main__ guard in the same script — this setdefault is
# redundant after the explicit DJANGO_SETTINGS_MODULE assignment above; confirm
# before removing.
if __name__ == "__main__":
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "protoboard.settings")
##
##
## Assign Activity to both Users
##
##
#
# poo =UserLearningActivity.objects.filter(learning_activity__uri = "/activity/POO" ,user = User.objects.filter(username='paul')[0] )[0]
# ss = SimpleSequencing()
#
#
#a = ss.get_nav(poo)
#print ss.nav_to_xml(root=a)
#
#
#pre_j = UserLearningActivity.objects.filter(learning_activity__name = "Pretest" ,user = j )[0]
#s.set_current(pre_j)
#
#a = s.get_nav(root)
#print s.nav_to_xml(root=a)
#
#s.exit(pre_j, objective_measure = 0.20, objective_status = 'satisfied')
#
#a = s.get_nav(root)
#print s.nav_to_xml(root=a)
#
#s.set_current(j,remediation)
#s.exit(j,remediation, objective_measure = 0.80, objective_status = 'satisfied')
#a = s.get_nav(root)
#print s.nav_to_xml(root=a)
#
#
#s.set_current(j,general)
#s.exit(j,general, objective_measure = 0.80, objective_status = 'satisfied')
#a = s.get_nav(root)
#print s.nav_to_xml(root=a)
#root = UserLearningActivity.objects.filter(learning_activity__name = "Unit" ,user = j )[0]
#c = s.get_nav(root)
#print "-"*20
#print s.xml_children(root=c)
#
#s.set_current(j,general)
#s.exit(j, general, objective_measure = 0.80, objective_status = 'satisfied')
#root = UserLearningActivity.objects.filter(learning_activity__name = "Unit" ,user = j )[0]
#c = s.get_nav(root)
#print "-"*20
#print s.xml_children(root=c)
# POO Test  (stray commit-message line from version control, kept as a comment)
# -*- coding: utf-8 -*-
activities = [
{'_id':'/test/demo',
'questions': [{'id': 1,
'interaction': 'simpleChoice',
'inline': 0 ,
'title': "Elige la opción correcta",
'question': "Las clases nos sirven para definr las caracterisitcas de un grupo de",
'can_check': "True",
'can_show': "True",
'can_hint': "True",
'options': ["Propiedades","Métodos","Instrucciones","Objetos"],
'answer': [0,0,0,1],
'answer_text': "Solo son México y USA",
'hints': ["España está en Europa", "Nicaragua es de Sudamérica"]
},
{'id': 2,
'interaction': 'choiceInteraction',
'inline': 0 ,
'title': "Elige la opción correcta",
'question': "¿Las clases se componen de?",
'can_check': "True",
'can_show': "True",
'can_hint': "True",
'options': ["Propiedades","Métodos","Instrucciones","Líneas de código"],
'answer': [1,1,0,0],
'answer_text': "",
'hints': [""]
},
{
'id':3,
'interaction': 'choiceInteraction',
'inline': 1,
'title': "Pregunta Abierta",
'question': "Son lenguajes de programación orientado a objetos",
'can_check': "True",
'can_show': "True",
'can_hint': "True",
'options': ["C","Java","Fortran","C++"],
'answer': [0,1,0,1],
},
{
'id':4,
'interaction': 'textEntryInteraction',
'inline': 0,
'title': "Estilo",
'question': "Es un enfoque particular o filosofía para diseñar y programar soluciones",
'can_check': "True",
'can_show': "True",
'can_hint': "True",
'options': [],
'type':"str",
'answer': ["Paradigma", "paradigma"],
},
{
'id':5,
'interaction': 'choiceInteraction',
'inline': 1,
'title': "Pregunta Abierta",
'question': "Son propiedades que tendría la clase Persona",
'can_check': "True",
'can_show': "True",
'can_hint': "True",
'options': ["comer()","nombre","fecha_de_caducidad","correo_electrónico"],
'answer': [0,1,0,1],
},
{
'id':6,
'interaction': 'choiceInteraction',
'inline': 1,
'title': "Pregunta Abierta",
'question': "Son métodos que tendría la clase Ave",
'can_check': "True",
'can_show': "True",
'can_hint': "True",
'options': ["imprimir()","volar()","comer()","ladrar()"],
'answer': [0,1,1,0],
},
],
'intro':"""<h3>Introducción</h3>
<p> Contesta las preguntas,correctamente. Mínimo Tres. </p>""",
'bye':"""""",
'satisfied_at_least':3
},
{'_id':'/program/csharp/1',
'title':u"Product.cs",
'initial_code':u"""
using System.IO;
using System;
public class Product
{
public code;
public desc;
public Product(int c, string d)
{
code=c;
desc=d;
}
public void Print()
{
Console.WriteLine("Producto {0}: {1}", code,desc);
}
}
""",
'description':u"Completa la definción de una clase sencilla",
'type':"Completa",
'icon':"puzzle-piece",
'level':'principiante',
'correct_code':u""" """,
'instructions':u"""
<h4>La clase <code>Product</code> tiene errores</h4>
<p>
La clase <code>Product</code> se utiliza en un programa de la siguiente manera:
</p>
<pre>
Product p = new Product(1, "iPhone 6");
p.Print();
</pre>
<p>
Completa el código para que funcione.
</p>
<row>
<button class="btn btn-info" type="button" data-toggle="collapse" data-target="#collapseExample" aria-expanded="false" aria-controls="collapseExample">
Ayuda
</button>
</row>
<div class="collapse" id="collapseExample">
<div class="well">
<p>
En C# al declarar los campos debes indicar su tipo de dato. Por ejemplo:
</p>
<pre>
public int intentos;
public string email;</pre>
</div>
</div>
""",
'unit_test':u"""
[TestFixture]
public class ProductTest
{
[Test, Description("Prueba del Constructor")]
public void Constructor()
{
Product p = new Product(1,"hola");
// Constraint Syntax
Assert.AreEqual(p.code,1);
}
[Test, Description("Imprimir la Descripción")]
public void PrintTest()
{
Product p = new Product(1,"hola");
p.Print();
using (StringWriter sw = new StringWriter())
{
Console.SetOut(sw);
p.Print();
string expected = "Producto 1: hola";
StringAssert.StartsWith(expected, sw.ToString());
}
}
}""",
"lang":"csharp" }
]
import os
if __name__ == "__main__":
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "protoboard.settings")
    from pymongo import MongoClient
    from django.conf import settings

    # Connect to the project's activities collection.
    client = MongoClient(settings.MONGO_DB)
    db = client.protoboard_database
    activities_collection = db.activities_collection
    #print activities_collection.find_one({'_id':'/activity/Preliminar'})

    # Reseed: wipe the collection, then insert the `activities` defined above.
    # NOTE(review): Collection.remove()/insert() are deprecated pymongo APIs
    # (delete_many()/insert_many() are the replacements) — confirm the pinned
    # pymongo version still supports them.
    activities_collection.remove()
    activities_collection.insert(activities)
    #activities_collection.remove()
    #activities_collection.insert(activities)
|
#!/usr/bin/env python
DOCUMENTATION = '''
---
module: ossec_urls
short_description: Gather facts for OSSEC download URLs
description:
- Gather version, checksum, and URL info for OSSEC downloads
author:
- Conor Schaefer (@conorsch)
- Freedom of the Press Foundation (@freedomofpress)
requirements:
- requests
options:
ossec_version:
description:
- version number of release to download
default: "2.8.2"
required: no
notes:
- The OSSEC version to download is hardcoded to avoid surprises.
If you want a newer version than the current default, you should
pass the version in via I(ossec_version).
'''
EXAMPLES = '''
- ossec_urls:
ossec_version: "2.8.2"
'''
from StringIO import StringIO
from urlparse import urljoin
import re
# Track whether `requests` is importable; main() fails gracefully with a
# useful message when it is missing instead of crashing with ImportError.
HAS_REQUESTS = True
try:
    import requests
except ImportError:
    HAS_REQUESTS = False
class OSSECURLs():
    """Build download/checksum URLs and fetch checksums for an OSSEC release.

    On construction, gathers all URL facts for the requested version and
    downloads the published MD5/SHA1 checksums, exposing everything through
    the ``ansible_facts`` dict.
    """

    def __init__(self, ossec_version):
        self.ossec_version = ossec_version
        # Network call: downloads and parses the published checksum file.
        checksums = self.parse_checksums()
        self.ansible_facts = dict(
            ossec_version=self.ossec_version,
            ossec_tarball_filename=self.ossec_tarball_filename,
            ossec_tarball_url=self.ossec_tarball_url,
            ossec_checksum_filename=self.ossec_checksum_filename,
            ossec_checksum_url=self.ossec_checksum_url,
        )
        self.ansible_facts.update(checksums)

    @property
    def ossec_tarball_filename(self):
        """Filename of the release tarball, e.g. ``ossec-hids-2.8.2.tar.gz``."""
        return "ossec-hids-{}.tar.gz".format(self.ossec_version)

    @property
    def ossec_tarball_url(self):
        """GitHub source-archive URL for the release tarball."""
        return "https://github.com/ossec/ossec-hids/archive/{}.tar.gz".format(
            self.ossec_version)

    @property
    def ossec_checksum_url(self):
        """GitHub release-asset URL of the checksum file for this version."""
        return "https://github.com/ossec/ossec-hids/releases/download/{}/{}".format(
            self.ossec_version, self.ossec_checksum_filename)

    @property
    def ossec_checksum_filename(self):
        """Filename of the published checksum file for the tarball."""
        return "{}-checksum.txt".format(self.ossec_tarball_filename)

    def parse_checksums(self):
        """Download the checksum file and return its MD5/SHA1 values.

        Returns a dict with keys ``ossec_md5_checksum`` and
        ``ossec_sha1_checksum``.  Raises ValueError when the downloaded file
        does not match the expected layout (previously this crashed with an
        opaque AttributeError on ``None.groupdict()``).
        """
        r = requests.get(self.ossec_checksum_url)
        checksum_regex = re.compile(r'''
            ^MD5\(
            '''
            + re.escape(self.ossec_tarball_filename) +
            r'''\)=\s+(?P<ossec_md5_checksum>[0-9a-f]{32})\s+
            SHA1\(
            '''
            + re.escape(self.ossec_tarball_filename) +
            r'''\)=\s+(?P<ossec_sha1_checksum>[0-9a-f]{40})$
            ''', re.VERBOSE | re.MULTILINE
            )
        checksum_list = r.content.rstrip()
        match = checksum_regex.match(checksum_list)
        if match is None:
            raise ValueError("Unexpected checksum file format for {}".format(
                self.ossec_checksum_filename))
        return match.groupdict()
def main():
    """Ansible module entry point: gather OSSEC URL/checksum facts."""
    module = AnsibleModule(
        argument_spec=dict(
            ossec_version=dict(default="2.8.2"),
        ),
        supports_check_mode=False
    )
    if not HAS_REQUESTS:
        module.fail_json(msg='requests required for this module')
    ossec_version = module.params['ossec_version']
    try:
        ossec_config = OSSECURLs(ossec_version=ossec_version)
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt are
        # no longer swallowed.  Also fixed the missing space after "v{}."
        # (the message previously read "...OSSEC v2.8.2.Ensure...").
        msg = ("Failed to find checksum information for OSSEC v{}. "
               "Ensure you have the proper release specified, "
               "and check the download page to confirm: "
               "http://www.ossec.net/?page_id=19".format(ossec_version))
        module.fail_json(msg=msg)
    results = ossec_config.ansible_facts
    if results:
        module.exit_json(changed=False, ansible_facts=results)
    else:
        msg = "Failed to fetch OSSEC URL facts."
        module.fail_json(msg=msg)

from ansible.module_utils.basic import *
main()
Flake8: Lints OSSEC URLs Ansible module
The Ansible module was pulled in from a separate repository (see #1468),
so the poor formatting is a product of lack of linting over there. Since
this is an Ansible module, and not a regular Python module, there are a
few common exceptions we'll make, specifically:
* F401
* E402
* F403
* F405
For reference, see:
https://github.com/HewlettPackard/oneview-ansible/blob/3dc8596861b10d16afe57a4d24bf7bd3dc514f3e/TESTING.md
#!/usr/bin/env python
DOCUMENTATION = '''
---
module: ossec_urls
short_description: Gather facts for OSSEC download URLs
description:
- Gather version, checksum, and URL info for OSSEC downloads
author:
- Conor Schaefer (@conorsch)
- Freedom of the Press Foundation (@freedomofpress)
requirements:
- requests
options:
ossec_version:
description:
- version number of release to download
default: "2.8.2"
required: no
notes:
- The OSSEC version to download is hardcoded to avoid surprises.
If you want a newer version than the current default, you should
pass the version in via I(ossec_version).
'''
EXAMPLES = '''
- ossec_urls:
ossec_version: "2.8.2"
'''
import re  # noqa E402
# Track whether `requests` is importable; main() fails gracefully with a
# useful message when it is missing instead of crashing with ImportError.
HAS_REQUESTS = True
try:
    import requests
except ImportError:
    HAS_REQUESTS = False
class OSSECURLs():
    """Build download/checksum URLs and fetch checksums for an OSSEC release.

    On construction, gathers all URL facts for the requested version and
    downloads the published MD5/SHA1 checksums, exposing everything through
    the ``ansible_facts`` dict.
    """

    def __init__(self, ossec_version):
        self.ossec_version = ossec_version
        # Network call: downloads and parses the published checksum file.
        checksums = self.parse_checksums()
        self.ansible_facts = dict(
            ossec_version=self.ossec_version,
            ossec_tarball_filename=self.ossec_tarball_filename,
            ossec_tarball_url=self.ossec_tarball_url,
            ossec_checksum_filename=self.ossec_checksum_filename,
            ossec_checksum_url=self.ossec_checksum_url,
        )
        self.ansible_facts.update(checksums)

    @property
    def ossec_tarball_filename(self):
        """Filename of the release tarball, e.g. ``ossec-hids-2.8.2.tar.gz``."""
        return "ossec-hids-{}.tar.gz".format(self.ossec_version)

    @property
    def ossec_tarball_url(self):
        """GitHub source-archive URL for the release tarball."""
        return "https://github.com/ossec/ossec-hids/archive/{}.tar.gz".format(
            self.ossec_version)

    @property
    def ossec_checksum_url(self):
        """GitHub release-asset URL of the checksum file for this version."""
        return "https://github.com/ossec/ossec-hids/releases/download/{}/{}".format(  # noqa E501
            self.ossec_version, self.ossec_checksum_filename)

    @property
    def ossec_checksum_filename(self):
        """Filename of the published checksum file for the tarball."""
        return "{}-checksum.txt".format(self.ossec_tarball_filename)

    def parse_checksums(self):
        """Download the checksum file and return its MD5/SHA1 values.

        Returns a dict with keys ``ossec_md5_checksum`` and
        ``ossec_sha1_checksum``.  Raises ValueError when the downloaded file
        does not match the expected layout (previously this crashed with an
        opaque AttributeError on ``None.groupdict()``).
        """
        r = requests.get(self.ossec_checksum_url)
        checksum_regex = re.compile(r'''
            ^MD5\(
            '''
            + re.escape(self.ossec_tarball_filename) +
            r'''\)=\s+(?P<ossec_md5_checksum>[0-9a-f]{32})\s+
            SHA1\(
            '''
            + re.escape(self.ossec_tarball_filename) +
            r'''\)=\s+(?P<ossec_sha1_checksum>[0-9a-f]{40})$
            ''', re.VERBOSE | re.MULTILINE
            )
        checksum_list = r.content.rstrip()
        match = checksum_regex.match(checksum_list)
        if match is None:
            raise ValueError("Unexpected checksum file format for {}".format(
                self.ossec_checksum_filename))
        return match.groupdict()
def main():
    """Ansible module entry point: gather OSSEC URL/checksum facts."""
    module = AnsibleModule(  # noqa E405
        argument_spec=dict(
            ossec_version=dict(default="2.8.2"),
        ),
        supports_check_mode=False
    )
    if not HAS_REQUESTS:
        module.fail_json(msg='requests required for this module')
    ossec_version = module.params['ossec_version']
    try:
        ossec_config = OSSECURLs(ossec_version=ossec_version)
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt are
        # no longer swallowed.  Also fixed the missing space after "v{}."
        # (the message previously read "...OSSEC v2.8.2.Ensure...").
        msg = ("Failed to find checksum information for OSSEC v{}. "
               "Ensure you have the proper release specified, "
               "and check the download page to confirm: "
               "http://www.ossec.net/?page_id=19".format(ossec_version))
        module.fail_json(msg=msg)
    results = ossec_config.ansible_facts
    if results:
        module.exit_json(changed=False, ansible_facts=results)
    else:
        msg = "Failed to fetch OSSEC URL facts."
        module.fail_json(msg=msg)

from ansible.module_utils.basic import *  # noqa E402,F403
main()
|
from django.conf.urls.defaults import *
from django.conf import settings
from leave.forms import StartRequestForm, RequesterForm, CheckRequestForm
from os.path import join, dirname
# Directory containing this module; used below to serve bundled media.
_dir = join(dirname(__file__))
from django.contrib import admin
admin.autodiscover()
# URL routes for the leave-request workflow demo.  Patterns are tried in
# order; the first regex that matches wins.
urlpatterns = patterns('',
    # FOR DEBUG AND TEST ONLY
    # NOTE(review): these switch-user routes accept credentials in the URL;
    # they must never be enabled in a production deployment.
    (r'^.*/accounts/login.*switch/(?P<username>.*)/(?P<password>.*)/$', 'goflow.workflow.views.debug_switch_user', {'redirect':'/leave/'}),
    (r'^.*/switch/(?P<username>.*)/(?P<password>.*)/$', 'goflow.workflow.views.debug_switch_user'),
    # user connection
    (r'^.*/logout/$', 'django.contrib.auth.views.logout'),
    (r'^.*/accounts/login/$', 'django.contrib.auth.views.login', {'template_name':'goflow/login.html'}),
    (r'^.*/password_change/$', 'django.contrib.auth.views.password_change'),
    # static
    (r'^images/(?P<path>.*)$', 'django.views.static.serve', {'document_root': join(_dir, 'media/img'), 'show_indexes': True}),
    (r'^files/(.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT}),
    # home redirection
    (r'^.*/home/$', 'django.views.generic.simple.redirect_to', {'url':'/leave/'}),
    # home page
    (r'^leave/$', 'django.views.generic.simple.direct_to_template', {'template':'leave.html'}),
    # starting application
    (r'^leave/start/$', 'goflow.apptools.views.start_application', {'process_name':'leave',
                                                                   'form_class':StartRequestForm,
                                                                   'template':'start_leave.html'}),
    # applications
    (r'^leave/checkstatus/(?P<id>.*)/$', 'goflow.apptools.views.edit_model', {'form_class':CheckRequestForm,
                                                                             'template':'checkstatus.html'}),
    (r'^leave/checkstatus_auto/$', 'leavedemo.leave.views.checkstatus_auto', {'notif_user':True}),
    (r'^leave/refine/(?P<id>.*)/$', 'goflow.apptools.views.edit_model', {'form_class':RequesterForm,
                                                                        'template':'refine.html'}),
    (r'^leave/approvalform/(?P<id>.*)/$', 'goflow.apptools.views.edit_model', {'form_class':CheckRequestForm,
                                                                              'template':'approval.html'}),
    (r'^leave/hrform/(?P<id>.*)/$', 'goflow.apptools.views.view_application', {'template':'hrform.html'}),
    (r'^leave/hr_auto/$', 'leavedemo.leave.auto.update_hr'),
    (r'^leave/finalinfo/(?P<id>.*)/$', 'goflow.apptools.views.view_application', {'template':'finalinfo.html'}),
    # administration
    (r'^leave/admin/workflow/', include('goflow.apptools.urls_admin')),
    (r'^leave/admin/graphics2/', include('goflow.graphics2.urls_admin')),
    (r'^leave/admin/(.*)', admin.site.root),
    # Goflow pages
    (r'^leave/', include('goflow.urls')),
    (r'^leave/send_mail/$', 'goflow.workflow.notification.send_mail'),
)
bugfix: remove the password-change URL route
from django.conf.urls.defaults import *
from django.conf import settings
from leave.forms import StartRequestForm, RequesterForm, CheckRequestForm
from os.path import join, dirname
# Directory containing this module; used below to serve bundled media.
_dir = join(dirname(__file__))
from django.contrib import admin
admin.autodiscover()
# URL routes for the leave-request workflow demo.  Patterns are tried in
# order; the first regex that matches wins.
urlpatterns = patterns('',
    # FOR DEBUG AND TEST ONLY
    # NOTE(review): these switch-user routes accept credentials in the URL;
    # they must never be enabled in a production deployment.
    (r'^.*/accounts/login.*switch/(?P<username>.*)/(?P<password>.*)/$', 'goflow.workflow.views.debug_switch_user', {'redirect':'/leave/'}),
    (r'^.*/switch/(?P<username>.*)/(?P<password>.*)/$', 'goflow.workflow.views.debug_switch_user'),
    # user connection
    (r'^.*/logout/$', 'django.contrib.auth.views.logout'),
    (r'^.*/accounts/login/$', 'django.contrib.auth.views.login', {'template_name':'goflow/login.html'}),
    # static
    (r'^images/(?P<path>.*)$', 'django.views.static.serve', {'document_root': join(_dir, 'media/img'), 'show_indexes': True}),
    (r'^files/(.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT}),
    # home redirection
    (r'^.*/home/$', 'django.views.generic.simple.redirect_to', {'url':'/leave/'}),
    # home page
    (r'^leave/$', 'django.views.generic.simple.direct_to_template', {'template':'leave.html'}),
    # starting application
    (r'^leave/start/$', 'goflow.apptools.views.start_application', {'process_name':'leave',
                                                                   'form_class':StartRequestForm,
                                                                   'template':'start_leave.html'}),
    # applications
    (r'^leave/checkstatus/(?P<id>.*)/$', 'goflow.apptools.views.edit_model', {'form_class':CheckRequestForm,
                                                                             'template':'checkstatus.html'}),
    (r'^leave/checkstatus_auto/$', 'leavedemo.leave.views.checkstatus_auto', {'notif_user':True}),
    (r'^leave/refine/(?P<id>.*)/$', 'goflow.apptools.views.edit_model', {'form_class':RequesterForm,
                                                                        'template':'refine.html'}),
    (r'^leave/approvalform/(?P<id>.*)/$', 'goflow.apptools.views.edit_model', {'form_class':CheckRequestForm,
                                                                              'template':'approval.html'}),
    (r'^leave/hrform/(?P<id>.*)/$', 'goflow.apptools.views.view_application', {'template':'hrform.html'}),
    (r'^leave/hr_auto/$', 'leavedemo.leave.auto.update_hr'),
    (r'^leave/finalinfo/(?P<id>.*)/$', 'goflow.apptools.views.view_application', {'template':'finalinfo.html'}),
    # administration
    (r'^leave/admin/workflow/', include('goflow.apptools.urls_admin')),
    (r'^leave/admin/graphics2/', include('goflow.graphics2.urls_admin')),
    (r'^leave/admin/(.*)', admin.site.root),
    # Goflow pages
    (r'^leave/', include('goflow.urls')),
    (r'^leave/send_mail/$', 'goflow.workflow.notification.send_mail'),
)
|
from django.db import models
from django.conf import settings
from django.core import serializers
from django.db.utils import DatabaseError
import tempfile
import time
import os
# Module-level debug flag: when True, helpers below print progress info.
verbose = False

# Raster file directory: settings.RASTER_DIR when available, otherwise the
# bundled test data (e.g. when Django settings are not configured).
try:
    RASTDIR = settings.RASTER_DIR
except Exception:
    # Narrowed from a bare `except:` so system-exiting exceptions propagate.
    RASTDIR = os.path.join(os.path.dirname(__file__), 'test_data')

# (stored value, human-readable label) pairs for RasterDataset.type.
RASTER_TYPES = (
    ("continuous", "continuous"),
    ("categorical", "categorical"),  # fixed display-label typo "catgorical"
)

# Path to the starspan executable; overridable via settings.STARSPAN_BIN.
try:
    STARSPAN_BIN = settings.STARSPAN_BIN
except Exception:
    STARSPAN_BIN = 'starspan'
class RasterDataset(models.Model):
    """A raster file on disk that zonal statistics can be computed against."""
    # Short unique identifier; also used in the __unicode__ representation.
    name = models.CharField(max_length=30, unique=True)
    full_name = models.CharField(max_length=255, default="")
    # Must live under RASTDIR (settings.RASTER_DIR or bundled test data).
    filepath = models.FilePathField(path=RASTDIR, recursive=True)
    # One of RASTER_TYPES: "continuous" or "categorical".
    type = models.CharField(max_length=30, choices=RASTER_TYPES)
    def __unicode__(self):
        return unicode(self.name + " raster at " + self.filepath)
class ZonalStatsCache(models.Model):
    """Cached starspan zonal statistics for one (geometry, raster) pair.

    Rows are keyed by a hash of the geometry WKT plus the raster, so a given
    geometry/raster combination is only computed once.
    """
    geom_hash = models.CharField(max_length=255)
    raster = models.ForeignKey('RasterDataset')
    # Statistics as reported by starspan; all nullable because a run may
    # produce no intersecting pixels.
    avg = models.FloatField(null=True, blank=True)
    min = models.FloatField(null=True, blank=True)
    max = models.FloatField(null=True, blank=True)
    mode = models.FloatField(null=True, blank=True)
    median = models.FloatField(null=True, blank=True)
    stdev = models.FloatField(null=True, blank=True)
    nulls = models.FloatField(null=True, blank=True)
    pixels = models.FloatField(null=True, blank=True)
    date_modified = models.DateTimeField(auto_now=True)
    @property
    def json(self):
        # NOTE(review): serializers.serialize normally expects an iterable of
        # model instances, not a single instance -- confirm this works or
        # whether it should be serialize("json", [self]).
        return serializers.serialize("json", self)
    def __unicode__(self):
        return unicode("Zonal Stats for %s - avg:%s , pixels:%s, nulls:%s" % (self.raster.name, self.avg, self.pixels, self.nulls))
    class Meta:
        # One cache row per geometry/raster pair.
        unique_together = ('geom_hash', 'raster')
def geom_to_file(geom, filepath):
    """Write *geom* to *filepath* as a single-feature GeoJSON FeatureCollection.

    The feature carries a constant property ``id=1`` so starspan can be driven
    with ``--where "id=1"``.  *geom* must expose a ``.json`` attribute holding
    its GeoJSON geometry string (as a GEOSGeometry does).
    """
    json = """{
        "type": "FeatureCollection",
        "features": [
        { "type": "Feature", "properties": { "id": 1 }, "geometry": %s }
        ]
    }""" % geom.json
    # `with` guarantees the handle is closed even if the write fails
    # (the original open/write/close leaked the handle on error).
    with open(filepath, 'w') as fh:
        fh.write(json)
    assert os.path.exists(filepath)
def run_starspan_zonal(geom, rasterds, write_cache=False):
    """
    Consider this a 'private' method .. dont call directly, use zonal_stats() instead
    Runs starspan and returns a ZonalStatsCache object
    If not write_cache, just return an unsaved object
    """
    # Create tempdir and cd in
    tmpdir = tempfile.gettempdir()
    os.chdir(tmpdir)
    # Output geom to temp dataset
    timestamp = str(time.time())
    out_json = os.path.join(tmpdir, 'geom_%s.json' % timestamp)
    geom_to_file(geom, out_json)
    # Run starspan; remove any stale output file first so a leftover from a
    # previous run cannot be mistaken for fresh results.
    out_csv = os.path.join(tmpdir, 'output_%s_stats.csv' % timestamp)
    if os.path.exists(out_csv):
        os.remove(out_csv)
    cmd = '%s --vector %s --where "id=1" --out-prefix %s/output_%s --out-type table --summary-suffix _stats.csv --raster %s --stats avg mode median min max sum stdev nulls ' % (STARSPAN_BIN,out_json,tmpdir, timestamp, rasterds.filepath)
    if verbose: print(cmd)
    starspan_out = os.popen(cmd).read()
    if verbose: print(starspan_out)
    if not os.path.exists(out_csv):
        raise Exception("Starspan failed to produce output file: %s" % starspan_out)
    # `with` closes the handle promptly (the original leaked it).
    with open(out_csv, 'r') as fh:
        res = fh.readlines()
    if verbose: print(res)
    # Create zonal model keyed on a hash of the geometry WKT
    wkt_hash = hash(geom.wkt)
    zonal = ZonalStatsCache(raster=rasterds, geom_hash=wkt_hash)
    # Make sure we have valid results output by starspan (one header row
    # plus one data row, and at least one intersecting feature)
    if len(res) == 2 and "Intersecting features: 0" not in starspan_out:
        headers = [x.strip() for x in res[0].split(',')]
        vals = [x.strip() for x in res[1].split(',')]
        assert len(headers) == len(vals)
        # Columns like "avg_Band1" map onto the model fields of the same name.
        for header, val in zip(headers, vals):
            if "_Band1" in header:
                stat_type = header.replace("_Band1", '')
                setattr(zonal, stat_type, float(val))
            elif header == 'numPixels':
                zonal.pixels = float(val)
    # return zonal object (caching it if needed)
    if write_cache:
        try:
            if zonal.pixels:
                zonal.save()
        except Exception:
            # Most likely another zonal stats cache for this geom/raster
            # was saved to the cache before this one completed.
            pass
    return zonal
def clear_cache():
    """Remove every row from the zonal-stats cache table."""
    cached_rows = ZonalStatsCache.objects.all()
    if verbose:
        print("Clearing %s objects from cache" % len(cached_rows))
    cached_rows.delete()
def zonal_stats(geom, rasterds, write_cache=True, read_cache=True, cache_only=False):
    """
    Given a GEOSGeometry and a RasterDataset,
    compute the zonal stats and return json like
    { 'raster': 'elevation', 'stats': {'sum': 10234.2, 'mean': 12.4}}
    result can be stored in cache (write_cache)
    and cache value is returned if read_cache

    Returns None for an invalid geometry.  The returned object carries a
    ``from_cache`` flag telling whether it was served from the DB cache.
    """
    if not geom.valid:
        return None
    # Renamed from `hash`, which shadowed the builtin of the same name.
    wkt_hash = hash(geom.wkt)
    cached = None
    if read_cache:
        try:
            cached = ZonalStatsCache.objects.get(geom_hash=wkt_hash, raster=rasterds)
        except ZonalStatsCache.DoesNotExist:
            cached = None
        except DatabaseError:
            # e.g. cache table not created yet; treat as a cache miss.
            cached = None
    else:
        write_cache = False  # If we're not reading the cache, we're not going to write to it either
    if cached:
        result = cached
        result.from_cache = True
    else:
        if cache_only:
            # Return an empty (unsaved) result
            result = ZonalStatsCache(geom_hash=wkt_hash, raster=rasterds)
        else:
            result = run_starspan_zonal(geom, rasterds, write_cache=write_cache)
        result.from_cache = False
    return result
remove print stmts
from django.db import models
from django.conf import settings
from django.core import serializers
from django.db.utils import DatabaseError
import tempfile
import time
import os
# Raster file directory: settings.RASTER_DIR when available, otherwise the
# bundled test data (e.g. when Django settings are not configured).
try:
    RASTDIR = settings.RASTER_DIR
except Exception:
    # Narrowed from a bare `except:` so system-exiting exceptions propagate.
    RASTDIR = os.path.join(os.path.dirname(__file__), 'test_data')

# (stored value, human-readable label) pairs for RasterDataset.type.
RASTER_TYPES = (
    ("continuous", "continuous"),
    ("categorical", "categorical"),  # fixed display-label typo "catgorical"
)

# Path to the starspan executable; overridable via settings.STARSPAN_BIN.
try:
    STARSPAN_BIN = settings.STARSPAN_BIN
except Exception:
    STARSPAN_BIN = 'starspan'
class RasterDataset(models.Model):
    """A raster file on disk that zonal statistics can be computed against."""
    # Short unique identifier; also used in the __unicode__ representation.
    name = models.CharField(max_length=30, unique=True)
    full_name = models.CharField(max_length=255, default="")
    # Must live under RASTDIR (settings.RASTER_DIR or bundled test data).
    filepath = models.FilePathField(path=RASTDIR, recursive=True)
    # One of RASTER_TYPES: "continuous" or "categorical".
    type = models.CharField(max_length=30, choices=RASTER_TYPES)
    def __unicode__(self):
        return unicode(self.name + " raster at " + self.filepath)
class ZonalStatsCache(models.Model):
    """Cached starspan zonal statistics for one (geometry, raster) pair.

    Rows are keyed by a hash of the geometry WKT plus the raster, so a given
    geometry/raster combination is only computed once.
    """
    geom_hash = models.CharField(max_length=255)
    raster = models.ForeignKey('RasterDataset')
    # Statistics as reported by starspan; all nullable because a run may
    # produce no intersecting pixels.
    avg = models.FloatField(null=True, blank=True)
    min = models.FloatField(null=True, blank=True)
    max = models.FloatField(null=True, blank=True)
    mode = models.FloatField(null=True, blank=True)
    median = models.FloatField(null=True, blank=True)
    stdev = models.FloatField(null=True, blank=True)
    nulls = models.FloatField(null=True, blank=True)
    pixels = models.FloatField(null=True, blank=True)
    date_modified = models.DateTimeField(auto_now=True)
    @property
    def json(self):
        # NOTE(review): serializers.serialize normally expects an iterable of
        # model instances, not a single instance -- confirm this works or
        # whether it should be serialize("json", [self]).
        return serializers.serialize("json", self)
    def __unicode__(self):
        return unicode("Zonal Stats for %s - avg:%s , pixels:%s, nulls:%s" % (self.raster.name, self.avg, self.pixels, self.nulls))
    class Meta:
        # One cache row per geometry/raster pair.
        unique_together = ('geom_hash', 'raster')
def geom_to_file(geom, filepath):
    """Write *geom* to *filepath* as a single-feature GeoJSON FeatureCollection.

    The feature carries a constant property ``id=1`` so starspan can be driven
    with ``--where "id=1"``.  *geom* must expose a ``.json`` attribute holding
    its GeoJSON geometry string (as a GEOSGeometry does).
    """
    json = """{
        "type": "FeatureCollection",
        "features": [
        { "type": "Feature", "properties": { "id": 1 }, "geometry": %s }
        ]
    }""" % geom.json
    # `with` guarantees the handle is closed even if the write fails
    # (the original open/write/close leaked the handle on error).
    with open(filepath, 'w') as fh:
        fh.write(json)
    assert os.path.exists(filepath)
def run_starspan_zonal(geom, rasterds, write_cache=False):
    """
    Consider this a 'private' method .. dont call directly, use zonal_stats() instead
    Runs starspan and returns a ZonalStatsCache object
    If not write_cache, just return an unsaved object
    """
    # Create tempdir and cd in
    tmpdir = tempfile.gettempdir()
    os.chdir(tmpdir)
    # Output geom to temp dataset
    timestamp = str(time.time())
    out_json = os.path.join(tmpdir, 'geom_%s.json' % timestamp)
    geom_to_file(geom, out_json)
    # Run starspan; remove any stale output file first so a leftover from a
    # previous run cannot be mistaken for fresh results.
    out_csv = os.path.join(tmpdir, 'output_%s_stats.csv' % timestamp)
    if os.path.exists(out_csv):
        os.remove(out_csv)
    cmd = '%s --vector %s --where "id=1" --out-prefix %s/output_%s --out-type table --summary-suffix _stats.csv --raster %s --stats avg mode median min max sum stdev nulls ' % (STARSPAN_BIN,out_json,tmpdir, timestamp, rasterds.filepath)
    starspan_out = os.popen(cmd).read()
    if not os.path.exists(out_csv):
        raise Exception("Starspan failed to produce output file: %s" % starspan_out)
    # `with` closes the handle promptly (the original leaked it).
    with open(out_csv, 'r') as fh:
        res = fh.readlines()
    # Create zonal model keyed on a hash of the geometry WKT
    wkt_hash = hash(geom.wkt)
    zonal = ZonalStatsCache(raster=rasterds, geom_hash=wkt_hash)
    # Make sure we have valid results output by starspan (one header row
    # plus one data row, and at least one intersecting feature)
    if len(res) == 2 and "Intersecting features: 0" not in starspan_out:
        headers = [x.strip() for x in res[0].split(',')]
        vals = [x.strip() for x in res[1].split(',')]
        assert len(headers) == len(vals)
        # Columns like "avg_Band1" map onto the model fields of the same name.
        for header, val in zip(headers, vals):
            if "_Band1" in header:
                stat_type = header.replace("_Band1", '')
                setattr(zonal, stat_type, float(val))
            elif header == 'numPixels':
                zonal.pixels = float(val)
    # return zonal object (caching it if needed)
    if write_cache:
        try:
            if zonal.pixels:
                zonal.save()
        except Exception:
            # Most likely another zonal stats cache for this geom/raster
            # was saved to the cache before this one completed.
            pass
    return zonal
def clear_cache():
    """Remove every row from the zonal-stats cache table."""
    ZonalStatsCache.objects.all().delete()
def zonal_stats(geom, rasterds, write_cache=True, read_cache=True, cache_only=False):
    """
    Given a GEOSGeometry and a RasterDataset,
    compute the zonal stats and return json like
    { 'raster': 'elevation', 'stats': {'sum': 10234.2, 'mean': 12.4}}
    result can be stored in cache (write_cache)
    and cache value is returned if read_cache

    Returns None for an invalid geometry.  The returned object carries a
    ``from_cache`` flag telling whether it was served from the DB cache.
    """
    if not geom.valid:
        return None
    # Renamed from `hash`, which shadowed the builtin of the same name.
    wkt_hash = hash(geom.wkt)
    cached = None
    if read_cache:
        try:
            cached = ZonalStatsCache.objects.get(geom_hash=wkt_hash, raster=rasterds)
        except ZonalStatsCache.DoesNotExist:
            cached = None
        except DatabaseError:
            # e.g. cache table not created yet; treat as a cache miss.
            cached = None
    else:
        write_cache = False  # If we're not reading the cache, we're not going to write to it either
    if cached:
        result = cached
        result.from_cache = True
    else:
        if cache_only:
            # Return an empty (unsaved) result
            result = ZonalStatsCache(geom_hash=wkt_hash, raster=rasterds)
        else:
            result = run_starspan_zonal(geom, rasterds, write_cache=write_cache)
        result.from_cache = False
    return result
|
#!/usr/bin/env python
from twisted.internet import epollreactor
epollreactor.install()
from zope.interface import Interface, implements
from twisted.cred import portal, checkers, credentials, error as credential_error
from twisted.cred.checkers import InMemoryUsernamePasswordDatabaseDontUse
from twisted.web import server, http
from twisted.web.guard import HTTPAuthSessionWrapper, DigestCredentialFactory
from twisted.internet import reactor
from twisted.application import service, internet
from twisted.python.log import ILogObserver, FileLogObserver
from twisted.python.logfile import DailyLogFile
from twisted.python import log
from txjsonrpc.auth import wrapResource
from txjsonrpc.web import jsonrpc
import logging
import math
import datetime
import time
import pyproj
import pytz
import json
# Render floats with four decimal places in every JSON response.
json.encoder.FLOAT_REPR = lambda f: ("%.4f" % f)
import blitzortung
# Geodetic reference (WGS84 lon/lat) plus one UTM projection per served
# region, used to convert metric raster base lengths into degree deltas.
WGS84 = pyproj.Proj(init='epsg:4326')
UTM_EU = pyproj.Proj(init='epsg:32633') # UTM 33 N / WGS84
UTM_USA = pyproj.Proj(init='epsg:32614') # UTM 14 N / WGS84
UTM_OC = pyproj.Proj(init='epsg:32755') # UTM 55 S / WGS84
class PasswordDictChecker(object):
    """Twisted cred checker validating credentials against a plain dict.

    NOTE(review): passwords are compared in clear text -- acceptable for the
    demo user table in this module, not for real deployments.
    """
    implements(checkers.ICredentialsChecker)
    credentialInterfaces = (credentials.IUsernamePassword,)

    def __init__(self, passwords):
        # Maps username -> clear-text password.
        self.passwords = passwords

    def requestAvatarId(self, credentials):
        # BUG FIX: `defer` was never imported at module level, so every call
        # raised NameError; import locally to keep this edit self-contained.
        from twisted.internet import defer
        username = credentials.username
        if username in self.passwords:
            if credentials.password == self.passwords[username]:
                return defer.succeed(username)
        # BUG FIX: the exception class is UnauthorizedLogin; the misspelled
        # "UnathorizedLogin" raised AttributeError instead of failing cleanly.
        return defer.fail(credential_error.UnauthorizedLogin("invalid username/password"))
class IUserAvatar(Interface):
    """Marker interface: implementations expose a ``username`` attribute."""
class UserAvatar(object):
    """Minimal IUserAvatar implementation wrapping a username."""
    implements(IUserAvatar)
    def __init__(self, username):
        self.username = username
class TestRealm(object):
    """Twisted cred realm mapping avatar ids to UserAvatar instances."""
    implements(portal.IRealm)

    def __init__(self, users):
        self.users = users

    def requestAvatar(self, avatarId, mind, *interfaces):
        # BUG FIX: the original tested the undefined name INamedUserAvatar,
        # raising NameError on every call; the interface defined in this
        # module is IUserAvatar.
        if IUserAvatar in interfaces:
            logout = lambda: None
            return (IUserAvatar,
                    UserAvatar(avatarId),
                    logout)
        else:
            raise KeyError('none of the requested interfaces is supported')
class RasterData(object):
    """Lazily-built blitzortung.geom.Raster grids for a lon/lat bounding box.

    `coord_sys` is the UTM projection used to convert a metric base length
    into degree deltas at the center of the box; one Raster per requested
    base length is cached in ``self.raster_data``.
    """
    def __init__(self, min_lon, max_lon, min_lat, max_lat, coord_sys):
        self.min_lon = min_lon
        self.max_lon = max_lon
        self.min_lat = min_lat
        self.max_lat = max_lat
        self.coord_sys = coord_sys
        # Cache: baselength (meters) -> Raster instance.
        self.raster_data = {}
    def fix_max(self, minimum, maximum, delta):
        """Largest value <= maximum reachable from minimum in whole delta steps."""
        return minimum + math.floor((maximum - minimum)/delta)*delta
    def get_for(self, baselength):
        """Return the cached Raster for `baselength` meters, building it once.

        FIX: removed the debug `print` that wrote to stdout on every cache
        miss; library code should not print.
        """
        if baselength not in self.raster_data:
            ref_lon = (self.min_lon + self.max_lon) / 2.0
            ref_lat = (self.min_lat + self.max_lat) / 2.0
            utm_x, utm_y = pyproj.transform(WGS84, self.coord_sys, ref_lon, ref_lat)
            lon_d, lat_d = pyproj.transform(self.coord_sys, WGS84, utm_x + baselength, utm_y + baselength)
            delta_lon = lon_d - ref_lon
            delta_lat = lat_d - ref_lat
            max_lon = self.fix_max(self.min_lon, self.max_lon, delta_lon)
            max_lat = self.fix_max(self.min_lat, self.max_lat, delta_lat)
            self.raster_data[baselength] = blitzortung.geom.Raster(self.min_lon, max_lon, self.min_lat, max_lat, delta_lon, delta_lat, blitzortung.geom.Geometry.DefaultSrid)
        return self.raster_data[baselength]
# Raster regions selectable via the `region` RPC parameter:
# 1 = Europe (UTM_EU), 2 = Oceania (UTM_OC), 3 = USA (UTM_USA).
raster = {}
raster[1] = RasterData(-12, 35, 35, 65, UTM_EU)
raster[2] = RasterData(140, 180, -50, -10, UTM_OC)
raster[3] = RasterData(-140, -50, 10, 60, UTM_USA)
class Blitzortung(jsonrpc.JSONRPC):
    """
    JSON-RPC service publishing lightning stroke and station data.

    txjsonrpc exposes every jsonrpc_* method as an RPC of the same name.
    """
    # Serve both /path and /path/ variants.
    addSlash = True
    def __force_min(self, number, min_number):
        # Clamp `number` from below.
        return max(min_number, number)
    def __force_max(self, number, max_number):
        # Clamp `number` from above.
        return min(max_number, number)
    def __force_range(self, number, min_number, max_number):
        # Clamp `number` into [min_number, max_number].
        return self.__force_min(self.__force_max(number, max_number), min_number)
    def jsonrpc_get_strokes(self, minute_length, min_id=None):
        """Return strokes from the last `minute_length` minutes (capped at
        24h), optionally restricted to ids >= `min_id`."""
        minute_length = self.__force_range(minute_length, 0, 24*60)
        strokedb = blitzortung.db.stroke()
        endtime = datetime.datetime.utcnow()
        endtime = endtime.replace(tzinfo=pytz.UTC)
        endtime = endtime.replace(microsecond = 0)
        starttime = endtime - datetime.timedelta(minutes=minute_length)
        time_interval = blitzortung.db.TimeInterval(starttime, endtime)
        if min_id is not None:
            id_interval = blitzortung.db.IdInterval(min_id)
        else:
            id_interval = None
            min_id = 0
        area = None
        order = blitzortung.db.Order('id')
        reference_time = time.time()
        strokes = strokedb.select(time_interval, id_interval, area, order)
        query_time = time.time()
        max_id = None
        stroke_array = []
        for stroke in strokes:
            stroke_data = []
            timestamp = stroke.get_timestamp()  # NOTE(review): unused local
            # Age of the stroke relative to `endtime`.
            # NOTE(review): timedelta.seconds ignores the days component --
            # confirm windows longer than one day are handled as intended.
            stroke_data.append(((endtime - stroke.get_timestamp()).seconds))
            stroke_data.append(stroke.get_x())
            stroke_data.append(stroke.get_y())
            stroke_data.append(stroke.get_lateral_error())
            stroke_data.append(stroke.get_amplitude())
            stroke_data.append(stroke.get_station_count())
            stroke_data.append(stroke.get_type())
            stroke_array.append(stroke_data)
            max_id = stroke.get_id()
        response = {}
        response['s'] = stroke_array
        response['t'] = endtime.strftime("%Y%m%dT%H:%M:%S")
        if max_id:
            # Clients pass this back as min_id to poll only newer strokes.
            response['next'] = long(max_id + 1)
        print 'get_strokes(%d, %d): #%d (%.2fs)' %(minute_length, min_id, len(strokes), query_time - reference_time)
        return response
    def jsonrpc_get_strokes_around(self, longitude, latitude, minute_length, min_id=None):
        # Not implemented yet.
        pass
    def jsonrpc_get_strokes_raster(self, minute_length, raster_baselength=10000, minute_offset=0, region=1):
        """Return stroke counts gridded onto the raster of `region`, with a
        cell base length of `raster_baselength` meters (minimum 5000)."""
        raster_baselength = self.__force_min(raster_baselength, 5000)
        minute_length = self.__force_range(minute_length, 0, 24 * 60)
        # The offset window may only reach into the past, never the future.
        minute_offset = self.__force_range(minute_offset, -24 * 60 + minute_length, 0)
        strokedb = blitzortung.db.stroke()
        endtime = datetime.datetime.utcnow()
        endtime = endtime.replace(microsecond = 0)
        endtime += datetime.timedelta(minutes=minute_offset)
        starttime = endtime - datetime.timedelta(minutes=minute_length)
        time_interval = blitzortung.db.TimeInterval(starttime, endtime)
        reference_time = time.time()
        raster_data = raster[region].get_for(raster_baselength)
        raster_strokes = strokedb.select_raster(raster_data, time_interval)
        # NOTE(review): third argument presumably a histogram bin width in
        # minutes -- confirm against blitzortung.db.
        histogram = strokedb.select_histogram(minute_length, region, 5)
        query_time = time.time()
        endtime = endtime.replace(tzinfo=pytz.UTC)
        reduced_stroke_array = raster_strokes.to_reduced_array(endtime)
        # Compact response: grid cell data plus the grid geometry needed to
        # reconstruct cell coordinates client-side.
        response = {}
        response['r'] = reduced_stroke_array
        response['xd'] = raster_data.get_x_div()
        response['yd'] = raster_data.get_y_div()
        response['x0'] = raster_data.get_x_min()
        response['y1'] = raster_data.get_y_max()
        response['xc'] = raster_data.get_x_bin_count()
        response['yc'] = raster_data.get_y_bin_count()
        response['t'] = endtime.strftime("%Y%m%dT%H:%M:%S")
        response['h'] = histogram
        print 'get_strokes_raster(%d, %d, %d, %d): #%d (%.2fs)' %(minute_length, raster_baselength, minute_offset, region, len(reduced_stroke_array), query_time - reference_time)
        return response
    def jsonrpc_get_stations(self):
        """Return all receiving stations with their last-seen timestamp."""
        stationsdb = blitzortung.db.station()
        reference_time = time.time()
        stations = stationsdb.select()
        query_time = time.time()
        station_array = []
        for station in stations:
            station_data = []
            station_data.append(station.get_number())
            station_data.append(station.get_location_name())
            station_data.append(station.get_country())
            station_data.append(station.get_x())
            station_data.append(station.get_y())
            if station.get_timestamp():
                # Millisecond precision: drop the last three microsecond digits.
                station_data.append(station.get_timestamp().strftime("%Y%m%dT%H:%M:%S.%f")[:-3])
            else:
                station_data.append('')
            station_array.append(station_data)
        response = {'stations': station_array}
        print 'get_stations(): #%d (%.2fs)' %(len(stations), query_time - reference_time)
        return response
# NOTE(review): hardcoded demo credentials; replace before real deployment.
users = {'test':'test'}
# Set up the application and the JSON-RPC resource.
application = service.Application("Blitzortung.org JSON-RPC Server")
# Daily-rotated service log under /var/log/blitzortung.
logfile = DailyLogFile("webservice.log", "/var/log/blitzortung")
application.setComponent(ILogObserver, FileLogObserver(logfile).emit)
root = Blitzortung()
credentialFactory = DigestCredentialFactory("md5", "blitzortung.org")
# Define the credential checker the application will be using and wrap the JSON-RPC resource.
checker = InMemoryUsernamePasswordDatabaseDontUse()
checker.addUser('test','test')
realm_name = "Blitzortung.org JSON-RPC App"
wrappedRoot = wrapResource(root, [checker], realmName=realm_name)
class PublicHTMLRealm(object):
    """Realm serving each avatar's ~/public_html directory as a resource."""
    implements(portal.IRealm)

    def requestAvatar(self, avatarId, mind, *interfaces):
        # BUG FIX: IResource and File were never imported at module level, so
        # every call raised NameError; import locally to keep this edit
        # self-contained.
        from twisted.web.resource import IResource
        from twisted.web.static import File
        if IResource in interfaces:
            return (IResource, File("/home/%s/public_html" % (avatarId,)), lambda: None)
        raise NotImplementedError()
# NOTE(review): rebinding `portal` shadows the imported twisted.cred.portal
# module from here on.
portal = portal.Portal(PublicHTMLRealm(), [checker])
resource = HTTPAuthSessionWrapper(portal, [credentialFactory])
# With the wrapped root, we can set up the server as usual.
#site = server.Site(resource=wrappedRoot)
config = blitzortung.config.config()
# The JSON-RPC root is currently served unauthenticated; `wrappedRoot` and
# `resource` above are prepared but unused.
site = server.Site(root)
jsonrpcServer = internet.TCPServer(config.get_webservice_port(), site)
jsonrpcServer.setServiceParent(application)
fixed histogram offset
removed debug output
#!/usr/bin/env python
from twisted.internet import epollreactor
epollreactor.install()
from zope.interface import Interface, implements
from twisted.cred import portal, checkers, credentials, error as credential_error
from twisted.cred.checkers import InMemoryUsernamePasswordDatabaseDontUse
from twisted.web import server, http
from twisted.web.guard import HTTPAuthSessionWrapper, DigestCredentialFactory
from twisted.internet import reactor
from twisted.application import service, internet
from twisted.python.log import ILogObserver, FileLogObserver
from twisted.python.logfile import DailyLogFile
from twisted.python import log
from txjsonrpc.auth import wrapResource
from txjsonrpc.web import jsonrpc
import logging
import math
import datetime
import time
import pyproj
import pytz
import json
# Render floats with four decimal places in every JSON response.
json.encoder.FLOAT_REPR = lambda f: ("%.4f" % f)
import blitzortung
# Geodetic reference (WGS84 lon/lat) plus one UTM projection per served
# region, used to convert metric raster base lengths into degree deltas.
WGS84 = pyproj.Proj(init='epsg:4326')
UTM_EU = pyproj.Proj(init='epsg:32633') # UTM 33 N / WGS84
UTM_USA = pyproj.Proj(init='epsg:32614') # UTM 14 N / WGS84
UTM_OC = pyproj.Proj(init='epsg:32755') # UTM 55 S / WGS84
class PasswordDictChecker(object):
    """Twisted cred checker validating credentials against a plain dict.

    NOTE(review): passwords are compared in clear text -- acceptable for the
    demo user table in this module, not for real deployments.
    """
    implements(checkers.ICredentialsChecker)
    credentialInterfaces = (credentials.IUsernamePassword,)

    def __init__(self, passwords):
        # Maps username -> clear-text password.
        self.passwords = passwords

    def requestAvatarId(self, credentials):
        # BUG FIX: `defer` was never imported at module level, so every call
        # raised NameError; import locally to keep this edit self-contained.
        from twisted.internet import defer
        username = credentials.username
        if username in self.passwords:
            if credentials.password == self.passwords[username]:
                return defer.succeed(username)
        # BUG FIX: the exception class is UnauthorizedLogin; the misspelled
        # "UnathorizedLogin" raised AttributeError instead of failing cleanly.
        return defer.fail(credential_error.UnauthorizedLogin("invalid username/password"))
class IUserAvatar(Interface):
    """Marker interface: implementations expose a ``username`` attribute."""
class UserAvatar(object):
    """Minimal IUserAvatar implementation wrapping a username."""
    implements(IUserAvatar)
    def __init__(self, username):
        self.username = username
class TestRealm(object):
    """Twisted cred realm mapping avatar ids to UserAvatar instances."""
    implements(portal.IRealm)

    def __init__(self, users):
        self.users = users

    def requestAvatar(self, avatarId, mind, *interfaces):
        # BUG FIX: the original tested the undefined name INamedUserAvatar,
        # raising NameError on every call; the interface defined in this
        # module is IUserAvatar.
        if IUserAvatar in interfaces:
            logout = lambda: None
            return (IUserAvatar,
                    UserAvatar(avatarId),
                    logout)
        else:
            raise KeyError('none of the requested interfaces is supported')
class RasterData(object):
    """Lazily-built blitzortung.geom.Raster grids for a lon/lat bounding box.

    `coord_sys` is the UTM projection used to convert a metric base length
    into degree deltas at the center of the box; one Raster per requested
    base length is cached in ``self.raster_data``.
    """

    def __init__(self, min_lon, max_lon, min_lat, max_lat, coord_sys):
        self.min_lon = min_lon
        self.max_lon = max_lon
        self.min_lat = min_lat
        self.max_lat = max_lat
        self.coord_sys = coord_sys
        # Cache: baselength (meters) -> Raster instance.
        self.raster_data = {}

    def fix_max(self, minimum, maximum, delta):
        # Shrink `maximum` so the span is an exact multiple of `delta`.
        whole_steps = math.floor((maximum - minimum) / delta)
        return minimum + whole_steps * delta

    def get_for(self, baselength):
        """Return the cached Raster for `baselength` meters, building it once."""
        if baselength in self.raster_data:
            return self.raster_data[baselength]
        center_lon = (self.min_lon + self.max_lon) / 2.0
        center_lat = (self.min_lat + self.max_lat) / 2.0
        center_x, center_y = pyproj.transform(WGS84, self.coord_sys, center_lon, center_lat)
        shifted_lon, shifted_lat = pyproj.transform(self.coord_sys, WGS84, center_x + baselength, center_y + baselength)
        delta_lon = shifted_lon - center_lon
        delta_lat = shifted_lat - center_lat
        grid = blitzortung.geom.Raster(
            self.min_lon,
            self.fix_max(self.min_lon, self.max_lon, delta_lon),
            self.min_lat,
            self.fix_max(self.min_lat, self.max_lat, delta_lat),
            delta_lon,
            delta_lat,
            blitzortung.geom.Geometry.DefaultSrid)
        self.raster_data[baselength] = grid
        return grid
# Pre-configured raster regions, keyed by the "region" RPC parameter:
# 1 = Europe, 2 = Oceania, 3 = North America.
raster = {}
raster[1] = RasterData(-12, 35, 35, 65, UTM_EU)
raster[2] = RasterData(140, 180, -50, -10, UTM_OC)
raster[3] = RasterData(-140, -50, 10, 60, UTM_USA)
class Blitzortung(jsonrpc.JSONRPC):
    """
    JSON-RPC resource exposing blitzortung.org lightning data:
    individual strokes, rasterized stroke grids, and detector stations.
    """
    addSlash = True

    def __force_min(self, number, min_number):
        # Clamp ``number`` from below to at least ``min_number``.
        return max(min_number, number)

    def __force_max(self, number, max_number):
        # Clamp ``number`` from above to at most ``max_number``.
        return min(max_number, number)

    def __force_range(self, number, min_number, max_number):
        # Clamp ``number`` into [min_number, max_number].
        return self.__force_min(self.__force_max(number, max_number), min_number)

    def jsonrpc_get_strokes(self, minute_length, min_id=None):
        """Return strokes of the last ``minute_length`` minutes (capped at
        24 h); ``min_id`` lets clients poll incrementally from the id in the
        previous response's ``next`` field."""
        minute_length = self.__force_range(minute_length, 0, 24*60)
        strokedb = blitzortung.db.stroke()
        endtime = datetime.datetime.utcnow()
        endtime = endtime.replace(tzinfo=pytz.UTC)
        endtime = endtime.replace(microsecond = 0)
        starttime = endtime - datetime.timedelta(minutes=minute_length)
        time_interval = blitzortung.db.TimeInterval(starttime, endtime)
        if min_id is not None:
            id_interval = blitzortung.db.IdInterval(min_id)
        else:
            id_interval = None
            min_id = 0  # so the log line below has a number to print
        area = None
        order = blitzortung.db.Order('id')
        reference_time = time.time()
        strokes = strokedb.select(time_interval, id_interval, area, order)
        query_time = time.time()
        max_id = None
        stroke_array = []
        for stroke in strokes:
            stroke_data = []
            timestamp = stroke.get_timestamp()
            # Age of the stroke in seconds relative to ``endtime``.
            stroke_data.append(((endtime - stroke.get_timestamp()).seconds))
            stroke_data.append(stroke.get_x())
            stroke_data.append(stroke.get_y())
            stroke_data.append(stroke.get_lateral_error())
            stroke_data.append(stroke.get_amplitude())
            stroke_data.append(stroke.get_station_count())
            stroke_data.append(stroke.get_type())
            stroke_array.append(stroke_data)
            max_id = stroke.get_id()
        response = {}
        response['s'] = stroke_array
        response['t'] = endtime.strftime("%Y%m%dT%H:%M:%S")
        if max_id:
            # Next id a polling client should pass as ``min_id``.
            response['next'] = long(max_id + 1)
        print 'get_strokes(%d, %d): #%d (%.2fs)' %(minute_length, min_id, len(strokes), query_time - reference_time)
        return response

    def jsonrpc_get_strokes_around(self, longitude, latitude, minute_length, min_id=None):
        # TODO: not implemented yet.
        pass

    def jsonrpc_get_strokes_raster(self, minute_length, raster_baselength=10000, minute_offset=0, region=1):
        """Return stroke counts rasterized on the region grid, plus a
        5-bin histogram; ``minute_offset`` (<= 0) shifts the window back."""
        raster_baselength = self.__force_min(raster_baselength, 5000)
        minute_length = self.__force_range(minute_length, 0, 24 * 60)
        # Offset may reach back at most 24 h including the window itself.
        minute_offset = self.__force_range(minute_offset, -24 * 60 + minute_length, 0)
        strokedb = blitzortung.db.stroke()
        endtime = datetime.datetime.utcnow()
        endtime = endtime.replace(microsecond = 0)
        endtime += datetime.timedelta(minutes=minute_offset)
        starttime = endtime - datetime.timedelta(minutes=minute_length)
        time_interval = blitzortung.db.TimeInterval(starttime, endtime)
        reference_time = time.time()
        raster_data = raster[region].get_for(raster_baselength)
        raster_strokes = strokedb.select_raster(raster_data, time_interval)
        histogram = strokedb.select_histogram(minute_length, minute_offset, region, 5)
        query_time = time.time()
        endtime = endtime.replace(tzinfo=pytz.UTC)
        reduced_stroke_array = raster_strokes.to_reduced_array(endtime)
        # Grid geometry accompanies the data so clients can place cells.
        response = {}
        response['r'] = reduced_stroke_array
        response['xd'] = raster_data.get_x_div()
        response['yd'] = raster_data.get_y_div()
        response['x0'] = raster_data.get_x_min()
        response['y1'] = raster_data.get_y_max()
        response['xc'] = raster_data.get_x_bin_count()
        response['yc'] = raster_data.get_y_bin_count()
        response['t'] = endtime.strftime("%Y%m%dT%H:%M:%S")
        response['h'] = histogram
        print 'get_strokes_raster(%d, %d, %d, %d): #%d (%.2fs)' %(minute_length, raster_baselength, minute_offset, region, len(reduced_stroke_array), query_time - reference_time)
        return response

    def jsonrpc_get_stations(self):
        """Return all detector stations with location and last-seen time."""
        stationsdb = blitzortung.db.station()
        reference_time = time.time()
        stations = stationsdb.select()
        query_time = time.time()
        station_array = []
        for station in stations:
            station_data = []
            station_data.append(station.get_number())
            station_data.append(station.get_location_name())
            station_data.append(station.get_country())
            station_data.append(station.get_x())
            station_data.append(station.get_y())
            if station.get_timestamp():
                # Millisecond precision: strip the last three microsecond digits.
                station_data.append(station.get_timestamp().strftime("%Y%m%dT%H:%M:%S.%f")[:-3])
            else:
                station_data.append('')
            station_array.append(station_data)
        response = {'stations': station_array}
        print 'get_stations(): #%d (%.2fs)' %(len(stations), query_time - reference_time)
        return response
# Demo credentials; NOTE(review): plain-text test account only.
users = {'test':'test'}
# Set up the application and the JSON-RPC resource.
application = service.Application("Blitzortung.org JSON-RPC Server")
logfile = DailyLogFile("webservice.log", "/var/log/blitzortung")
application.setComponent(ILogObserver, FileLogObserver(logfile).emit)
root = Blitzortung()
credentialFactory = DigestCredentialFactory("md5", "blitzortung.org")
# Define the credential checker the application will be using and wrap the JSON-RPC resource.
checker = InMemoryUsernamePasswordDatabaseDontUse()
checker.addUser('test','test')
realm_name = "Blitzortung.org JSON-RPC App"
wrappedRoot = wrapResource(root, [checker], realmName=realm_name)
class PublicHTMLRealm(object):
    """Realm serving each avatar's ~/public_html directory as a web resource."""
    implements(portal.IRealm)

    def requestAvatar(self, avatarId, mind, *interfaces):
        # Only IResource avatars are supported; logout is a no-op.
        if IResource in interfaces:
            return (IResource, File("/home/%s/public_html" % (avatarId,)), lambda: None)
        raise NotImplementedError()
portal = portal.Portal(PublicHTMLRealm(), [checker])
resource = HTTPAuthSessionWrapper(portal, [credentialFactory])
# With the wrapped root, we can set up the server as usual.
#site = server.Site(resource=wrappedRoot)
# NOTE(review): the site below publishes ``root`` directly, i.e. WITHOUT
# authentication; ``wrappedRoot`` and ``resource`` above are unused —
# confirm whether that is intentional.
config = blitzortung.config.config()
site = server.Site(root)
jsonrpcServer = internet.TCPServer(config.get_webservice_port(), site)
jsonrpcServer.setServiceParent(application)
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 12 12:07:04 2017
@author: tannerse
"""
from __future__ import division
import os
import fnmatch
import gzip
import re
import subprocess
import socket
# Decompress each *.gz census dump, normalize its markup into one record
# per line, then hand the text file to a Stata do-file for compilation.
directory = '/path/to/raw/data'
os.chdir(directory)
# fix: fnmatch.filter() needs an iterable of names; filtering the
# directory *string* iterated over its characters and matched nothing.
files = fnmatch.filter(os.listdir(directory), '*.gz')
count = 1
for filename in files:
    filenew = directory + '\\data\\' + filename[:-3] + '.txt'
    # fix: open the output with 'w' so any stale file is truncated; the
    # original appended ('a+') and tried os.remove() on the file while it
    # was still open, which fails on Windows.
    # NOTE(review): under Python 3 gzip 'rb' yields bytes while the regex
    # patterns are str — this script assumes Python 2; confirm before porting.
    with gzip.open(filename, 'rb') as f, open(filenew, 'w') as j:
        file_content = f.read()
        total = []
        chunk_size = 10000000
        for x in range(0, len(file_content), chunk_size):
            # fix: slice each chunk starting at x; the original
            # [x:chunk_size] reprocessed chunk 0 and then yielded empty
            # slices for every later x. (Also dropped the stray
            # "y = y + 10000000" line: y was never defined.)
            temp = re.sub(r'\n', r'', file_content[x:x + chunk_size])
            temp = re.sub(r'person principal', r'\n', temp)
            temp = re.sub(r'[\$]', r'', temp)
            temp = re.sub(r'labelId', r'$', temp)
            temp = re.sub(r'[<]', r'', temp)
            temp = re.sub(r'[>]', r'', temp)
            temp = re.sub(r'[=]', r'', temp)
            temp = re.sub(r'["]', r'', temp)
            total.append(temp)
        text = ''.join(total)
        text = re.sub(r'person description', r'\n', text)
        text = re.sub(r'labelId', r'$', text)
        j.write(text)
    # run do file Stata on the freshly written (and now closed) text file
    dofile = 'C:/path/to/census_compile.do'
    # assuming Stata path is the following
    cmd = ['C:\\Program Files (x86)\\Stata14\\StataSE-64.exe', 'do', dofile, filenew, str(count)]
    subprocess.call(cmd)
    os.remove(filenew)
    # fix: report progress before incrementing so counts are accurate;
    # the original print lines were syntactically invalid. print() with a
    # single argument is valid in both Python 2 and 3.
    complete = count / len(files)
    print('Files processed: %d' % count)
    print('Percent complete: %.2f' % complete)
    count += 1
Update: clean up some of the code.
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 12 12:07:04 2017
@author: tannerse
"""
from __future__ import division
import os
import fnmatch
import gzip
import re
import subprocess
import socket
# Decompress each *.gz census dump, normalize its markup into one record
# per line, then hand the text file to a Stata do-file for compilation.
directory = '/path/to/raw/data'
os.chdir(directory)
# fix: fnmatch.filter() needs an iterable of names; filtering the
# directory *string* iterated over its characters and matched nothing.
files = fnmatch.filter(os.listdir(directory), '*.gz')
count = 1
for filename in files:
    filenew = directory + '\\data\\' + filename[:-3] + '.txt'
    # fix: open the output with 'w' so any stale file is truncated; the
    # original appended ('a+') and tried os.remove() on the file while it
    # was still open, which fails on Windows.
    # NOTE(review): under Python 3 gzip 'rb' yields bytes while the regex
    # patterns are str — this script assumes Python 2; confirm before porting.
    with gzip.open(filename, 'rb') as f, open(filenew, 'w') as j:
        file_content = f.read()
        total = []
        chunk_size = 10000000
        for x in range(0, len(file_content), chunk_size):
            # fix: slice each chunk starting at x; the original
            # [x:chunk_size] reprocessed chunk 0 and then yielded empty
            # slices for every later x. (Also dropped the stray
            # "y = y + 10000000" line: y was never defined.)
            temp = re.sub(r'\n', r'', file_content[x:x + chunk_size])
            temp = re.sub(r'person principal', r'\n', temp)
            temp = re.sub(r'[\$]', r'', temp)
            temp = re.sub(r'labelId', r'$', temp)
            temp = re.sub(r'[<]', r'', temp)
            temp = re.sub(r'[>]', r'', temp)
            temp = re.sub(r'[=]', r'', temp)
            temp = re.sub(r'["]', r'', temp)
            total.append(temp)
        text = ''.join(total)
        text = re.sub(r'person description', r'\n', text)
        text = re.sub(r'labelId', r'$', text)
        j.write(text)
    # run do file Stata on the freshly written (and now closed) text file
    dofile = 'C:/path/to/census_compile.do'
    # assuming Stata path is the following
    cmd = ['C:\\Program Files (x86)\\Stata14\\StataSE-64.exe', 'do', dofile, filenew, str(count)]
    subprocess.call(cmd)
    os.remove(filenew)
    # fix: report progress before incrementing so counts are accurate;
    # the original print lines were syntactically invalid. print() with a
    # single argument is valid in both Python 2 and 3.
    complete = count / len(files)
    print('Files processed: %d' % count)
    print('Percent complete: %.2f \n' % complete)
    count += 1
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""The Suzuki-Trotter product formula."""
from typing import Callable, Optional, Union
import warnings
import numpy as np
from qiskit.circuit.quantumcircuit import QuantumCircuit
from qiskit.quantum_info.operators import SparsePauliOp, Pauli
from .product_formula import ProductFormula
class SuzukiTrotter(ProductFormula):
    r"""The (higher order) Suzuki-Trotter product formula.

    The Suzuki-Trotter formulas improve the error of the Lie-Trotter approximation.
    For example, the second order decomposition is

    .. math::

        e^{A + B} \approx e^{B/2} e^{A} e^{B/2}.

    Higher order decompositions are based on recursions, see Ref. [1] for more details.

    In this implementation, the operators are provided as sum terms of a Pauli operator.
    For example, in the second order Suzuki-Trotter decomposition we approximate

    .. math::

        e^{-it(XX + ZZ)} = e^{-it/2 ZZ}e^{-it XX}e^{-it/2 ZZ} + \mathcal{O}(t^3).

    References:
        [1]: D. Berry, G. Ahokas, R. Cleve and B. Sanders,
        "Efficient quantum algorithms for simulating sparse Hamiltonians" (2006).
        `arXiv:quant-ph/0508139 <https://arxiv.org/abs/quant-ph/0508139>`_
        [2]: N. Hatano and M. Suzuki,
        "Finding Exponential Product Formulas of Higher Orders" (2005).
        `arXiv:math-ph/0506007 <https://arxiv.org/pdf/math-ph/0506007.pdf>`_
    """

    def __init__(
        self,
        order: int = 2,
        reps: int = 1,
        insert_barriers: bool = False,
        cx_structure: str = "chain",
        atomic_evolution: Optional[
            Callable[[Union[Pauli, SparsePauliOp], float], QuantumCircuit]
        ] = None,
    ) -> None:
        """
        Args:
            order: The order of the product formula.
            reps: The number of time steps.
            insert_barriers: Whether to insert barriers between the atomic evolutions.
            cx_structure: How to arrange the CX gates for the Pauli evolutions, can be "chain",
                where next neighbor connections are used, or "fountain", where all qubits are
                connected to one.
            atomic_evolution: A function to construct the circuit for the evolution of single
                Pauli string. Per default, a single Pauli evolution is decomposed in a CX chain
                and a single qubit Z rotation.
        """
        if order % 2 == 1:
            # fix: add the missing space between "even" and "orders" in the
            # concatenated warning message.
            warnings.warn(
                "SuzukiTrotter for odd orders is deprecated as of 0.20.0, and will be "
                "removed no earlier than 3 months after that release date. Suzuki "
                "product formulae are symmetric and therefore only defined for even "
                "orders.",
                DeprecationWarning,
                stacklevel=2,
            )
            # TODO replace deprecation warning by the following error and add unit test for odd
            # raise ValueError("Suzuki product formulae are symmetric and therefore only defined "
            #                  "for even orders.")
        super().__init__(order, reps, insert_barriers, cx_structure, atomic_evolution)

    def synthesize(self, evolution):
        """Synthesize the Trotterized evolution circuit for ``evolution``."""
        # get operators and time to evolve
        operators = evolution.operator
        time = evolution.time
        if not isinstance(operators, list):
            pauli_list = [(Pauli(op), np.real(coeff)) for op, coeff in operators.to_list()]
        else:
            pauli_list = [(op, 1) for op in operators]
        # expand the (operator, time) pairs according to the Suzuki recursion
        ops_to_evolve = self._recurse(self.order, time / self.reps, pauli_list)

        # construct the circuit for a single Trotter repetition
        single_rep = QuantumCircuit(operators[0].num_qubits)
        first_barrier = False
        for op, coeff in ops_to_evolve:
            # insert barriers between (not before) the atomic evolutions
            if first_barrier:
                if self.insert_barriers:
                    single_rep.barrier()
            else:
                first_barrier = True
            single_rep.compose(self.atomic_evolution(op, coeff), wrap=True, inplace=True)

        evolution_circuit = QuantumCircuit(operators[0].num_qubits)
        first_barrier = False
        for _ in range(self.reps):
            if first_barrier:
                if self.insert_barriers:
                    # fix: the inter-repetition barrier must go on the output
                    # circuit; the original called single_rep.barrier(), which
                    # mutated the template and accumulated extra barriers
                    # inside every subsequently composed repetition.
                    evolution_circuit.barrier()
            else:
                first_barrier = True
            evolution_circuit.compose(single_rep, inplace=True)
        return evolution_circuit

    @staticmethod
    def _recurse(order, time, pauli_list):
        """Recursively expand ``pauli_list`` into the symmetric order-``order``
        Suzuki splitting for one step of length ``time``, returning
        (operator, evolution time) pairs."""
        if order == 1:
            return pauli_list
        elif order == 2:
            # symmetric splitting: half steps around the full last term
            halves = [(op, coeff * time / 2) for op, coeff in pauli_list[:-1]]
            full = [(pauli_list[-1][0], time * pauli_list[-1][1])]
            return halves + full + list(reversed(halves))
        else:
            # Suzuki's fractal decomposition coefficient, see Ref. [2]
            reduction = 1 / (4 - 4 ** (1 / (order - 1)))
            outer = 2 * SuzukiTrotter._recurse(
                order - 2, time=reduction * time, pauli_list=pauli_list
            )
            inner = SuzukiTrotter._recurse(
                order - 2, time=(1 - 4 * reduction) * time, pauli_list=pauli_list
            )
            return outer + inner + outer
Fix the order of the Suzuki-Trotter formula in the documentation (#8171)
My previous commit https://github.com/Qiskit/qiskit-terra/pull/8167 corrects the documentation for the Lie-Trotter formula. According to my understanding of [1], the Suzuki-Trotter formula is second order accurate. This is now corrected in this commit.
[1] https://arxiv.org/pdf/math-ph/0506007.pdf
Co-authored-by: mergify[bot] <b09a6ee808b67e98a221404e7aaa52e0398a4954@users.noreply.github.com>
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""The Suzuki-Trotter product formula."""
from typing import Callable, Optional, Union
import warnings
import numpy as np
from qiskit.circuit.quantumcircuit import QuantumCircuit
from qiskit.quantum_info.operators import SparsePauliOp, Pauli
from .product_formula import ProductFormula
class SuzukiTrotter(ProductFormula):
    r"""The (higher order) Suzuki-Trotter product formula.

    The Suzuki-Trotter formulas improve the error of the Lie-Trotter approximation.
    For example, the second order decomposition is

    .. math::

        e^{A + B} \approx e^{B/2} e^{A} e^{B/2}.

    Higher order decompositions are based on recursions, see Ref. [1] for more details.

    In this implementation, the operators are provided as sum terms of a Pauli operator.
    For example, in the second order Suzuki-Trotter decomposition we approximate

    .. math::

        e^{-it(XX + ZZ)} = e^{-it/2 ZZ}e^{-it XX}e^{-it/2 ZZ} + \mathcal{O}(t^3).

    References:
        [1]: D. Berry, G. Ahokas, R. Cleve and B. Sanders,
        "Efficient quantum algorithms for simulating sparse Hamiltonians" (2006).
        `arXiv:quant-ph/0508139 <https://arxiv.org/abs/quant-ph/0508139>`_
        [2]: N. Hatano and M. Suzuki,
        "Finding Exponential Product Formulas of Higher Orders" (2005).
        `arXiv:math-ph/0506007 <https://arxiv.org/pdf/math-ph/0506007.pdf>`_
    """

    def __init__(
        self,
        order: int = 2,
        reps: int = 1,
        insert_barriers: bool = False,
        cx_structure: str = "chain",
        atomic_evolution: Optional[
            Callable[[Union[Pauli, SparsePauliOp], float], QuantumCircuit]
        ] = None,
    ) -> None:
        """
        Args:
            order: The order of the product formula.
            reps: The number of time steps.
            insert_barriers: Whether to insert barriers between the atomic evolutions.
            cx_structure: How to arrange the CX gates for the Pauli evolutions, can be "chain",
                where next neighbor connections are used, or "fountain", where all qubits are
                connected to one.
            atomic_evolution: A function to construct the circuit for the evolution of single
                Pauli string. Per default, a single Pauli evolution is decomposed in a CX chain
                and a single qubit Z rotation.
        """
        if order % 2 == 1:
            # fix: add the missing space between "even" and "orders" in the
            # concatenated warning message.
            warnings.warn(
                "SuzukiTrotter for odd orders is deprecated as of 0.20.0, and will be "
                "removed no earlier than 3 months after that release date. Suzuki "
                "product formulae are symmetric and therefore only defined for even "
                "orders.",
                DeprecationWarning,
                stacklevel=2,
            )
            # TODO replace deprecation warning by the following error and add unit test for odd
            # raise ValueError("Suzuki product formulae are symmetric and therefore only defined "
            #                  "for even orders.")
        super().__init__(order, reps, insert_barriers, cx_structure, atomic_evolution)

    def synthesize(self, evolution):
        """Synthesize the Trotterized evolution circuit for ``evolution``."""
        # get operators and time to evolve
        operators = evolution.operator
        time = evolution.time
        if not isinstance(operators, list):
            pauli_list = [(Pauli(op), np.real(coeff)) for op, coeff in operators.to_list()]
        else:
            pauli_list = [(op, 1) for op in operators]
        # expand the (operator, time) pairs according to the Suzuki recursion
        ops_to_evolve = self._recurse(self.order, time / self.reps, pauli_list)

        # construct the circuit for a single Trotter repetition
        single_rep = QuantumCircuit(operators[0].num_qubits)
        first_barrier = False
        for op, coeff in ops_to_evolve:
            # insert barriers between (not before) the atomic evolutions
            if first_barrier:
                if self.insert_barriers:
                    single_rep.barrier()
            else:
                first_barrier = True
            single_rep.compose(self.atomic_evolution(op, coeff), wrap=True, inplace=True)

        evolution_circuit = QuantumCircuit(operators[0].num_qubits)
        first_barrier = False
        for _ in range(self.reps):
            if first_barrier:
                if self.insert_barriers:
                    # fix: the inter-repetition barrier must go on the output
                    # circuit; the original called single_rep.barrier(), which
                    # mutated the template and accumulated extra barriers
                    # inside every subsequently composed repetition.
                    evolution_circuit.barrier()
            else:
                first_barrier = True
            evolution_circuit.compose(single_rep, inplace=True)
        return evolution_circuit

    @staticmethod
    def _recurse(order, time, pauli_list):
        """Recursively expand ``pauli_list`` into the symmetric order-``order``
        Suzuki splitting for one step of length ``time``, returning
        (operator, evolution time) pairs."""
        if order == 1:
            return pauli_list
        elif order == 2:
            # symmetric splitting: half steps around the full last term
            halves = [(op, coeff * time / 2) for op, coeff in pauli_list[:-1]]
            full = [(pauli_list[-1][0], time * pauli_list[-1][1])]
            return halves + full + list(reversed(halves))
        else:
            # Suzuki's fractal decomposition coefficient, see Ref. [2]
            reduction = 1 / (4 - 4 ** (1 / (order - 1)))
            outer = 2 * SuzukiTrotter._recurse(
                order - 2, time=reduction * time, pauli_list=pauli_list
            )
            inner = SuzukiTrotter._recurse(
                order - 2, time=(1 - 4 * reduction) * time, pauli_list=pauli_list
            )
            return outer + inner + outer
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from mock import call
from quantum.agent.linux.iptables_firewall import IptablesFirewallDriver
from quantum.tests.unit import test_api_v2
from quantum.tests import base
# Shorthand for the v2 API test helper that generates UUIDs.
_uuid = test_api_v2._uuid

# Sample CIDR prefixes and addresses used by the firewall rule tests.
FAKE_PREFIX = {'IPv4': '10.0.0.0/24',
               'IPv6': 'fe80::0/48'}
FAKE_IP = {'IPv4': '10.0.0.1',
           'IPv6': 'fe80::1'}
class IptablesFirewallTestCase(base.BaseTestCase):
def setUp(self):
super(IptablesFirewallTestCase, self).setUp()
self.utils_exec_p = mock.patch(
'quantum.agent.linux.utils.execute')
self.utils_exec = self.utils_exec_p.start()
self.addCleanup(self.utils_exec_p.stop)
self.iptables_cls_p = mock.patch(
'quantum.agent.linux.iptables_manager.IptablesManager')
iptables_cls = self.iptables_cls_p.start()
self.addCleanup(self.iptables_cls_p.stop)
self.iptables_inst = mock.Mock()
self.v4filter_inst = mock.Mock()
self.v6filter_inst = mock.Mock()
self.iptables_inst.ipv4 = {'filter': self.v4filter_inst}
self.iptables_inst.ipv6 = {'filter': self.v6filter_inst}
iptables_cls.return_value = self.iptables_inst
self.firewall = IptablesFirewallDriver()
self.firewall.iptables = self.iptables_inst
def _fake_port(self):
return {'device': 'tapfake_dev',
'mac_address': 'ff:ff:ff:ff',
'fixed_ips': [FAKE_IP['IPv4'],
FAKE_IP['IPv6']]}
def test_prepare_port_filter_with_no_sg(self):
port = self._fake_port()
self.firewall.prepare_port_filter(port)
calls = [call.add_chain('sg-fallback'),
call.add_rule('sg-fallback', '-j DROP'),
call.ensure_remove_chain('sg-chain'),
call.add_chain('sg-chain'),
call.add_chain('ifake_dev'),
call.add_rule('FORWARD',
'-m physdev --physdev-is-bridged '
'--physdev-out tapfake_dev '
'-j $sg-chain'),
call.add_rule('sg-chain',
'-m physdev --physdev-is-bridged '
'--physdev-out tapfake_dev '
'-j $ifake_dev'),
call.add_rule(
'ifake_dev', '-m state --state INVALID -j DROP'),
call.add_rule(
'ifake_dev',
'-m state --state ESTABLISHED,RELATED -j RETURN'),
call.add_rule('ifake_dev', '-j $sg-fallback'),
call.add_chain('ofake_dev'),
call.add_rule('FORWARD',
'-m physdev --physdev-is-bridged '
'--physdev-in tapfake_dev '
'-j $sg-chain'),
call.add_rule('sg-chain',
'-m physdev --physdev-is-bridged '
'--physdev-in tapfake_dev '
'-j $ofake_dev'),
call.add_rule('INPUT',
'-m physdev --physdev-is-bridged '
'--physdev-in tapfake_dev '
'-j $ofake_dev'),
call.add_rule(
'ofake_dev', '-m mac ! --mac-source ff:ff:ff:ff -j DROP'),
call.add_rule(
'ofake_dev',
'-p udp --sport 68 --dport 67 -j RETURN'),
call.add_rule('ofake_dev', '! -s 10.0.0.1 -j DROP'),
call.add_rule(
'ofake_dev',
'-p udp --sport 67 --dport 68 -j DROP'),
call.add_rule(
'ofake_dev', '-m state --state INVALID -j DROP'),
call.add_rule(
'ofake_dev',
'-m state --state ESTABLISHED,RELATED -j RETURN'),
call.add_rule('ofake_dev', '-j $sg-fallback'),
call.add_rule('sg-chain', '-j ACCEPT')]
self.v4filter_inst.assert_has_calls(calls)
def test_filter_ipv4_ingress(self):
rule = {'ethertype': 'IPv4',
'direction': 'ingress'}
ingress = call.add_rule('ifake_dev', '-j RETURN')
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'source_ip_prefix': prefix}
ingress = call.add_rule('ifake_dev', '-j RETURN -s %s' % prefix)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_tcp(self):
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'tcp'}
ingress = call.add_rule('ifake_dev', '-j RETURN -p tcp')
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_tcp_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'tcp',
'source_ip_prefix': prefix}
ingress = call.add_rule('ifake_dev', '-j RETURN -p tcp -s %s' % prefix)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_icmp(self):
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'icmp'}
ingress = call.add_rule('ifake_dev', '-j RETURN -p icmp')
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_icmp_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'icmp',
'source_ip_prefix': prefix}
ingress = call.add_rule(
'ifake_dev', '-j RETURN -p icmp -s %s' % prefix)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_tcp_port(self):
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 10}
ingress = call.add_rule('ifake_dev', '-j RETURN -p tcp --dport 10')
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_tcp_mport(self):
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 100}
ingress = call.add_rule(
'ifake_dev',
'-j RETURN -p tcp -m multiport --dports 10:100')
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_tcp_mport_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 100,
'source_ip_prefix': prefix}
ingress = call.add_rule(
'ifake_dev',
'-j RETURN -p tcp -m multiport '
'--dports 10:100 -s %s' % prefix)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_udp(self):
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'udp'}
ingress = call.add_rule('ifake_dev', '-j RETURN -p udp')
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_udp_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'udp',
'source_ip_prefix': prefix}
ingress = call.add_rule('ifake_dev', '-j RETURN -p udp -s %s' % prefix)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_udp_port(self):
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 10}
ingress = call.add_rule('ifake_dev', '-j RETURN -p udp --dport 10')
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_udp_mport(self):
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 100}
ingress = call.add_rule(
'ifake_dev',
'-j RETURN -p udp -m multiport --dports 10:100')
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_udp_mport_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 100,
'source_ip_prefix': prefix}
ingress = call.add_rule(
'ifake_dev',
'-j RETURN -p udp -m multiport '
'--dports 10:100 -s %s' % prefix)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress(self):
rule = {'ethertype': 'IPv4',
'direction': 'egress'}
egress = call.add_rule('ofake_dev', '-j RETURN')
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'source_ip_prefix': prefix}
egress = call.add_rule('ofake_dev', '-j RETURN -s %s' % prefix)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_tcp(self):
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'tcp'}
egress = call.add_rule('ofake_dev', '-j RETURN -p tcp')
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_tcp_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'tcp',
'source_ip_prefix': prefix}
egress = call.add_rule('ofake_dev', '-j RETURN -p tcp -s %s' % prefix)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_icmp(self):
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'icmp'}
egress = call.add_rule('ofake_dev', '-j RETURN -p icmp')
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_icmp_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'icmp',
'source_ip_prefix': prefix}
egress = call.add_rule(
'ofake_dev', '-j RETURN -p icmp -s %s' % prefix)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_tcp_port(self):
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 10}
egress = call.add_rule('ofake_dev', '-j RETURN -p tcp --dport 10')
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_tcp_mport(self):
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 100}
egress = call.add_rule(
'ofake_dev',
'-j RETURN -p tcp -m multiport --dports 10:100')
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_tcp_mport_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 100,
'source_ip_prefix': prefix}
egress = call.add_rule(
'ofake_dev',
'-j RETURN -p tcp -m multiport '
'--dports 10:100 -s %s' % prefix)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_udp(self):
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'udp'}
egress = call.add_rule('ofake_dev', '-j RETURN -p udp')
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_udp_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'udp',
'source_ip_prefix': prefix}
egress = call.add_rule('ofake_dev', '-j RETURN -p udp -s %s' % prefix)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_udp_port(self):
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 10}
egress = call.add_rule('ofake_dev', '-j RETURN -p udp --dport 10')
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_udp_mport(self):
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 100}
egress = call.add_rule(
'ofake_dev',
'-j RETURN -p udp -m multiport --dports 10:100')
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_udp_mport_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 100,
'source_ip_prefix': prefix}
egress = call.add_rule(
'ofake_dev',
'-j RETURN -p udp -m multiport '
'--dports 10:100 -s %s' % prefix)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress(self):
rule = {'ethertype': 'IPv6',
'direction': 'ingress'}
ingress = call.add_rule('ifake_dev', '-j RETURN')
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'source_ip_prefix': prefix}
ingress = call.add_rule('ifake_dev', '-j RETURN -s %s' % prefix)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_tcp(self):
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'tcp'}
ingress = call.add_rule('ifake_dev', '-j RETURN -p tcp')
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_tcp_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'tcp',
'source_ip_prefix': prefix}
ingress = call.add_rule('ifake_dev', '-j RETURN -p tcp -s %s' % prefix)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_tcp_port(self):
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 10}
ingress = call.add_rule('ifake_dev', '-j RETURN -p tcp --dport 10')
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_icmp(self):
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'icmp'}
ingress = call.add_rule('ifake_dev', '-j RETURN -p icmpv6')
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_icmp_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'icmp',
'source_ip_prefix': prefix}
ingress = call.add_rule(
'ifake_dev', '-j RETURN -p icmpv6 -s %s' % prefix)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_tcp_mport(self):
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 100}
ingress = call.add_rule(
'ifake_dev',
'-j RETURN -p tcp -m multiport --dports 10:100')
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_tcp_mport_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 100,
'source_ip_prefix': prefix}
ingress = call.add_rule(
'ifake_dev',
'-j RETURN -p tcp -m multiport '
'--dports 10:100 -s %s' % prefix)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_udp(self):
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'udp'}
ingress = call.add_rule('ifake_dev', '-j RETURN -p udp')
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_udp_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'udp',
'source_ip_prefix': prefix}
ingress = call.add_rule('ifake_dev', '-j RETURN -p udp -s %s' % prefix)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_udp_port(self):
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 10}
ingress = call.add_rule('ifake_dev', '-j RETURN -p udp --dport 10')
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_udp_mport(self):
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 100}
ingress = call.add_rule(
'ifake_dev',
'-j RETURN -p udp -m multiport --dports 10:100')
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_udp_mport_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 100,
'source_ip_prefix': prefix}
ingress = call.add_rule(
'ifake_dev',
'-j RETURN -p udp -m multiport '
'--dports 10:100 -s %s' % prefix)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress(self):
rule = {'ethertype': 'IPv6',
'direction': 'egress'}
egress = call.add_rule('ofake_dev', '-j RETURN')
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'source_ip_prefix': prefix}
egress = call.add_rule('ofake_dev', '-j RETURN -s %s' % prefix)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_tcp(self):
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'tcp'}
egress = call.add_rule('ofake_dev', '-j RETURN -p tcp')
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_tcp_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'tcp',
'source_ip_prefix': prefix}
egress = call.add_rule('ofake_dev', '-j RETURN -p tcp -s %s' % prefix)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_icmp(self):
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'icmp'}
egress = call.add_rule('ofake_dev', '-j RETURN -p icmpv6')
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_icmp_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'icmp',
'source_ip_prefix': prefix}
egress = call.add_rule(
'ofake_dev', '-j RETURN -p icmpv6 -s %s' % prefix)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_tcp_port(self):
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 10}
egress = call.add_rule('ofake_dev', '-j RETURN -p tcp --dport 10')
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_tcp_mport(self):
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 100}
egress = call.add_rule(
'ofake_dev',
'-j RETURN -p tcp -m multiport --dports 10:100')
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_tcp_mport_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 100,
'source_ip_prefix': prefix}
egress = call.add_rule(
'ofake_dev',
'-j RETURN -p tcp -m multiport '
'--dports 10:100 -s %s' % prefix)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_udp(self):
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'udp'}
egress = call.add_rule('ofake_dev', '-j RETURN -p udp')
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_udp_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'udp',
'source_ip_prefix': prefix}
egress = call.add_rule('ofake_dev', '-j RETURN -p udp -s %s' % prefix)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_udp_port(self):
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 10}
egress = call.add_rule('ofake_dev', '-j RETURN -p udp --dport 10')
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_udp_mport(self):
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 100}
egress = call.add_rule(
'ofake_dev',
'-j RETURN -p udp -m multiport --dports 10:100')
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_udp_mport_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 100,
'source_ip_prefix': prefix}
egress = call.add_rule(
'ofake_dev',
'-j RETURN -p udp -m multiport '
'--dports 10:100 -s %s' % prefix)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def _test_prepare_port_filter(self,
                              rule,
                              ingress_expected_call=None,
                              egress_expected_call=None):
    """Drive prepare_port_filter() with one SG rule and verify the calls.

    Builds a fake port carrying exactly *rule*, runs the driver, then
    asserts that the filter-table mock for the rule's address family saw
    the full ordered sequence of chain/rule setup calls, with the
    expected ingress/egress rule call spliced into the right position.
    """
    port = self._fake_port()
    ethertype = rule['ethertype']
    prefix = FAKE_IP[ethertype]
    # Default to the IPv4 filter table and its DHCP-client allow rule;
    # both are swapped below when the rule is IPv6.
    filter_inst = self.v4filter_inst
    dhcp_rule = call.add_rule(
        'ofake_dev',
        '-p udp --sport 68 --dport 67 -j RETURN')
    if ethertype == 'IPv6':
        filter_inst = self.v6filter_inst
        # For IPv6 the driver emits an ICMPv6 allow instead of DHCP.
        dhcp_rule = call.add_rule('ofake_dev', '-p icmpv6 -j RETURN')
    sg = [rule]
    port['security_group_rules'] = sg
    self.firewall.prepare_port_filter(port)
    # Ingress side: fallback chain, per-port chain, physdev jumps and
    # state tracking, then the expected per-rule call (if any).
    calls = [call.add_chain('sg-fallback'),
             call.add_rule('sg-fallback', '-j DROP'),
             call.ensure_remove_chain('sg-chain'),
             call.add_chain('sg-chain'),
             call.add_chain('ifake_dev'),
             call.add_rule('FORWARD',
                           '-m physdev --physdev-is-bridged '
                           '--physdev-out tapfake_dev '
                           '-j $sg-chain'),
             call.add_rule('sg-chain',
                           '-m physdev --physdev-is-bridged '
                           '--physdev-out tapfake_dev '
                           '-j $ifake_dev'),
             call.add_rule(
                 'ifake_dev', '-m state --state INVALID -j DROP'),
             call.add_rule(
                 'ifake_dev',
                 '-m state --state ESTABLISHED,RELATED -j RETURN')]
    if ingress_expected_call:
        calls.append(ingress_expected_call)
    # Egress side: per-port chain, physdev jumps, MAC anti-spoof,
    # DHCP/ICMPv6 allow, and IP anti-spoof on the port's fixed IP.
    calls += [call.add_rule('ifake_dev', '-j $sg-fallback'),
              call.add_chain('ofake_dev'),
              call.add_rule('FORWARD',
                            '-m physdev --physdev-is-bridged '
                            '--physdev-in tapfake_dev '
                            '-j $sg-chain'),
              call.add_rule('sg-chain',
                            '-m physdev --physdev-is-bridged '
                            '--physdev-in tapfake_dev '
                            '-j $ofake_dev'),
              call.add_rule('INPUT',
                            '-m physdev --physdev-is-bridged '
                            '--physdev-in tapfake_dev '
                            '-j $ofake_dev'),
              call.add_rule(
                  'ofake_dev',
                  '-m mac ! --mac-source ff:ff:ff:ff -j DROP'),
              dhcp_rule,
              call.add_rule('ofake_dev', '! -s %s -j DROP' % prefix)]
    if ethertype == 'IPv4':
        # Only IPv4 blocks the port from acting as a DHCP server.
        calls.append(call.add_rule(
            'ofake_dev',
            '-p udp --sport 67 --dport 68 -j DROP'))
    calls += [call.add_rule(
        'ofake_dev', '-m state --state INVALID -j DROP'),
        call.add_rule(
            'ofake_dev',
            '-m state --state ESTABLISHED,RELATED -j RETURN')]
    if egress_expected_call:
        calls.append(egress_expected_call)
    calls += [call.add_rule('ofake_dev', '-j $sg-fallback'),
              call.add_rule('sg-chain', '-j ACCEPT')]
    filter_inst.assert_has_calls(calls)
def test_update_delete_port_filter(self):
    """End-to-end prepare -> update -> remove flow for a single port.

    Verifies the full ordered sequence of v4 filter-table calls:
    initial setup with an ingress rule, a rebuild after the SG flips
    to an egress rule, and final chain teardown.  Updating or removing
    an unknown device must be a silent no-op.
    """
    port = self._fake_port()
    port['security_group_rules'] = [{'ethertype': 'IPv4',
                                     'direction': 'ingress'}]
    self.firewall.prepare_port_filter(port)
    port['security_group_rules'] = [{'ethertype': 'IPv4',
                                     'direction': 'egress'}]
    self.firewall.update_port_filter(port)
    # Unknown devices must be ignored by both update and remove.
    self.firewall.update_port_filter({'device': 'no-exist-device'})
    self.firewall.remove_port_filter(port)
    self.firewall.remove_port_filter({'device': 'no-exist-device'})
    # Phase 1: chains and rules created by prepare_port_filter().
    calls = [call.add_chain('sg-fallback'),
             call.add_rule('sg-fallback', '-j DROP'),
             call.ensure_remove_chain('sg-chain'),
             call.add_chain('sg-chain'),
             call.add_chain('ifake_dev'),
             call.add_rule(
                 'FORWARD',
                 '-m physdev --physdev-is-bridged '
                 '--physdev-out tapfake_dev -j $sg-chain'),
             call.add_rule(
                 'sg-chain',
                 '-m physdev --physdev-is-bridged '
                 '--physdev-out tapfake_dev -j $ifake_dev'),
             call.add_rule(
                 'ifake_dev', '-m state --state INVALID -j DROP'),
             call.add_rule(
                 'ifake_dev',
                 '-m state --state ESTABLISHED,RELATED -j RETURN'),
             call.add_rule('ifake_dev', '-j RETURN'),
             call.add_rule('ifake_dev', '-j $sg-fallback'),
             call.add_chain('ofake_dev'),
             call.add_rule(
                 'FORWARD',
                 '-m physdev --physdev-is-bridged '
                 '--physdev-in tapfake_dev -j $sg-chain'),
             call.add_rule(
                 'sg-chain',
                 '-m physdev --physdev-is-bridged '
                 '--physdev-in tapfake_dev -j $ofake_dev'),
             call.add_rule(
                 'INPUT',
                 '-m physdev --physdev-is-bridged '
                 '--physdev-in tapfake_dev -j $ofake_dev'),
             call.add_rule(
                 'ofake_dev',
                 '-m mac ! --mac-source ff:ff:ff:ff -j DROP'),
             call.add_rule(
                 'ofake_dev',
                 '-p udp --sport 68 --dport 67 -j RETURN'),
             call.add_rule(
                 'ofake_dev',
                 '! -s 10.0.0.1 -j DROP'),
             call.add_rule(
                 'ofake_dev',
                 '-p udp --sport 67 --dport 68 -j DROP'),
             call.add_rule(
                 'ofake_dev', '-m state --state INVALID -j DROP'),
             call.add_rule(
                 'ofake_dev',
                 '-m state --state ESTABLISHED,RELATED -j RETURN'),
             call.add_rule('ofake_dev', '-j $sg-fallback'),
             call.add_rule('sg-chain', '-j ACCEPT'),
             # Phase 2: update_port_filter() tears down the per-port
             # chains and rebuilds them for the new (egress) rule set.
             call.ensure_remove_chain('ifake_dev'),
             call.ensure_remove_chain('ofake_dev'),
             call.ensure_remove_chain('sg-chain'),
             call.add_chain('sg-chain'),
             call.add_chain('ifake_dev'),
             call.add_rule(
                 'FORWARD',
                 '-m physdev --physdev-is-bridged '
                 '--physdev-out tapfake_dev -j $sg-chain'),
             call.add_rule(
                 'sg-chain',
                 '-m physdev --physdev-is-bridged '
                 '--physdev-out tapfake_dev -j $ifake_dev'),
             call.add_rule(
                 'ifake_dev',
                 '-m state --state INVALID -j DROP'),
             call.add_rule(
                 'ifake_dev',
                 '-m state --state ESTABLISHED,RELATED -j RETURN'),
             call.add_rule('ifake_dev', '-j $sg-fallback'),
             call.add_chain('ofake_dev'),
             call.add_rule(
                 'FORWARD',
                 '-m physdev --physdev-is-bridged '
                 '--physdev-in tapfake_dev -j $sg-chain'),
             call.add_rule(
                 'sg-chain',
                 '-m physdev --physdev-is-bridged '
                 '--physdev-in tapfake_dev -j $ofake_dev'),
             call.add_rule(
                 'INPUT',
                 '-m physdev --physdev-is-bridged '
                 '--physdev-in tapfake_dev -j $ofake_dev'),
             call.add_rule(
                 'ofake_dev',
                 '-m mac ! --mac-source ff:ff:ff:ff -j DROP'),
             call.add_rule(
                 'ofake_dev', '-p udp --sport 68 --dport 67 -j RETURN'),
             call.add_rule(
                 'ofake_dev', '! -s 10.0.0.1 -j DROP'),
             call.add_rule(
                 'ofake_dev', '-p udp --sport 67 --dport 68 -j DROP'),
             call.add_rule(
                 'ofake_dev', '-m state --state INVALID -j DROP'),
             call.add_rule(
                 'ofake_dev',
                 '-m state --state ESTABLISHED,RELATED -j RETURN'),
             call.add_rule('ofake_dev', '-j RETURN'),
             call.add_rule('ofake_dev', '-j $sg-fallback'),
             call.add_rule('sg-chain', '-j ACCEPT'),
             # Phase 3: remove_port_filter() deletes everything.
             call.ensure_remove_chain('ifake_dev'),
             call.ensure_remove_chain('ofake_dev'),
             call.ensure_remove_chain('sg-chain'),
             call.add_chain('sg-chain')]
    self.v4filter_inst.assert_has_calls(calls)
def test_remove_unknown_port(self):
    """Removing a port that was never prepared must not raise."""
    self.firewall.remove_port_filter(self._fake_port())
    # No iptables manipulation should have occurred for it.
    self.v4filter_inst.assert_has_calls([])
def test_defer_apply(self):
    """defer_apply() toggles deferral on at entry and off at exit."""
    with self.firewall.defer_apply():
        pass
    expected = [call.defer_apply_on(), call.defer_apply_off()]
    self.iptables_inst.assert_has_calls(expected)
def test_filter_defer_with_exception(self):
    """Deferral must be switched off even if the managed block raises.

    The bare ``except:`` in the original also swallowed
    ``KeyboardInterrupt``/``SystemExit``; narrowed to ``Exception``,
    which is all the block can actually raise here.
    """
    try:
        with self.firewall.defer_apply():
            raise Exception("same exception")
    except Exception:
        # The exception itself is irrelevant; only the deferral
        # bookkeeping on the iptables mock matters.
        pass
    self.iptables_inst.assert_has_calls([call.defer_apply_on(),
                                         call.defer_apply_off()])
Registers root_helper option for test_iptables_firewall
Fixes bug #1146478
Change-Id: I908a142d9463719e7b3c94aa1d699d329c9259de
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from mock import call
from oslo.config import cfg
from quantum.agent.common import config as a_cfg
from quantum.agent.linux.iptables_firewall import IptablesFirewallDriver
from quantum.tests.unit import test_api_v2
from quantum.tests import base
# Alias to the shared test helper (presumably returns a fresh uuid
# string) -- confirm in quantum.tests.unit.test_api_v2.
_uuid = test_api_v2._uuid
# Canned CIDR prefixes used as security-group rule source prefixes.
FAKE_PREFIX = {'IPv4': '10.0.0.0/24',
               'IPv6': 'fe80::0/48'}
# Canned fixed IP addresses assigned to the fake port, one per family.
FAKE_IP = {'IPv4': '10.0.0.1',
           'IPv6': 'fe80::1'}
class IptablesFirewallTestCase(base.BaseTestCase):
def setUp(self):
    """Stub out command execution and IptablesManager.

    No real iptables (or root helper) is ever invoked: utils.execute
    is patched, and the manager class is replaced by a Mock exposing
    fake v4/v6 'filter' tables that the assertions inspect.
    """
    super(IptablesFirewallTestCase, self).setUp()
    # The driver reads AGENT.root_helper; register the option group so
    # the cfg lookup succeeds under test.
    cfg.CONF.register_opts(a_cfg.ROOT_HELPER_OPTS, 'AGENT')
    self.utils_exec_p = mock.patch(
        'quantum.agent.linux.utils.execute')
    self.utils_exec = self.utils_exec_p.start()
    self.addCleanup(self.utils_exec_p.stop)
    self.iptables_cls_p = mock.patch(
        'quantum.agent.linux.iptables_manager.IptablesManager')
    iptables_cls = self.iptables_cls_p.start()
    self.addCleanup(self.iptables_cls_p.stop)
    self.iptables_inst = mock.Mock()
    self.v4filter_inst = mock.Mock()
    self.v6filter_inst = mock.Mock()
    # Mirror the IptablesManager attribute layout: per-family dicts
    # mapping table name -> table object.
    self.iptables_inst.ipv4 = {'filter': self.v4filter_inst}
    self.iptables_inst.ipv6 = {'filter': self.v6filter_inst}
    iptables_cls.return_value = self.iptables_inst
    self.firewall = IptablesFirewallDriver()
    self.firewall.iptables = self.iptables_inst
def _fake_port(self):
    """Return a minimal port dict with one IPv4 and one IPv6 address."""
    return {
        'device': 'tapfake_dev',
        'mac_address': 'ff:ff:ff:ff',
        'fixed_ips': [FAKE_IP['IPv4'], FAKE_IP['IPv6']],
    }
def test_prepare_port_filter_with_no_sg(self):
    """A port with no security-group rules still gets the boilerplate.

    Fallback DROP chain, state tracking, DHCP-client allow and MAC/IP
    anti-spoofing are installed -- just no per-rule entries.
    """
    port = self._fake_port()
    self.firewall.prepare_port_filter(port)
    calls = [call.add_chain('sg-fallback'),
             call.add_rule('sg-fallback', '-j DROP'),
             call.ensure_remove_chain('sg-chain'),
             call.add_chain('sg-chain'),
             call.add_chain('ifake_dev'),
             call.add_rule('FORWARD',
                           '-m physdev --physdev-is-bridged '
                           '--physdev-out tapfake_dev '
                           '-j $sg-chain'),
             call.add_rule('sg-chain',
                           '-m physdev --physdev-is-bridged '
                           '--physdev-out tapfake_dev '
                           '-j $ifake_dev'),
             call.add_rule(
                 'ifake_dev', '-m state --state INVALID -j DROP'),
             call.add_rule(
                 'ifake_dev',
                 '-m state --state ESTABLISHED,RELATED -j RETURN'),
             call.add_rule('ifake_dev', '-j $sg-fallback'),
             call.add_chain('ofake_dev'),
             call.add_rule('FORWARD',
                           '-m physdev --physdev-is-bridged '
                           '--physdev-in tapfake_dev '
                           '-j $sg-chain'),
             call.add_rule('sg-chain',
                           '-m physdev --physdev-is-bridged '
                           '--physdev-in tapfake_dev '
                           '-j $ofake_dev'),
             call.add_rule('INPUT',
                           '-m physdev --physdev-is-bridged '
                           '--physdev-in tapfake_dev '
                           '-j $ofake_dev'),
             call.add_rule(
                 'ofake_dev', '-m mac ! --mac-source ff:ff:ff:ff -j DROP'),
             call.add_rule(
                 'ofake_dev',
                 '-p udp --sport 68 --dport 67 -j RETURN'),
             call.add_rule('ofake_dev', '! -s 10.0.0.1 -j DROP'),
             call.add_rule(
                 'ofake_dev',
                 '-p udp --sport 67 --dport 68 -j DROP'),
             call.add_rule(
                 'ofake_dev', '-m state --state INVALID -j DROP'),
             call.add_rule(
                 'ofake_dev',
                 '-m state --state ESTABLISHED,RELATED -j RETURN'),
             call.add_rule('ofake_dev', '-j $sg-fallback'),
             call.add_rule('sg-chain', '-j ACCEPT')]
    self.v4filter_inst.assert_has_calls(calls)
def test_filter_ipv4_ingress(self):
    """Plain ingress IPv4 rule: a bare RETURN jump on the input chain."""
    rule = dict(ethertype='IPv4', direction='ingress')
    expected = call.add_rule('ifake_dev', '-j RETURN')
    self._test_prepare_port_filter(rule, expected, None)
def test_filter_ipv4_ingress_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'source_ip_prefix': prefix}
ingress = call.add_rule('ifake_dev', '-j RETURN -s %s' % prefix)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_tcp(self):
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'tcp'}
ingress = call.add_rule('ifake_dev', '-j RETURN -p tcp')
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_tcp_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'tcp',
'source_ip_prefix': prefix}
ingress = call.add_rule('ifake_dev', '-j RETURN -p tcp -s %s' % prefix)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_icmp(self):
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'icmp'}
ingress = call.add_rule('ifake_dev', '-j RETURN -p icmp')
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_icmp_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'icmp',
'source_ip_prefix': prefix}
ingress = call.add_rule(
'ifake_dev', '-j RETURN -p icmp -s %s' % prefix)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_tcp_port(self):
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 10}
ingress = call.add_rule('ifake_dev', '-j RETURN -p tcp --dport 10')
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_tcp_mport(self):
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 100}
ingress = call.add_rule(
'ifake_dev',
'-j RETURN -p tcp -m multiport --dports 10:100')
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_tcp_mport_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 100,
'source_ip_prefix': prefix}
ingress = call.add_rule(
'ifake_dev',
'-j RETURN -p tcp -m multiport '
'--dports 10:100 -s %s' % prefix)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_udp(self):
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'udp'}
ingress = call.add_rule('ifake_dev', '-j RETURN -p udp')
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_udp_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'udp',
'source_ip_prefix': prefix}
ingress = call.add_rule('ifake_dev', '-j RETURN -p udp -s %s' % prefix)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_udp_port(self):
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 10}
ingress = call.add_rule('ifake_dev', '-j RETURN -p udp --dport 10')
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_udp_mport(self):
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 100}
ingress = call.add_rule(
'ifake_dev',
'-j RETURN -p udp -m multiport --dports 10:100')
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_ingress_udp_mport_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 100,
'source_ip_prefix': prefix}
ingress = call.add_rule(
'ifake_dev',
'-j RETURN -p udp -m multiport '
'--dports 10:100 -s %s' % prefix)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress(self):
rule = {'ethertype': 'IPv4',
'direction': 'egress'}
egress = call.add_rule('ofake_dev', '-j RETURN')
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'source_ip_prefix': prefix}
egress = call.add_rule('ofake_dev', '-j RETURN -s %s' % prefix)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_tcp(self):
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'tcp'}
egress = call.add_rule('ofake_dev', '-j RETURN -p tcp')
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_tcp_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'tcp',
'source_ip_prefix': prefix}
egress = call.add_rule('ofake_dev', '-j RETURN -p tcp -s %s' % prefix)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_icmp(self):
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'icmp'}
egress = call.add_rule('ofake_dev', '-j RETURN -p icmp')
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_icmp_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'icmp',
'source_ip_prefix': prefix}
egress = call.add_rule(
'ofake_dev', '-j RETURN -p icmp -s %s' % prefix)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_tcp_port(self):
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 10}
egress = call.add_rule('ofake_dev', '-j RETURN -p tcp --dport 10')
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_tcp_mport(self):
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 100}
egress = call.add_rule(
'ofake_dev',
'-j RETURN -p tcp -m multiport --dports 10:100')
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_tcp_mport_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 100,
'source_ip_prefix': prefix}
egress = call.add_rule(
'ofake_dev',
'-j RETURN -p tcp -m multiport '
'--dports 10:100 -s %s' % prefix)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_udp(self):
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'udp'}
egress = call.add_rule('ofake_dev', '-j RETURN -p udp')
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_udp_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'udp',
'source_ip_prefix': prefix}
egress = call.add_rule('ofake_dev', '-j RETURN -p udp -s %s' % prefix)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_udp_port(self):
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 10}
egress = call.add_rule('ofake_dev', '-j RETURN -p udp --dport 10')
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_udp_mport(self):
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 100}
egress = call.add_rule(
'ofake_dev',
'-j RETURN -p udp -m multiport --dports 10:100')
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_udp_mport_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 100,
'source_ip_prefix': prefix}
egress = call.add_rule(
'ofake_dev',
'-j RETURN -p udp -m multiport '
'--dports 10:100 -s %s' % prefix)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress(self):
rule = {'ethertype': 'IPv6',
'direction': 'ingress'}
ingress = call.add_rule('ifake_dev', '-j RETURN')
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'source_ip_prefix': prefix}
ingress = call.add_rule('ifake_dev', '-j RETURN -s %s' % prefix)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_tcp(self):
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'tcp'}
ingress = call.add_rule('ifake_dev', '-j RETURN -p tcp')
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_tcp_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'tcp',
'source_ip_prefix': prefix}
ingress = call.add_rule('ifake_dev', '-j RETURN -p tcp -s %s' % prefix)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_tcp_port(self):
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 10}
ingress = call.add_rule('ifake_dev', '-j RETURN -p tcp --dport 10')
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_icmp(self):
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'icmp'}
ingress = call.add_rule('ifake_dev', '-j RETURN -p icmpv6')
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_icmp_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'icmp',
'source_ip_prefix': prefix}
ingress = call.add_rule(
'ifake_dev', '-j RETURN -p icmpv6 -s %s' % prefix)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_tcp_mport(self):
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 100}
ingress = call.add_rule(
'ifake_dev',
'-j RETURN -p tcp -m multiport --dports 10:100')
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_tcp_mport_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 100,
'source_ip_prefix': prefix}
ingress = call.add_rule(
'ifake_dev',
'-j RETURN -p tcp -m multiport '
'--dports 10:100 -s %s' % prefix)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_udp(self):
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'udp'}
ingress = call.add_rule('ifake_dev', '-j RETURN -p udp')
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_udp_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'udp',
'source_ip_prefix': prefix}
ingress = call.add_rule('ifake_dev', '-j RETURN -p udp -s %s' % prefix)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_udp_port(self):
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 10}
ingress = call.add_rule('ifake_dev', '-j RETURN -p udp --dport 10')
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_udp_mport(self):
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 100}
ingress = call.add_rule(
'ifake_dev',
'-j RETURN -p udp -m multiport --dports 10:100')
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_udp_mport_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 100,
'source_ip_prefix': prefix}
ingress = call.add_rule(
'ifake_dev',
'-j RETURN -p udp -m multiport '
'--dports 10:100 -s %s' % prefix)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress(self):
rule = {'ethertype': 'IPv6',
'direction': 'egress'}
egress = call.add_rule('ofake_dev', '-j RETURN')
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'source_ip_prefix': prefix}
egress = call.add_rule('ofake_dev', '-j RETURN -s %s' % prefix)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_tcp(self):
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'tcp'}
egress = call.add_rule('ofake_dev', '-j RETURN -p tcp')
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_tcp_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'tcp',
'source_ip_prefix': prefix}
egress = call.add_rule('ofake_dev', '-j RETURN -p tcp -s %s' % prefix)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_icmp(self):
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'icmp'}
egress = call.add_rule('ofake_dev', '-j RETURN -p icmpv6')
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_icmp_prefix(self):
    """Egress IPv6 'icmp' rule with a prefix emits an icmpv6 match."""
    src = FAKE_PREFIX['IPv6']
    rule = dict(ethertype='IPv6',
                direction='egress',
                protocol='icmp',
                source_ip_prefix=src)
    expected = call.add_rule(
        'ofake_dev', '-j RETURN -p icmpv6 -s %s' % src)
    self._test_prepare_port_filter(rule, None, expected)
def test_filter_ipv6_egress_tcp_port(self):
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 10}
egress = call.add_rule('ofake_dev', '-j RETURN -p tcp --dport 10')
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_tcp_mport(self):
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 100}
egress = call.add_rule(
'ofake_dev',
'-j RETURN -p tcp -m multiport --dports 10:100')
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_tcp_mport_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 100,
'source_ip_prefix': prefix}
egress = call.add_rule(
'ofake_dev',
'-j RETURN -p tcp -m multiport '
'--dports 10:100 -s %s' % prefix)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_udp(self):
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'udp'}
egress = call.add_rule('ofake_dev', '-j RETURN -p udp')
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_udp_prefix(self):
    """Egress UDP with a source prefix adds the -s match."""
    src = FAKE_PREFIX['IPv6']
    sg_rule = {'ethertype': 'IPv6',
               'direction': 'egress',
               'protocol': 'udp',
               'source_ip_prefix': src}
    expected = call.add_rule('ofake_dev', '-j RETURN -p udp -s %s' % src)
    self._test_prepare_port_filter(sg_rule, None, expected)
def test_filter_ipv6_egress_udp_port(self):
    """A single-port UDP range (min == max) renders as --dport."""
    sg_rule = {'ethertype': 'IPv6',
               'direction': 'egress',
               'protocol': 'udp',
               'port_range_min': 10,
               'port_range_max': 10}
    expected = call.add_rule('ofake_dev', '-j RETURN -p udp --dport 10')
    self._test_prepare_port_filter(sg_rule, None, expected)
def test_filter_ipv6_egress_udp_mport(self):
    """A real UDP port range renders via the multiport match."""
    sg_rule = {'ethertype': 'IPv6',
               'direction': 'egress',
               'protocol': 'udp',
               'port_range_min': 10,
               'port_range_max': 100}
    expected = call.add_rule(
        'ofake_dev',
        '-j RETURN -p udp -m multiport --dports 10:100')
    self._test_prepare_port_filter(sg_rule, None, expected)
def test_filter_ipv6_egress_udp_mport_prefix(self):
    """UDP port range plus source prefix combines multiport and -s."""
    src = FAKE_PREFIX['IPv6']
    sg_rule = {'ethertype': 'IPv6',
               'direction': 'egress',
               'protocol': 'udp',
               'port_range_min': 10,
               'port_range_max': 100,
               'source_ip_prefix': src}
    expected = call.add_rule(
        'ofake_dev',
        '-j RETURN -p udp -m multiport --dports 10:100 -s %s' % src)
    self._test_prepare_port_filter(sg_rule, None, expected)
def _test_prepare_port_filter(self,
                              rule,
                              ingress_expected_call=None,
                              egress_expected_call=None):
    """Run prepare_port_filter() for one SG rule and verify the mock calls.

    :param rule: a single security-group rule dict (ethertype, direction,
        protocol, optional ports / prefixes).
    :param ingress_expected_call: mock ``call`` expected in the per-port
        ingress chain, or None if the rule produces no ingress rule.
    :param egress_expected_call: mock ``call`` expected in the per-port
        egress chain, or None if the rule produces no egress rule.
    """
    port = self._fake_port()
    ethertype = rule['ethertype']
    prefix = FAKE_IP[ethertype]
    # Default to the IPv4 filter mock; swapped for IPv6 rules below.
    filter_inst = self.v4filter_inst
    dhcp_rule = call.add_rule(
        'ofake_dev',
        '-p udp --sport 68 --dport 67 -j RETURN')
    if ethertype == 'IPv6':
        filter_inst = self.v6filter_inst
        # IPv6 address configuration goes via ICMPv6, not DHCP.
        dhcp_rule = call.add_rule('ofake_dev', '-p icmpv6 -j RETURN')
    sg = [rule]
    port['security_group_rules'] = sg
    self.firewall.prepare_port_filter(port)
    # Common preamble: fallback chain, per-port ingress chain and the
    # physdev jumps that route bridged traffic into it.
    calls = [call.add_chain('sg-fallback'),
             call.add_rule('sg-fallback', '-j DROP'),
             call.ensure_remove_chain('sg-chain'),
             call.add_chain('sg-chain'),
             call.add_chain('ifake_dev'),
             call.add_rule('FORWARD',
                           '-m physdev --physdev-is-bridged '
                           '--physdev-out tapfake_dev '
                           '-j $sg-chain'),
             call.add_rule('sg-chain',
                           '-m physdev --physdev-is-bridged '
                           '--physdev-out tapfake_dev '
                           '-j $ifake_dev'),
             call.add_rule(
                 'ifake_dev', '-m state --state INVALID -j DROP'),
             call.add_rule(
                 'ifake_dev',
                 '-m state --state ESTABLISHED,RELATED -j RETURN')]
    if ingress_expected_call:
        calls.append(ingress_expected_call)
    # Egress chain: anti-spoofing (MAC and source-IP checks), DHCP/ICMPv6
    # allowance, then conntrack shortcuts.
    calls += [call.add_rule('ifake_dev', '-j $sg-fallback'),
              call.add_chain('ofake_dev'),
              call.add_rule('FORWARD',
                            '-m physdev --physdev-is-bridged '
                            '--physdev-in tapfake_dev '
                            '-j $sg-chain'),
              call.add_rule('sg-chain',
                            '-m physdev --physdev-is-bridged '
                            '--physdev-in tapfake_dev '
                            '-j $ofake_dev'),
              call.add_rule('INPUT',
                            '-m physdev --physdev-is-bridged '
                            '--physdev-in tapfake_dev '
                            '-j $ofake_dev'),
              call.add_rule(
                  'ofake_dev',
                  '-m mac ! --mac-source ff:ff:ff:ff -j DROP'),
              dhcp_rule,
              call.add_rule('ofake_dev', '! -s %s -j DROP' % prefix)]
    if ethertype == 'IPv4':
        # Instances must not act as DHCP servers.
        calls.append(call.add_rule(
            'ofake_dev',
            '-p udp --sport 67 --dport 68 -j DROP'))
    calls += [call.add_rule(
                  'ofake_dev', '-m state --state INVALID -j DROP'),
              call.add_rule(
                  'ofake_dev',
                  '-m state --state ESTABLISHED,RELATED -j RETURN')]
    if egress_expected_call:
        calls.append(egress_expected_call)
    calls += [call.add_rule('ofake_dev', '-j $sg-fallback'),
              call.add_rule('sg-chain', '-j ACCEPT')]
    filter_inst.assert_has_calls(calls)
def test_update_delete_port_filter(self):
    """Exercise the prepare → update → remove lifecycle of a port filter.

    Prepares a port with an ingress rule, updates it to an egress rule,
    then removes it, asserting the exact iptables mock-call sequence.
    Unknown devices passed to update/remove must be ignored silently.
    """
    port = self._fake_port()
    port['security_group_rules'] = [{'ethertype': 'IPv4',
                                     'direction': 'ingress'}]
    self.firewall.prepare_port_filter(port)
    # Replace the ingress rule with an egress rule and re-apply.
    port['security_group_rules'] = [{'ethertype': 'IPv4',
                                     'direction': 'egress'}]
    self.firewall.update_port_filter(port)
    self.firewall.update_port_filter({'device': 'no-exist-device'})
    self.firewall.remove_port_filter(port)
    self.firewall.remove_port_filter({'device': 'no-exist-device'})
    # --- first prepare_port_filter() with the ingress rule ---
    calls = [call.add_chain('sg-fallback'),
             call.add_rule('sg-fallback', '-j DROP'),
             call.ensure_remove_chain('sg-chain'),
             call.add_chain('sg-chain'),
             call.add_chain('ifake_dev'),
             call.add_rule(
                 'FORWARD',
                 '-m physdev --physdev-is-bridged '
                 '--physdev-out tapfake_dev -j $sg-chain'),
             call.add_rule(
                 'sg-chain',
                 '-m physdev --physdev-is-bridged '
                 '--physdev-out tapfake_dev -j $ifake_dev'),
             call.add_rule(
                 'ifake_dev', '-m state --state INVALID -j DROP'),
             call.add_rule(
                 'ifake_dev',
                 '-m state --state ESTABLISHED,RELATED -j RETURN'),
             # The ingress SG rule itself.
             call.add_rule('ifake_dev', '-j RETURN'),
             call.add_rule('ifake_dev', '-j $sg-fallback'),
             call.add_chain('ofake_dev'),
             call.add_rule(
                 'FORWARD',
                 '-m physdev --physdev-is-bridged '
                 '--physdev-in tapfake_dev -j $sg-chain'),
             call.add_rule(
                 'sg-chain',
                 '-m physdev --physdev-is-bridged '
                 '--physdev-in tapfake_dev -j $ofake_dev'),
             call.add_rule(
                 'INPUT',
                 '-m physdev --physdev-is-bridged '
                 '--physdev-in tapfake_dev -j $ofake_dev'),
             call.add_rule(
                 'ofake_dev',
                 '-m mac ! --mac-source ff:ff:ff:ff -j DROP'),
             call.add_rule(
                 'ofake_dev',
                 '-p udp --sport 68 --dport 67 -j RETURN'),
             call.add_rule(
                 'ofake_dev',
                 '! -s 10.0.0.1 -j DROP'),
             call.add_rule(
                 'ofake_dev',
                 '-p udp --sport 67 --dport 68 -j DROP'),
             call.add_rule(
                 'ofake_dev', '-m state --state INVALID -j DROP'),
             call.add_rule(
                 'ofake_dev',
                 '-m state --state ESTABLISHED,RELATED -j RETURN'),
             call.add_rule('ofake_dev', '-j $sg-fallback'),
             call.add_rule('sg-chain', '-j ACCEPT'),
             # --- update_port_filter(): old chains removed, rebuilt with
             # the egress rule ---
             call.ensure_remove_chain('ifake_dev'),
             call.ensure_remove_chain('ofake_dev'),
             call.ensure_remove_chain('sg-chain'),
             call.add_chain('sg-chain'),
             call.add_chain('ifake_dev'),
             call.add_rule(
                 'FORWARD',
                 '-m physdev --physdev-is-bridged '
                 '--physdev-out tapfake_dev -j $sg-chain'),
             call.add_rule(
                 'sg-chain',
                 '-m physdev --physdev-is-bridged '
                 '--physdev-out tapfake_dev -j $ifake_dev'),
             call.add_rule(
                 'ifake_dev',
                 '-m state --state INVALID -j DROP'),
             call.add_rule(
                 'ifake_dev',
                 '-m state --state ESTABLISHED,RELATED -j RETURN'),
             call.add_rule('ifake_dev', '-j $sg-fallback'),
             call.add_chain('ofake_dev'),
             call.add_rule(
                 'FORWARD',
                 '-m physdev --physdev-is-bridged '
                 '--physdev-in tapfake_dev -j $sg-chain'),
             call.add_rule(
                 'sg-chain',
                 '-m physdev --physdev-is-bridged '
                 '--physdev-in tapfake_dev -j $ofake_dev'),
             call.add_rule(
                 'INPUT',
                 '-m physdev --physdev-is-bridged '
                 '--physdev-in tapfake_dev -j $ofake_dev'),
             call.add_rule(
                 'ofake_dev',
                 '-m mac ! --mac-source ff:ff:ff:ff -j DROP'),
             call.add_rule(
                 'ofake_dev', '-p udp --sport 68 --dport 67 -j RETURN'),
             call.add_rule(
                 'ofake_dev', '! -s 10.0.0.1 -j DROP'),
             call.add_rule(
                 'ofake_dev', '-p udp --sport 67 --dport 68 -j DROP'),
             call.add_rule(
                 'ofake_dev', '-m state --state INVALID -j DROP'),
             call.add_rule(
                 'ofake_dev',
                 '-m state --state ESTABLISHED,RELATED -j RETURN'),
             # The egress SG rule itself.
             call.add_rule('ofake_dev', '-j RETURN'),
             call.add_rule('ofake_dev', '-j $sg-fallback'),
             call.add_rule('sg-chain', '-j ACCEPT'),
             # --- remove_port_filter() tears everything down ---
             call.ensure_remove_chain('ifake_dev'),
             call.ensure_remove_chain('ofake_dev'),
             call.ensure_remove_chain('sg-chain'),
             call.add_chain('sg-chain')]
    self.v4filter_inst.assert_has_calls(calls)
def test_remove_unknown_port(self):
    """Removing a filter for a never-prepared port must not raise."""
    unknown_port = self._fake_port()
    self.firewall.remove_port_filter(unknown_port)
    # Checking that no exception occurs.
    self.v4filter_inst.assert_has_calls([])
def test_defer_apply(self):
    """The defer_apply context manager toggles deferral on entry/exit."""
    with self.firewall.defer_apply():
        pass
    expected = [call.defer_apply_on(), call.defer_apply_off()]
    self.iptables_inst.assert_has_calls(expected)
def test_filter_defer_with_exception(self):
    """defer_apply must balance on/off even when the body raises.

    Bugfix: the original used a bare ``except:`` which would also swallow
    SystemExit/KeyboardInterrupt; narrowed to ``except Exception``.
    """
    try:
        with self.firewall.defer_apply():
            raise Exception("same exception")
    except Exception:
        # Only the deferred-apply bookkeeping matters here.
        pass
    self.iptables_inst.assert_has_calls([call.defer_apply_on(),
                                         call.defer_apply_off()])
|
# -*- coding: UTF-8 -*-
# Copyright 2016-2018 Rumma & Ko Ltd
# License: BSD (see file COPYING for details)
"""Database models specific for the Team variant of Lino Noi.
Defines a customized :class:`TicketDetail`.
"""
from __future__ import print_function
from lino_xl.lib.tickets.models import *
from lino.modlib.users.mixins import Assignable
from lino.api import _
from lino_xl.lib.working.choicelists import ReportingTypes, ZERO_DURATION
from lino.modlib.summaries.mixins import Summarized
def get_summary_fields():
    """Yield the per-reporting-type summary field names (``<name>_hours``)."""
    for item in ReportingTypes.get_list_items():
        yield "{}_hours".format(item.name)
class Site(Site):
    """Noi's Site model: notifies subscribers about changes and
    auto-subscribes the user who creates a site."""

    class Meta(Site.Meta):
        app_label = 'tickets'
        abstract = dd.is_abstract_model(__name__, 'Site')

    def get_change_observers(self, ar=None):
        """Yield ``(user, mail_mode)`` pairs to notify about a change.

        When a ticket is being created on this site, only subscribed
        triagers other than the acting user are notified; otherwise every
        subscriber of the site is.
        """
        # Bugfix: honour the declared ar=None default -- the original
        # dereferenced ar.bound_action before checking ar for None.
        action = ar.bound_action.action if ar is not None and ar.bound_action else None
        if ar is not None and isinstance(action, CreateRow) and issubclass(ar.actor.model, Ticket):
            subs = rt.models.tickets.Subscription.objects.filter(
                site=ar.selected_rows[-1].site)
            # Plain loop instead of the original nested comprehension.
            for sub in subs:
                user = sub.user
                if (user.user_type
                        and user.user_type.has_required_roles([Triager])
                        and user != ar.get_user()):
                    yield (user, user.mail_mode)
        else:
            for sub in rt.models.tickets.Subscription.objects.filter(site=self):
                yield (sub.user, sub.user.mail_mode)

    def after_ui_create(self, ar):
        """After creation through the UI, subscribe the creating user."""
        super(Site, self).after_ui_create(ar)
        rt.models.tickets.Subscription.objects.create(user=ar.get_user(), site=self)
class Ticket(Ticket, Assignable, Summarized):
    """Noi's Ticket model.

    Extends the base ticket with assignment notifications and
    per-reporting-type worked-hours summary fields (see
    :func:`get_summary_fields`).
    """

    class Meta(Ticket.Meta):
        # app_label = 'tickets'
        abstract = dd.is_abstract_model(__name__, 'Ticket')

    def assigned_to_changed(self, ar):
        """Send a notification to the newly assigned user (if notify is
        installed and the assignee is not the acting user)."""
        if (self.assigned_to is not None and
                self.assigned_to != ar.user and
                dd.is_installed('notify')):
            ctx = dict(user=ar.user, what=ar.obj2memo(self))

            def msg(user, mm):
                subject = _("{user} has assigned you to ticket: {what}").format(**ctx)
                return (subject, tostring(E.span(subject)))

            mt = rt.models.notify.MessageTypes.tickets
            rt.models.notify.Message.emit_notification(
                ar, self, mt, msg,
                [(self.assigned_to, self.assigned_to.mail_mode)]
            )

    def after_ui_create(self, ar):
        super(Ticket, self).after_ui_create(ar)

    show_commits = dd.ShowSlaveTable('github.CommitsByTicket')
    show_changes = dd.ShowSlaveTable('changes.ChangesByMaster')

    def get_change_subject(self, ar, cw):
        """Return the notification subject for a change, or None to skip."""
        ctx = dict(user=ar.user, what=str(self))
        if cw is None:
            return _("{user} submitted ticket {what}").format(**ctx)
        if len(list(cw.get_updates())) == 0:
            return
        return _("{user} modified {what}").format(**ctx)

    def get_change_body(self, ar, cw):
        """Return the HTML notification body for a change, or None to skip."""
        ctx = dict(user=ar.user, what=ar.obj2memo(self))
        if cw is None:
            elems = [E.p(
                _("{user} submitted ticket {what}").format(**ctx), ".")]
            elems += list(self.get_change_info(ar, cw))
        else:
            items = list(cw.get_updates_html(["_user_cache"]))
            if len(items) == 0:
                return
            elems = []
            elems += list(self.get_change_info(ar, cw))
            elems.append(E.p(
                _("{user} modified {what}").format(**ctx), ":"))
            elems.append(E.ul(*items))
        return tostring(E.div(*elems))

    @classmethod
    def get_layout_aliases(cls):
        yield ("SUMMARY_FIELDS", ' '.join(get_summary_fields()))

    def reset_summary_data(self):
        """Zero all *_hours fields and forget the last commenter."""
        for k in get_summary_fields():
            setattr(self, k, ZERO_DURATION)
        self.last_commenter = None

    def get_summary_collectors(self):
        """Yield ``(collector, queryset)`` pairs for the summary engine."""
        qs = rt.models.working.Session.objects.filter(ticket=self)
        yield (self.add_from_session, qs)
        # Only the newest comment is needed to find the last commenter.
        qs = rt.models.comments.Comment.objects.filter(
            **gfk2lookup(rt.models.comments.Comment._meta.get_field("owner"),
                         self)).order_by("-created")[0:1]
        yield (self.add_from_comment, qs)

    def add_from_comment(self, obj):
        self.last_commenter = obj.user

    def add_from_session(self, obj):
        """Accumulate a working session's duration into the matching
        ``<reporting_type>_hours`` field."""
        d = obj.get_duration()
        if d:
            # Renamed from `rt`, which shadowed the module-level `rt`.
            reporting_type = obj.get_reporting_type()
            k = reporting_type.name + '_hours'
            # Bugfix: the original statement ended with a stray backslash
            # that line-continued into the @dd.chooser() decorator below.
            setattr(self, k, getattr(self, k) + d)

    @dd.chooser()
    def site_choices(cls, end_user, user, ar):
        """Sites which the (given or requesting) user or end_user
        subscribes to."""
        user = user if user is not None else ar.get_user()
        sub_user = [user.pk]
        if end_user:
            sub_user.append(end_user.pk)
        pks = rt.models.tickets.Subscription.objects.filter(
            user__pk__in=sub_user).values_list("site__pk", flat=True)
        # Bugfix: removed leftover debug print(pks).
        return Site.objects.filter(id__in=pks)
class TicketDetail(TicketDetail):
    """Customized detail_layout for Tickets in Noi
    """
    # Two tabs; the commented entries are retired panels.
    main = "general more #history_tab #more2 #github.CommitsByTicket"

    general = dd.Panel("""
    general1:60 comments.CommentsByRFC:30
    """, label=_("General"))

    general1 = """
    general1a:30 general1b:30
    """

    # Width bookkeeping for general1a:
    # 50+6=56
    # in XL: label span is 4, so we have 8 units for the fields
    # 56.0/8 = 7
    # summary: 50/56*8 = 7.14 --> 7
    # id: 6/56*8 = 0.85 -> 1
    general1a = """
    summary id:6
    site ticket_type
    workflow_buttons
    description
    """

    general1b = """
    user end_user
    assigned_to private:10
    priority:10 planned_time
    SUMMARY_FIELDS
    working.SessionsByTicket
    """

    more = dd.Panel("""
    more1 DuplicatesByTicket:20 #WishesByTicket
    upgrade_notes LinksByTicket uploads.UploadsByController
    """, label=_("More"))

    # history_tab = dd.Panel("""
    # changes.ChangesByMaster #stars.StarsByController:20
    # github.CommitsByTicket
    # """, label=_("History"), required_roles=dd.login_required(Triager))

    more1 = """
    created modified fixed_since #reported_for #fixed_date #fixed_time
    state ref duplicate_of deadline
    # standby feedback closed
    """

    # more2 = dd.Panel("""
    # # deploy.DeploymentsByTicket
    # # skills.DemandsByDemander
    # stars.AllStarsByController
    # uploads.UploadsByController
    # """, label=_("Even more"))
class TicketInsertLayout(dd.InsertLayout):
    """Layout of the insert window for creating a new ticket."""

    main = """
    summary #private:20
    right:30 left:50
    """

    right = """
    ticket_type
    priority
    end_user
    #assigned_to
    site
    """

    left = """
    description
    """

    # (width, height) of the insert window.
    window_size = (80, 20)
class SiteDetail(SiteDetail):
    """Customized detail_layout for Sites in Noi: three tabs."""

    main = """general config history"""

    general = dd.Panel("""
    gen_left:20 TicketsBySite:60
    """, label=_("General"))

    gen_left = """
    overview
    SubscriptionsBySite
    """

    general2 = """
    ref name id
    company contact_person reporting_type
    remark:20 workflow_buttons:20
    """

    # Configuration tab is restricted to ticket staff.
    config = dd.Panel("""
    general2
    description
    """, label=_("Configure"), required_roles = dd.login_required(TicketsStaff)
    )

    history = dd.Panel("""
    # meetings.MeetingsBySite
    working.SummariesBySite
    """, label=_("History"))
# Note in the following lines we don't subclass Tickets because then
# we would need to also override these attributes for all subclasses
Tickets.insert_layout = 'tickets.TicketInsertLayout'
Tickets.params_layout = """user end_user assigned_to not_assigned_to interesting_for site has_site state priority
#deployed_to show_assigned show_active #show_deployed show_todo show_private
start_date end_date observed_event #topic #feasable_by has_ref
last_commenter not_last_commenter subscriber"""
Tickets.column_names = 'last_commenter id summary:50 #user:10 #topic #faculty priority ' \
                       'workflow_buttons:30 site:10 #project:10'
# Column sets for smaller screens.
Tickets.tablet_columns = "id summary workflow_buttons"
#Tickets.tablet_columns_popin = "site project"
Tickets.mobile_columns = "workflow_buttons"
#Tickets.mobile_columns_pop = "summary workflow_buttons"
Tickets.popin_columns = "summary"
# Newest tickets first.
Tickets.order_by = ["-id"]
TicketsBySite.column_names = "priority detail_link planned_time SUMMARY_FIELDS workflow_buttons *"
# Sites.detail_layout = """
# id name partner #responsible_user
# remark
# #InterestsBySite TicketsBySite deploy.MilestonesBySite
# """
# Not needed, have it be inferred by mobile_columns or tablet_columns if both None, use normal grid.
#AllTickets.display_mode = "responsive_grid"
Remove `last_commenter` from the default tickets column list.
# -*- coding: UTF-8 -*-
# Copyright 2016-2018 Rumma & Ko Ltd
# License: BSD (see file COPYING for details)
"""Database models specific for the Team variant of Lino Noi.
Defines a customized :class:`TicketDetail`.
"""
from __future__ import print_function
from lino_xl.lib.tickets.models import *
from lino.modlib.users.mixins import Assignable
from lino.api import _
from lino_xl.lib.working.choicelists import ReportingTypes, ZERO_DURATION
from lino.modlib.summaries.mixins import Summarized
def get_summary_fields():
    """Yield the per-reporting-type summary field names (``<name>_hours``)."""
    for item in ReportingTypes.get_list_items():
        yield "{}_hours".format(item.name)
class Site(Site):
    """Noi's Site model: notifies subscribers about changes and
    auto-subscribes the user who creates a site."""

    class Meta(Site.Meta):
        app_label = 'tickets'
        abstract = dd.is_abstract_model(__name__, 'Site')

    def get_change_observers(self, ar=None):
        """Yield ``(user, mail_mode)`` pairs to notify about a change.

        When a ticket is being created on this site, only subscribed
        triagers other than the acting user are notified; otherwise every
        subscriber of the site is.
        """
        # Bugfix: honour the declared ar=None default -- the original
        # dereferenced ar.bound_action before checking ar for None.
        action = ar.bound_action.action if ar is not None and ar.bound_action else None
        if ar is not None and isinstance(action, CreateRow) and issubclass(ar.actor.model, Ticket):
            subs = rt.models.tickets.Subscription.objects.filter(
                site=ar.selected_rows[-1].site)
            # Plain loop instead of the original nested comprehension.
            for sub in subs:
                user = sub.user
                if (user.user_type
                        and user.user_type.has_required_roles([Triager])
                        and user != ar.get_user()):
                    yield (user, user.mail_mode)
        else:
            for sub in rt.models.tickets.Subscription.objects.filter(site=self):
                yield (sub.user, sub.user.mail_mode)

    def after_ui_create(self, ar):
        """After creation through the UI, subscribe the creating user."""
        super(Site, self).after_ui_create(ar)
        rt.models.tickets.Subscription.objects.create(user=ar.get_user(), site=self)
class Ticket(Ticket, Assignable, Summarized):
    """Noi's Ticket model.

    Extends the base ticket with assignment notifications and
    per-reporting-type worked-hours summary fields (see
    :func:`get_summary_fields`).
    """

    class Meta(Ticket.Meta):
        # app_label = 'tickets'
        abstract = dd.is_abstract_model(__name__, 'Ticket')

    def assigned_to_changed(self, ar):
        """Send a notification to the newly assigned user (if notify is
        installed and the assignee is not the acting user)."""
        if (self.assigned_to is not None and
                self.assigned_to != ar.user and
                dd.is_installed('notify')):
            ctx = dict(user=ar.user, what=ar.obj2memo(self))

            def msg(user, mm):
                subject = _("{user} has assigned you to ticket: {what}").format(**ctx)
                return (subject, tostring(E.span(subject)))

            mt = rt.models.notify.MessageTypes.tickets
            rt.models.notify.Message.emit_notification(
                ar, self, mt, msg,
                [(self.assigned_to, self.assigned_to.mail_mode)]
            )

    def after_ui_create(self, ar):
        super(Ticket, self).after_ui_create(ar)

    show_commits = dd.ShowSlaveTable('github.CommitsByTicket')
    show_changes = dd.ShowSlaveTable('changes.ChangesByMaster')

    def get_change_subject(self, ar, cw):
        """Return the notification subject for a change, or None to skip."""
        ctx = dict(user=ar.user, what=str(self))
        if cw is None:
            return _("{user} submitted ticket {what}").format(**ctx)
        if len(list(cw.get_updates())) == 0:
            return
        return _("{user} modified {what}").format(**ctx)

    def get_change_body(self, ar, cw):
        """Return the HTML notification body for a change, or None to skip."""
        ctx = dict(user=ar.user, what=ar.obj2memo(self))
        if cw is None:
            elems = [E.p(
                _("{user} submitted ticket {what}").format(**ctx), ".")]
            elems += list(self.get_change_info(ar, cw))
        else:
            items = list(cw.get_updates_html(["_user_cache"]))
            if len(items) == 0:
                return
            elems = []
            elems += list(self.get_change_info(ar, cw))
            elems.append(E.p(
                _("{user} modified {what}").format(**ctx), ":"))
            elems.append(E.ul(*items))
        return tostring(E.div(*elems))

    @classmethod
    def get_layout_aliases(cls):
        yield ("SUMMARY_FIELDS", ' '.join(get_summary_fields()))

    def reset_summary_data(self):
        """Zero all *_hours fields and forget the last commenter."""
        for k in get_summary_fields():
            setattr(self, k, ZERO_DURATION)
        self.last_commenter = None

    def get_summary_collectors(self):
        """Yield ``(collector, queryset)`` pairs for the summary engine."""
        qs = rt.models.working.Session.objects.filter(ticket=self)
        yield (self.add_from_session, qs)
        # Only the newest comment is needed to find the last commenter.
        qs = rt.models.comments.Comment.objects.filter(
            **gfk2lookup(rt.models.comments.Comment._meta.get_field("owner"),
                         self)).order_by("-created")[0:1]
        yield (self.add_from_comment, qs)

    def add_from_comment(self, obj):
        self.last_commenter = obj.user

    def add_from_session(self, obj):
        """Accumulate a working session's duration into the matching
        ``<reporting_type>_hours`` field."""
        d = obj.get_duration()
        if d:
            # Renamed from `rt`, which shadowed the module-level `rt`.
            reporting_type = obj.get_reporting_type()
            k = reporting_type.name + '_hours'
            # Bugfix: the original statement ended with a stray backslash
            # that line-continued into the @dd.chooser() decorator below.
            setattr(self, k, getattr(self, k) + d)

    @dd.chooser()
    def site_choices(cls, end_user, user, ar):
        """Sites which the (given or requesting) user or end_user
        subscribes to."""
        user = user if user is not None else ar.get_user()
        sub_user = [user.pk]
        if end_user:
            sub_user.append(end_user.pk)
        pks = rt.models.tickets.Subscription.objects.filter(
            user__pk__in=sub_user).values_list("site__pk", flat=True)
        # Bugfix: removed leftover debug print(pks).
        return Site.objects.filter(id__in=pks)
class TicketDetail(TicketDetail):
    """Customized detail_layout for Tickets in Noi
    """
    # Two tabs; the commented entries are retired panels.
    main = "general more #history_tab #more2 #github.CommitsByTicket"

    general = dd.Panel("""
    general1:60 comments.CommentsByRFC:30
    """, label=_("General"))

    general1 = """
    general1a:30 general1b:30
    """

    # Width bookkeeping for general1a:
    # 50+6=56
    # in XL: label span is 4, so we have 8 units for the fields
    # 56.0/8 = 7
    # summary: 50/56*8 = 7.14 --> 7
    # id: 6/56*8 = 0.85 -> 1
    general1a = """
    summary id:6
    site ticket_type
    workflow_buttons
    description
    """

    general1b = """
    user end_user
    assigned_to private:10
    priority:10 planned_time
    SUMMARY_FIELDS
    working.SessionsByTicket
    """

    more = dd.Panel("""
    more1 DuplicatesByTicket:20 #WishesByTicket
    upgrade_notes LinksByTicket uploads.UploadsByController
    """, label=_("More"))

    # history_tab = dd.Panel("""
    # changes.ChangesByMaster #stars.StarsByController:20
    # github.CommitsByTicket
    # """, label=_("History"), required_roles=dd.login_required(Triager))

    more1 = """
    created modified fixed_since #reported_for #fixed_date #fixed_time
    state ref duplicate_of deadline
    # standby feedback closed
    """

    # more2 = dd.Panel("""
    # # deploy.DeploymentsByTicket
    # # skills.DemandsByDemander
    # stars.AllStarsByController
    # uploads.UploadsByController
    # """, label=_("Even more"))
class TicketInsertLayout(dd.InsertLayout):
    """Layout of the insert window for creating a new ticket."""

    main = """
    summary #private:20
    right:30 left:50
    """

    right = """
    ticket_type
    priority
    end_user
    #assigned_to
    site
    """

    left = """
    description
    """

    # (width, height) of the insert window.
    window_size = (80, 20)
class SiteDetail(SiteDetail):
    """Customized detail_layout for Sites in Noi: three tabs."""

    main = """general config history"""

    general = dd.Panel("""
    gen_left:20 TicketsBySite:60
    """, label=_("General"))

    gen_left = """
    overview
    SubscriptionsBySite
    """

    general2 = """
    ref name id
    company contact_person reporting_type
    remark:20 workflow_buttons:20
    """

    # Configuration tab is restricted to ticket staff.
    config = dd.Panel("""
    general2
    description
    """, label=_("Configure"), required_roles = dd.login_required(TicketsStaff)
    )

    history = dd.Panel("""
    # meetings.MeetingsBySite
    working.SummariesBySite
    """, label=_("History"))
# Note in the following lines we don't subclass Tickets because then
# we would need to also override these attributes for all subclasses
Tickets.insert_layout = 'tickets.TicketInsertLayout'
Tickets.params_layout = """user end_user assigned_to not_assigned_to interesting_for site has_site state priority
#deployed_to show_assigned show_active #show_deployed show_todo show_private
start_date end_date observed_event #topic #feasable_by has_ref
last_commenter not_last_commenter subscriber"""
Tickets.column_names = 'id summary:50 #user:10 #topic #faculty priority ' \
                       'workflow_buttons:30 site:10 #project:10'
# Column sets for smaller screens.
Tickets.tablet_columns = "id summary workflow_buttons"
#Tickets.tablet_columns_popin = "site project"
Tickets.mobile_columns = "workflow_buttons"
#Tickets.mobile_columns_pop = "summary workflow_buttons"
Tickets.popin_columns = "summary"
# Newest tickets first.
Tickets.order_by = ["-id"]
TicketsBySite.column_names = "priority detail_link planned_time SUMMARY_FIELDS workflow_buttons *"
# Sites.detail_layout = """
# id name partner #responsible_user
# remark
# #InterestsBySite TicketsBySite deploy.MilestonesBySite
# """
# Not needed, have it be inferred by mobile_columns or tablet_columns if both None, use normal grid.
#AllTickets.display_mode = "responsive_grid"
|
from http import HTTPStatus
import pytest
import grequests
from flask import url_for
from eth_utils import (
to_checksum_address,
to_canonical_address,
is_checksum_address,
)
from raiden_contracts.constants import (
CONTRACT_HUMAN_STANDARD_TOKEN,
MAX_TOKENS_DEPLOY,
TEST_SETTLE_TIMEOUT_MIN,
TEST_SETTLE_TIMEOUT_MAX,
)
from raiden.api.v1.encoding import (
AddressField,
HexAddressConverter,
)
from raiden.transfer.state import (
CHANNEL_STATE_OPENED,
CHANNEL_STATE_CLOSED,
)
from raiden.tests.utils import assert_dicts_are_equal
from raiden.tests.utils.client import burn_all_eth
from raiden.tests.utils.smartcontracts import deploy_contract_web3
# pylint: disable=too-many-locals,unused-argument,too-many-lines
class CustomException(Exception):
    """Marker exception raised on purpose to test unhandled-error handling."""
    pass
def assert_no_content_response(response):
    """Assert *response* is a 204 No Content reply with an empty body."""
    assert response is not None
    assert response.text == ''
    assert response.status_code == HTTPStatus.NO_CONTENT
def assert_response_with_code(response, status_code):
    """Assert that *response* exists and carries exactly *status_code*."""
    assert response is not None
    assert response.status_code == status_code
def assert_response_with_error(response, status_code):
    """Assert an error reply: the expected status plus a non-empty
    'errors' field in the JSON body."""
    assert response is not None
    assert response.status_code == status_code
    assert 'errors' in response.json()
    assert response.json()['errors'] != ''
def assert_proper_response(response, status_code=HTTPStatus.OK):
    """Assert a well-formed JSON reply with the given status (200 by default)."""
    assert response is not None
    assert response.status_code == status_code
    assert response.headers['Content-Type'] == 'application/json'
def api_url_for(api_backend, endpoint, **kwargs):
    """Build the URL of a v1 REST *endpoint* on the given test backend."""
    api_server, _ = api_backend
    # url_for() expects binary addresses, so convert hex strings here.
    converted = {
        key: to_canonical_address(val)
        if isinstance(val, str) and val.startswith('0x')
        else val
        for key, val in kwargs.items()
    }
    with api_server.flask_app.app_context():
        return url_for('v1_resources.{}'.format(endpoint), **converted)
def test_hex_converter():
    """HexAddressConverter rejects malformed input and decodes valid hex."""
    converter = HexAddressConverter(map=None)

    # Invalid hex data.
    with pytest.raises(Exception):
        converter.to_python('-')

    # Valid hex but too short to be an address.
    with pytest.raises(Exception):
        converter.to_python('0x1234')

    # The 0x prefix is mandatory.
    with pytest.raises(Exception):
        converter.to_python('414d72a6f6e28f4950117696081450d63d56c354')

    expected = b'AMr\xa6\xf6\xe2\x8fIP\x11v\x96\x08\x14P\xd6=V\xc3T'
    assert converter.to_python('0x414D72a6f6E28F4950117696081450d63D56C354') == expected
def test_address_field():
    """AddressField deserialization mirrors HexAddressConverter semantics."""
    # pylint: disable=protected-access
    field = AddressField()
    attr = 'test'
    data = object()

    # Invalid hex data.
    with pytest.raises(Exception):
        field._deserialize('-', attr, data)

    # Valid hex but too short to be an address.
    with pytest.raises(Exception):
        field._deserialize('0x1234', attr, data)

    # The 0x prefix is mandatory.
    with pytest.raises(Exception):
        field._deserialize('414d72a6f6e28f4950117696081450d63d56c354', attr, data)

    expected = b'AMr\xa6\xf6\xe2\x8fIP\x11v\x96\x08\x14P\xd6=V\xc3T'
    assert field._deserialize('0x414D72a6f6E28F4950117696081450d63D56C354', attr, data) == expected
def test_url_with_invalid_address(rest_api_port_number, api_backend):
    """ Addresses require the leading 0x in the urls. """
    bad_url = (
        'http://localhost:{port}/api/1/'
        'channels/ea674fdde714fd979de3edf0f56aa9716b898ec8'
    ).format(port=rest_api_port_number)
    response = grequests.patch(
        bad_url,
        json=dict(state='CHANNEL_STATE_SETTLED'),
    ).send().response
    assert_response_with_code(response, HTTPStatus.NOT_FOUND)
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_payload_with_address_without_prefix(api_backend):
    """ Addresses require leading 0x in the payload. """
    payload = {
        'partner_address': '61c808d82a3ac53231750dadc13c777b59310bd9',
        'token_address': '0xEA674fdDe714fd979de3EdF0F56AA9716B898ec8',
        'settle_timeout': 10,
    }
    response = grequests.put(
        api_url_for(api_backend, 'channelsresource'),
        json=payload,
    ).send().response
    assert_response_with_error(response, HTTPStatus.BAD_REQUEST)
@pytest.mark.xfail(
    strict=True,
    reason='Crashed app also crashes on teardown',
    raises=CustomException,
)
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_crash_on_unhandled_exception(api_backend):
    """An unhandled exception in an endpoint must crash the API server.

    (The original docstring was copy-pasted from the address tests and
    described the wrong behaviour.)
    """
    api_server, _ = api_backend

    # as we should not have unhandled exceptions in our endpoints, create one to test
    @api_server.flask_app.route('/error_endpoint', methods=['GET'])
    def error_endpoint():
        raise CustomException('This is an unhandled error')

    with api_server.flask_app.app_context():
        url = url_for('error_endpoint')
    request = grequests.get(url)
    request.send()
    # The server is expected to die; join() should return promptly.
    api_server.join(timeout=5)
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_payload_with_address_invalid_chars(api_backend):
    """ Addresses cannot have invalid characters in it. """
    payload = {
        # g at the end is invalid
        'partner_address': '0x61c808d82a3ac53231750dadc13c777b59310bdg',
        'token_address': '0xEA674fdDe714fd979de3EdF0F56AA9716B898ec8',
        'settle_timeout': 10,
    }
    response = grequests.put(
        api_url_for(api_backend, 'channelsresource'),
        json=payload,
    ).send().response
    assert_response_with_error(response, HTTPStatus.BAD_REQUEST)
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_payload_with_address_invalid_length(api_backend):
    """ Encoded addresses must have the right length. """
    invalid_address = '0x61c808d82a3ac53231750dadc13c777b59310b'  # one byte short
    channel_data_obj = {
        'partner_address': invalid_address,
        'token_address': '0xEA674fdDe714fd979de3EdF0F56AA9716B898ec8',
        'settle_timeout': 10,
    }
    request = grequests.put(
        api_url_for(
            api_backend,
            'channelsresource',
        ),
        json=channel_data_obj,
    )
    response = request.send().response
    assert_response_with_error(response, HTTPStatus.BAD_REQUEST)
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_payload_with_address_not_eip55(api_backend):
    """ Provided addresses must be EIP55 encoded. """
    # All-lowercase hex: valid bytes but not checksummed.
    payload = {
        'partner_address': '0xf696209d2ca35e6c88e5b99b7cda3abf316bed69',
        'token_address': '0xEA674fdDe714fd979de3EdF0F56AA9716B898ec8',
        'settle_timeout': 90,
    }
    response = grequests.put(
        api_url_for(api_backend, 'channelsresource'),
        json=payload,
    ).send().response
    assert_response_with_error(response, HTTPStatus.BAD_REQUEST)
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_api_query_our_address(api_backend):
    """The address endpoint returns the node's checksummed address."""
    response = grequests.get(
        api_url_for(api_backend, 'addressresource'),
    ).send().response
    assert_proper_response(response)

    api_server, _ = api_backend
    our_address = api_server.rest_api.raiden_api.address
    assert response.json() == {'our_address': to_checksum_address(our_address)}
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_api_get_channel_list(
        api_backend,
        token_addresses,
        reveal_timeout,
):
    """The channel list starts empty and reflects a newly opened channel."""
    partner_address = '0x61C808D82A3Ac53231750daDc13c777b59310bD9'

    # With no channels the listing must be an empty JSON array.
    request = grequests.get(
        api_url_for(
            api_backend,
            'channelsresource',
        ),
    )
    response = request.send().response
    assert_proper_response(response, HTTPStatus.OK)
    assert response.json() == []

    # let's create a new channel
    token_address = token_addresses[0]
    settle_timeout = 1650
    channel_data_obj = {
        'partner_address': partner_address,
        'token_address': to_checksum_address(token_address),
        'settle_timeout': settle_timeout,
        'reveal_timeout': reveal_timeout,
    }

    request = grequests.put(
        api_url_for(
            api_backend,
            'channelsresource',
        ),
        json=channel_data_obj,
    )
    response = request.send().response
    assert_proper_response(response, HTTPStatus.CREATED)

    # The listing must now contain the channel we just opened.
    request = grequests.get(
        api_url_for(
            api_backend,
            'channelsresource',
        ),
    )
    response = request.send().response
    assert_proper_response(response, HTTPStatus.OK)
    channel_info = response.json()[0]
    assert channel_info['partner_address'] == partner_address
    assert channel_info['token_address'] == to_checksum_address(token_address)
    assert channel_info['total_deposit'] == 0
    assert 'token_network_identifier' in channel_info
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_api_channel_status_channel_nonexistant(
        api_backend,
        token_addresses,
):
    """Querying a channel that was never opened yields 404 with a message.

    NOTE(review): the function name misspells "nonexistent"; kept as-is
    since renaming would change the public test id.
    """
    partner_address = '0x61C808D82A3Ac53231750daDc13c777b59310bD9'
    token_address = token_addresses[0]

    request = grequests.get(
        api_url_for(
            api_backend,
            'channelsresourcebytokenandpartneraddress',
            token_address=token_address,
            partner_address=partner_address,
        ),
    )
    response = request.send().response
    assert_proper_response(response, HTTPStatus.NOT_FOUND)
    assert response.json()['errors'] == (
        "Channel with partner '{}' for token '{}' could not be found.".format(
            to_checksum_address(partner_address),
            to_checksum_address(token_address),
        )
    )
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_api_open_and_deposit_channel(
        api_backend,
        token_addresses,
        reveal_timeout,
):
    """Exercise channel opening and depositing through the REST API:

    1. open a channel with no deposit,
    2. open a second channel with an initial deposit in the same PUT,
    3. reject a negative deposit,
    4. deposit into the first channel via PATCH,
    5. query both channels,
    6. reject opening a channel when the node has no ETH left.
    """
    # let's create a new channel
    first_partner_address = '0x61C808D82A3Ac53231750daDc13c777b59310bD9'
    token_address = token_addresses[0]
    settle_timeout = 1650
    channel_data_obj = {
        'partner_address': first_partner_address,
        'token_address': to_checksum_address(token_address),
        'settle_timeout': settle_timeout,
        'reveal_timeout': reveal_timeout,
    }

    request = grequests.put(
        api_url_for(
            api_backend,
            'channelsresource',
        ),
        json=channel_data_obj,
    )
    response = request.send().response

    assert_proper_response(response, HTTPStatus.CREATED)
    # First channel opened in a fresh network gets identifier 1.
    first_channel_id = 1
    response = response.json()
    expected_response = channel_data_obj
    expected_response['balance'] = 0
    expected_response['state'] = CHANNEL_STATE_OPENED
    expected_response['channel_identifier'] = 1
    expected_response['token_network_identifier'] = assert_dicts_are_equal.IGNORE_VALUE
    expected_response['total_deposit'] = 0
    assert_dicts_are_equal(response, expected_response)

    token_network_identifier = response['token_network_identifier']

    # now let's open a channel and make a deposit too
    second_partner_address = '0x29FA6cf0Cce24582a9B20DB94Be4B6E017896038'
    total_deposit = 100
    channel_data_obj = {
        'partner_address': second_partner_address,
        'token_address': to_checksum_address(token_address),
        'settle_timeout': settle_timeout,
        'reveal_timeout': reveal_timeout,
        'total_deposit': total_deposit,
    }
    request = grequests.put(
        api_url_for(
            api_backend,
            'channelsresource',
        ),
        json=channel_data_obj,
    )
    response = request.send().response

    assert_proper_response(response, HTTPStatus.CREATED)
    second_channel_id = 2
    response = response.json()
    expected_response = channel_data_obj
    # Deposit made at open time shows up as the channel balance.
    expected_response['balance'] = total_deposit
    expected_response['state'] = CHANNEL_STATE_OPENED
    expected_response['channel_identifier'] = second_channel_id
    expected_response['token_network_identifier'] = token_network_identifier
    expected_response['total_deposit'] = total_deposit
    assert_dicts_are_equal(response, expected_response)

    # assert depositing negative amount fails
    request = grequests.patch(
        api_url_for(
            api_backend,
            'channelsresourcebytokenandpartneraddress',
            token_address=token_address,
            partner_address=first_partner_address,
        ),
        json={'total_deposit': -1000},
    )
    response = request.send().response
    assert_proper_response(response, HTTPStatus.CONFLICT)

    # let's deposit on the first channel
    request = grequests.patch(
        api_url_for(
            api_backend,
            'channelsresourcebytokenandpartneraddress',
            token_address=token_address,
            partner_address=first_partner_address,
        ),
        json={'total_deposit': total_deposit},
    )
    response = request.send().response
    assert_proper_response(response)
    response = response.json()
    expected_response = {
        'channel_identifier': first_channel_id,
        'partner_address': first_partner_address,
        'token_address': to_checksum_address(token_address),
        'settle_timeout': settle_timeout,
        'reveal_timeout': reveal_timeout,
        'state': CHANNEL_STATE_OPENED,
        'balance': total_deposit,
        'total_deposit': total_deposit,
        'token_network_identifier': token_network_identifier,
    }
    assert_dicts_are_equal(response, expected_response)

    # let's try querying for the second channel
    request = grequests.get(
        api_url_for(
            api_backend,
            'channelsresourcebytokenandpartneraddress',
            token_address=token_address,
            partner_address=second_partner_address,
        ),
    )

    response = request.send().response
    assert_proper_response(response)
    response = response.json()
    expected_response = {
        'channel_identifier': second_channel_id,
        'partner_address': second_partner_address,
        'token_address': to_checksum_address(token_address),
        'settle_timeout': settle_timeout,
        'reveal_timeout': reveal_timeout,
        'state': CHANNEL_STATE_OPENED,
        'balance': total_deposit,
        'total_deposit': total_deposit,
        'token_network_identifier': token_network_identifier,
    }
    assert_dicts_are_equal(response, expected_response)

    # finally let's burn all eth and try to open another channel
    api_server, _ = api_backend
    burn_all_eth(api_server.rest_api.raiden_api.raiden)
    channel_data_obj = {
        'partner_address': '0xf3AF96F89b3d7CdcBE0C083690A28185Feb0b3CE',
        'token_address': to_checksum_address(token_address),
        'settle_timeout': settle_timeout,
        'reveal_timeout': reveal_timeout,
        'balance': 1,
    }
    request = grequests.put(
        api_url_for(
            api_backend,
            'channelsresource',
        ),
        json=channel_data_obj,
    )
    response = request.send().response

    assert_proper_response(response, HTTPStatus.PAYMENT_REQUIRED)
    response = response.json()
    assert 'The account balance is below the estimated amount' in response['errors']
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_api_open_close_and_settle_channel(
        api_backend,
        token_addresses,
        reveal_timeout,
):
    """Open a channel without a deposit, then close it via PATCH and
    verify the reported state is CLOSED with zero balance/deposit.
    """
    # let's create a new channel
    partner_address = '0x61C808D82A3Ac53231750daDc13c777b59310bD9'
    token_address = token_addresses[0]
    settle_timeout = 1650
    channel_data_obj = {
        'partner_address': partner_address,
        'token_address': to_checksum_address(token_address),
        'settle_timeout': settle_timeout,
    }

    request = grequests.put(
        api_url_for(
            api_backend,
            'channelsresource',
        ),
        json=channel_data_obj,
    )
    response = request.send().response

    balance = 0
    assert_proper_response(response, status_code=HTTPStatus.CREATED)
    # First channel opened in a fresh network gets identifier 1.
    channel_identifier = 1
    response = response.json()
    expected_response = channel_data_obj
    expected_response['balance'] = balance
    expected_response['state'] = CHANNEL_STATE_OPENED
    expected_response['reveal_timeout'] = reveal_timeout
    expected_response['channel_identifier'] = channel_identifier
    expected_response['token_network_identifier'] = assert_dicts_are_equal.IGNORE_VALUE
    expected_response['total_deposit'] = 0
    assert_dicts_are_equal(response, expected_response)

    token_network_identifier = response['token_network_identifier']

    # let's close the channel
    request = grequests.patch(
        api_url_for(
            api_backend,
            'channelsresourcebytokenandpartneraddress',
            token_address=token_address,
            partner_address=partner_address,
        ),
        json={'state': CHANNEL_STATE_CLOSED},
    )
    response = request.send().response
    assert_proper_response(response)

    expected_response = {
        'token_network_identifier': token_network_identifier,
        'channel_identifier': channel_identifier,
        'partner_address': partner_address,
        'token_address': to_checksum_address(token_address),
        'settle_timeout': settle_timeout,
        'reveal_timeout': reveal_timeout,
        'state': CHANNEL_STATE_CLOSED,
        'balance': balance,
        'total_deposit': balance,
    }
    assert_dicts_are_equal(response.json(), expected_response)
@pytest.mark.parametrize('number_of_nodes', [2])
@pytest.mark.parametrize('channels_per_node', [0])
def test_api_close_insufficient_eth(
        api_backend,
        token_addresses,
        reveal_timeout,
):
    """Closing a channel needs an on-chain transaction; with no ETH left
    the close PATCH must fail with 402 PAYMENT_REQUIRED.
    """
    # let's create a new channel
    partner_address = '0x61C808D82A3Ac53231750daDc13c777b59310bD9'
    token_address = token_addresses[0]
    settle_timeout = 1650
    channel_data_obj = {
        'partner_address': partner_address,
        'token_address': to_checksum_address(token_address),
        'settle_timeout': settle_timeout,
    }

    request = grequests.put(
        api_url_for(
            api_backend,
            'channelsresource',
        ),
        json=channel_data_obj,
    )
    response = request.send().response

    balance = 0
    assert_proper_response(response, status_code=HTTPStatus.CREATED)
    # First channel opened by this node gets identifier 1.
    channel_identifier = 1
    response = response.json()
    expected_response = channel_data_obj
    expected_response['balance'] = balance
    expected_response['state'] = CHANNEL_STATE_OPENED
    expected_response['reveal_timeout'] = reveal_timeout
    expected_response['channel_identifier'] = channel_identifier
    expected_response['token_network_identifier'] = assert_dicts_are_equal.IGNORE_VALUE
    expected_response['total_deposit'] = 0
    assert_dicts_are_equal(response, expected_response)

    # let's burn all eth and try to close the channel
    api_server, _ = api_backend
    burn_all_eth(api_server.rest_api.raiden_api.raiden)

    request = grequests.patch(
        api_url_for(
            api_backend,
            'channelsresourcebytokenandpartneraddress',
            token_address=token_address,
            partner_address=partner_address,
        ),
        json={'state': CHANNEL_STATE_CLOSED},
    )
    response = request.send().response
    assert_proper_response(response, HTTPStatus.PAYMENT_REQUIRED)
    response = response.json()
    assert 'Insufficient ETH' in response['errors']
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_api_open_channel_invalid_input(
        api_backend,
        token_addresses,
        reveal_timeout,
):
    """Settle timeouts outside [TEST_SETTLE_TIMEOUT_MIN, TEST_SETTLE_TIMEOUT_MAX]
    must be rejected with 409 CONFLICT when opening a channel."""
    partner_address = '0x61C808D82A3Ac53231750daDc13c777b59310bD9'
    token_address = token_addresses[0]

    # One below the minimum, then one above the maximum.
    for bad_settle_timeout in (TEST_SETTLE_TIMEOUT_MIN - 1, TEST_SETTLE_TIMEOUT_MAX + 1):
        channel_data_obj = {
            'partner_address': partner_address,
            'token_address': to_checksum_address(token_address),
            'settle_timeout': bad_settle_timeout,
            'reveal_timeout': reveal_timeout,
        }
        response = grequests.put(
            api_url_for(
                api_backend,
                'channelsresource',
            ),
            json=channel_data_obj,
        ).send().response
        assert_response_with_error(response, status_code=HTTPStatus.CONFLICT)
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_api_channel_state_change_errors(
        api_backend,
        token_addresses,
        reveal_timeout,
):
    """Invalid PATCH payloads on an open channel must be rejected:
    unknown state -> 400, state+deposit together -> 409, empty body -> 400,
    deposit after close -> 409.
    """
    partner_address = '0x61C808D82A3Ac53231750daDc13c777b59310bD9'
    token_address = token_addresses[0]
    settle_timeout = 1650
    channel_data_obj = {
        'partner_address': partner_address,
        'token_address': to_checksum_address(token_address),
        'settle_timeout': settle_timeout,
        'reveal_timeout': reveal_timeout,
    }
    request = grequests.put(
        api_url_for(
            api_backend,
            'channelsresource',
        ),
        json=channel_data_obj,
    )
    response = request.send().response
    assert_proper_response(response, HTTPStatus.CREATED)

    # let's try to set a random state
    request = grequests.patch(
        api_url_for(
            api_backend,
            'channelsresourcebytokenandpartneraddress',
            token_address=token_address,
            partner_address=partner_address,
        ),
        json=dict(state='inlimbo'),
    )
    response = request.send().response
    assert_response_with_error(response, HTTPStatus.BAD_REQUEST)

    # let's try to set both new state and balance
    request = grequests.patch(
        api_url_for(
            api_backend,
            'channelsresourcebytokenandpartneraddress',
            token_address=token_address,
            partner_address=partner_address,
        ),
        json=dict(state=CHANNEL_STATE_CLOSED, total_deposit=200),
    )
    response = request.send().response
    assert_response_with_error(response, HTTPStatus.CONFLICT)

    # let's try to patch with no arguments
    request = grequests.patch(
        api_url_for(
            api_backend,
            'channelsresourcebytokenandpartneraddress',
            token_address=token_address,
            partner_address=partner_address,
        ),
    )
    response = request.send().response
    assert_response_with_error(response, HTTPStatus.BAD_REQUEST)

    # ok now let's close the channel for real
    request = grequests.patch(
        api_url_for(
            api_backend,
            'channelsresourcebytokenandpartneraddress',
            token_address=token_address,
            partner_address=partner_address,
        ),
        json=dict(state=CHANNEL_STATE_CLOSED),
    )
    response = request.send().response
    assert_proper_response(response)

    # let's try to deposit to the closed channel
    request = grequests.patch(
        api_url_for(
            api_backend,
            'channelsresourcebytokenandpartneraddress',
            token_address=token_address,
            partner_address=partner_address,
        ),
        json=dict(total_deposit=500),
    )
    response = request.send().response
    assert_response_with_error(response, HTTPStatus.CONFLICT)
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
@pytest.mark.parametrize('number_of_tokens', [2])
def test_api_tokens(api_backend, blockchain_services, token_addresses):
    """After opening one channel per token, the tokens resource lists
    both token addresses (order-independent)."""
    partner_address = '0x61C808D82A3Ac53231750daDc13c777b59310bD9'
    settle_timeout = 1650

    # Open one channel for each of the two registered tokens.
    for token_address in (token_addresses[0], token_addresses[1]):
        channel_data_obj = {
            'partner_address': partner_address,
            'token_address': to_checksum_address(token_address),
            'settle_timeout': settle_timeout,
        }
        response = grequests.put(
            api_url_for(
                api_backend,
                'channelsresource',
            ),
            json=channel_data_obj,
        ).send().response
        assert_proper_response(response, HTTPStatus.CREATED)

    # and now let's get the token list
    response = grequests.get(
        api_url_for(
            api_backend,
            'tokensresource',
        ),
    ).send().response
    assert_proper_response(response)
    assert set(response.json()) == {
        to_checksum_address(token_addresses[0]),
        to_checksum_address(token_addresses[1]),
    }
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_query_partners_by_token(api_backend, blockchain_services, token_addresses):
    """The partners-by-token resource lists a partner entry (with a link
    to the channel resource) for each channel of the given token.
    """
    first_partner_address = '0x61C808D82A3Ac53231750daDc13c777b59310bD9'
    second_partner_address = '0x29FA6cf0Cce24582a9B20DB94Be4B6E017896038'
    token_address = token_addresses[0]
    settle_timeout = 1650
    channel_data_obj = {
        'partner_address': first_partner_address,
        'token_address': to_checksum_address(token_address),
        'settle_timeout': settle_timeout,
    }
    request = grequests.put(
        api_url_for(
            api_backend,
            'channelsresource',
        ),
        json=channel_data_obj,
    )
    response = request.send().response
    assert_proper_response(response, HTTPStatus.CREATED)
    response = response.json()

    channel_data_obj['partner_address'] = second_partner_address
    request = grequests.put(
        api_url_for(
            api_backend,
            'channelsresource',
        ),
        json=channel_data_obj,
    )
    response = request.send().response
    assert_proper_response(response, HTTPStatus.CREATED)
    response = response.json()

    # NOTE(review): despite the intent of opening a channel "for another
    # token", this reuses the SAME token_address, so a third channel is
    # opened for the first token — TODO confirm whether a second token
    # was meant here (the `all(... in ...)` check below tolerates the
    # extra partner either way).
    channel_data_obj['partner_address'] = '0xb07937AbA15304FBBB0Bf6454a9377a76E3dD39E'
    channel_data_obj['token_address'] = to_checksum_address(token_address)
    request = grequests.put(
        api_url_for(
            api_backend,
            'channelsresource',
        ),
        json=channel_data_obj,
    )
    response = request.send().response
    assert_proper_response(response, HTTPStatus.CREATED)

    # and now let's query our partners per token for the first token
    request = grequests.get(
        api_url_for(
            api_backend,
            'partnersresourcebytokenaddress',
            token_address=to_checksum_address(token_address),
        ),
    )
    response = request.send().response
    assert_proper_response(response)
    response = response.json()
    expected_response = [
        {
            'partner_address': first_partner_address,
            'channel': '/api/1/channels/{}/{}'.format(
                to_checksum_address(token_address),
                to_checksum_address(first_partner_address),
            ),
        }, {
            'partner_address': second_partner_address,
            'channel': '/api/1/channels/{}/{}'.format(
                to_checksum_address(token_address),
                to_checksum_address(second_partner_address),
            ),
        },
    ]
    # Subset check: the third partner may also be present.
    assert all(r in response for r in expected_response)
@pytest.mark.parametrize('number_of_nodes', [2])
def test_api_payments(api_backend, raiden_network, token_addresses):
    """A successful payment POST echoes back the full payment description."""
    _, app1 = raiden_network
    amount = 200
    identifier = 42
    token_address = token_addresses[0]
    target_address = app1.raiden.address

    api_server, _ = api_backend
    our_address = api_server.rest_api.raiden_api.address

    expected_payment = {
        'initiator_address': to_checksum_address(our_address),
        'target_address': to_checksum_address(target_address),
        'token_address': to_checksum_address(token_address),
        'amount': amount,
        'identifier': identifier,
    }

    response = grequests.post(
        api_url_for(
            api_backend,
            'token_target_paymentresource',
            token_address=to_checksum_address(token_address),
            target_address=to_checksum_address(target_address),
        ),
        json={'amount': amount, 'identifier': identifier},
    ).send().response
    assert_proper_response(response)
    assert response.json() == expected_payment
@pytest.mark.parametrize('number_of_tokens', [0])
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_register_token(api_backend, token_amount, token_addresses, raiden_network):
    """Registering a token succeeds once (201), re-registering conflicts
    (409), and registering with no ETH fails (402)."""
    app0 = raiden_network[0]

    def deploy_new_token():
        # Deploy a fresh HumanStandardToken with 2 decimals.
        return deploy_contract_web3(
            CONTRACT_HUMAN_STANDARD_TOKEN,
            app0.raiden.chain.client,
            num_confirmations=None,
            constructor_arguments=(
                token_amount,
                2,
                'raiden',
                'Rd',
            ),
        )

    new_token_address = deploy_new_token()
    other_token_address = deploy_new_token()

    def register(token_address):
        # PUT on the register-token resource and return the HTTP response.
        return grequests.put(api_url_for(
            api_backend,
            'registertokenresource',
            token_address=to_checksum_address(token_address),
        )).send().response

    register_response = register(new_token_address)
    assert_proper_response(register_response, status_code=HTTPStatus.CREATED)
    response_json = register_response.json()
    assert 'token_network_address' in response_json
    assert is_checksum_address(response_json['token_network_address'])

    # now try to reregister it and get the error
    assert_response_with_error(register(new_token_address), HTTPStatus.CONFLICT)

    # Burn all the eth and then make sure we get the appropriate API error
    burn_all_eth(app0.raiden)
    assert_response_with_error(register(other_token_address), HTTPStatus.PAYMENT_REQUIRED)
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
@pytest.mark.parametrize('number_of_tokens', [2])
def test_get_connection_managers_info(api_backend, token_addresses):
    """Each PUT on the connections resource adds one entry to the
    connections-info mapping, keyed by checksummed token address."""

    def connections_info():
        # Fetch and decode the current connections-info mapping.
        response = grequests.get(
            api_url_for(api_backend, 'connectionsinforesource'),
        ).send().response
        return response.json()

    # check that there are no registered tokens
    assert len(connections_info()) == 0

    funds = 100
    for expected_managers, token_address in enumerate(token_addresses[:2], start=1):
        checksummed_token = to_checksum_address(token_address)
        response = grequests.put(
            api_url_for(
                api_backend,
                'connectionsresource',
                token_address=checksummed_token,
            ),
            json={'funds': funds},
        ).send().response
        assert_no_content_response(response)

        # One more channel manager must now be reported.
        result = connections_info()
        assert isinstance(result, dict) and len(result.keys()) == expected_managers
        assert checksummed_token in result
        assert isinstance(result[checksummed_token], dict)
        assert set(result[checksummed_token].keys()) == {'funds', 'sum_deposits', 'channels'}
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
@pytest.mark.parametrize('number_of_tokens', [2])
def test_connect_insufficient_reserve(api_backend, token_addresses):
    """Connecting to a token network with no ETH for the on-chain
    transactions is rejected with 402 PAYMENT_REQUIRED."""
    # Burn all eth and then try to connect to a token network
    api_server, _ = api_backend
    burn_all_eth(api_server.rest_api.raiden_api.raiden)

    response = grequests.put(
        api_url_for(
            api_backend,
            'connectionsresource',
            token_address=to_checksum_address(token_addresses[0]),
        ),
        json={'funds': 100},
    ).send().response
    assert_proper_response(response, HTTPStatus.PAYMENT_REQUIRED)
    assert 'The account balance is below the estimated amount' in response.json()['errors']
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_network_events(api_backend, token_addresses):
    """Opening a channel produces at least one network-level blockchain event."""
    # let's create a new channel
    channel_data_obj = {
        'partner_address': '0x61C808D82A3Ac53231750daDc13c777b59310bD9',
        'token_address': to_checksum_address(token_addresses[0]),
        'settle_timeout': 1650,
    }
    response = grequests.put(
        api_url_for(
            api_backend,
            'channelsresource',
        ),
        json=channel_data_obj,
    ).send().response
    assert_proper_response(response, status_code=HTTPStatus.CREATED)

    response = grequests.get(
        api_url_for(
            api_backend,
            'blockchaineventsnetworkresource',
            from_block=0,
        ),
    ).send().response
    assert_proper_response(response, status_code=HTTPStatus.OK)
    assert len(response.json()) > 0
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_token_events(api_backend, token_addresses):
    """Opening a channel produces at least one token-level blockchain event."""
    # let's create a new channel
    token_address = token_addresses[0]
    channel_data_obj = {
        'partner_address': '0x61C808D82A3Ac53231750daDc13c777b59310bD9',
        'token_address': to_checksum_address(token_address),
        'settle_timeout': 1650,
    }
    response = grequests.put(
        api_url_for(
            api_backend,
            'channelsresource',
        ),
        json=channel_data_obj,
    ).send().response
    assert_proper_response(response, status_code=HTTPStatus.CREATED)

    response = grequests.get(
        api_url_for(
            api_backend,
            'blockchaineventstokenresource',
            token_address=token_address,
            from_block=0,
        ),
    ).send().response
    assert_proper_response(response, status_code=HTTPStatus.OK)
    assert len(response.json()) > 0
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_channel_events(api_backend, token_addresses):
    """Opening a channel produces at least one channel-level blockchain event."""
    # let's create a new channel
    partner_address = '0x61C808D82A3Ac53231750daDc13c777b59310bD9'
    token_address = token_addresses[0]
    channel_data_obj = {
        'partner_address': partner_address,
        'token_address': to_checksum_address(token_address),
        'settle_timeout': 1650,
    }
    response = grequests.put(
        api_url_for(
            api_backend,
            'channelsresource',
        ),
        json=channel_data_obj,
    ).send().response
    assert_proper_response(response, status_code=HTTPStatus.CREATED)

    response = grequests.get(
        api_url_for(
            api_backend,
            'tokenchanneleventsresourceblockchain',
            partner_address=partner_address,
            token_address=token_address,
            from_block=0,
        ),
    ).send().response
    assert_proper_response(response, status_code=HTTPStatus.OK)
    assert len(response.json()) > 0
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_token_events_errors_for_unregistered_token(api_backend):
    """Event queries for a token that was never registered return 404."""
    unregistered_token = '0x61C808D82A3Ac53231750daDc13c777b59310bD9'

    response = grequests.get(
        api_url_for(
            api_backend,
            'tokenchanneleventsresourceblockchain',
            token_address=unregistered_token,
            from_block=5,
            to_block=20,
        ),
    ).send().response
    assert_response_with_error(response, status_code=HTTPStatus.NOT_FOUND)

    response = grequests.get(
        api_url_for(
            api_backend,
            'channelblockchaineventsresource',
            token_address=unregistered_token,
            partner_address='0x61C808D82A3Ac53231750daDc13c777b59313bD9',
            from_block=5,
            to_block=20,
        ),
    ).send().response
    assert_response_with_error(response, status_code=HTTPStatus.NOT_FOUND)
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
@pytest.mark.parametrize('deposit', [50000])
def test_api_deposit_limit(
        api_backend,
        token_addresses,
        reveal_timeout,
):
    """A deposit of exactly the per-channel limit is accepted, one token
    unit above the limit is rejected with 409 CONFLICT.
    """
    # let's create a new channel and deposit exactly the limit amount
    first_partner_address = '0x61C808D82A3Ac53231750daDc13c777b59310bD9'
    token_address = token_addresses[0]
    settle_timeout = 1650
    balance_working = MAX_TOKENS_DEPLOY * (10 ** 2)  # token has two digits
    channel_data_obj = {
        'partner_address': first_partner_address,
        'token_address': to_checksum_address(token_address),
        'settle_timeout': settle_timeout,
        'reveal_timeout': reveal_timeout,
        'total_deposit': balance_working,
    }

    request = grequests.put(
        api_url_for(
            api_backend,
            'channelsresource',
        ),
        json=channel_data_obj,
    )
    response = request.send().response

    assert_proper_response(response, HTTPStatus.CREATED)
    # First channel opened in a fresh network gets identifier 1.
    first_channel_identifier = 1
    response = response.json()
    expected_response = channel_data_obj
    expected_response['balance'] = balance_working
    expected_response['state'] = CHANNEL_STATE_OPENED
    expected_response['channel_identifier'] = first_channel_identifier
    expected_response['token_network_identifier'] = assert_dicts_are_equal.IGNORE_VALUE
    expected_response['total_deposit'] = balance_working
    assert_dicts_are_equal(response, expected_response)

    # now let's open a channel and deposit a bit more than the limit
    second_partner_address = '0x29FA6cf0Cce24582a9B20DB94Be4B6E017896038'
    balance_failing = balance_working + 1  # token has two digits
    channel_data_obj = {
        'partner_address': second_partner_address,
        'token_address': to_checksum_address(token_address),
        'settle_timeout': settle_timeout,
        'reveal_timeout': reveal_timeout,
        'total_deposit': balance_failing,
    }
    request = grequests.put(
        api_url_for(
            api_backend,
            'channelsresource',
        ),
        json=channel_data_obj,
    )
    response = request.send().response

    assert_proper_response(response, HTTPStatus.CONFLICT)
    response = response.json()
    assert response['errors'] == 'The deposit of 10001 is bigger than the current limit of 10000'
@pytest.mark.parametrize('number_of_nodes', [3])
def test_payment_events_endpoints(api_backend, raiden_network, token_addresses):
    """After two payments (one per target) the payment-event endpoints
    report them correctly: all events, events per token, and events per
    token+target.
    """
    _, app1, app2 = raiden_network
    amount = 200
    identifier = 42
    token_address = token_addresses[0]

    target1_address = app1.raiden.address
    target2_address = app2.raiden.address

    api_server, _ = api_backend

    # sending tokens to target 1
    request = grequests.post(
        api_url_for(
            api_backend,
            'token_target_paymentresource',
            token_address=to_checksum_address(token_address),
            target_address=to_checksum_address(target1_address),
        ),
        json={'amount': amount, 'identifier': identifier},
    )
    request.send()

    # sending some tokens to target 2
    amount -= 10
    request = grequests.post(
        api_url_for(
            api_backend,
            'token_target_paymentresource',
            token_address=to_checksum_address(token_address),
            target_address=to_checksum_address(target2_address),
        ),
        json={'amount': amount, 'identifier': identifier},
    )
    request.send()

    # test endpoint without (partner and token) -> both payments visible
    request = grequests.get(
        api_url_for(
            api_backend,
            'paymentresource',
        ),
    )
    response = request.send().response
    assert_proper_response(response, HTTPStatus.OK)
    response = response.json()
    assert len(response) == 2
    assert response[0]['event'] == 'EventPaymentSentSuccess'
    assert response[1]['event'] == 'EventPaymentSentSuccess'

    # test endpoint without partner -> still both (same token)
    request = grequests.get(
        api_url_for(
            api_backend,
            'token_paymentresource',
            token_address=token_address,
        ),
    )
    response = request.send().response
    assert_proper_response(response, HTTPStatus.OK)
    response = response.json()
    assert len(response) == 2
    assert response[0]['event'] == 'EventPaymentSentSuccess'
    assert response[1]['event'] == 'EventPaymentSentSuccess'

    # test endpoint for token and partner -> only the payment to target 1
    request = grequests.get(
        api_url_for(
            api_backend,
            'token_target_paymentresource',
            token_address=token_address,
            target_address=target1_address,
        ),
    )
    response = request.send().response
    assert_proper_response(response, HTTPStatus.OK)
    response = response.json()
    assert len(response) == 1
    assert response[0]['event'] == 'EventPaymentSentSuccess'
@pytest.mark.parametrize('number_of_nodes', [2])
def test_channel_events_raiden(api_backend, raiden_network, token_addresses):
    """A payment POSTed through the API between two connected nodes succeeds."""
    _, app1 = raiden_network
    amount = 200
    identifier = 42
    token_address = token_addresses[0]
    target_address = app1.raiden.address

    response = grequests.post(
        api_url_for(
            api_backend,
            'token_target_paymentresource',
            token_address=to_checksum_address(token_address),
            target_address=to_checksum_address(target_address),
        ),
        json={'amount': amount, 'identifier': identifier},
    ).send().response
    assert_proper_response(response)
Fix xfail on test_crash_on_unhandled_exception
[ci integration]
from http import HTTPStatus
import pytest
import grequests
from flask import url_for
from eth_utils import (
to_checksum_address,
to_canonical_address,
is_checksum_address,
)
from raiden_contracts.constants import (
CONTRACT_HUMAN_STANDARD_TOKEN,
MAX_TOKENS_DEPLOY,
TEST_SETTLE_TIMEOUT_MIN,
TEST_SETTLE_TIMEOUT_MAX,
)
from raiden.api.v1.encoding import (
AddressField,
HexAddressConverter,
)
from raiden.transfer.state import (
CHANNEL_STATE_OPENED,
CHANNEL_STATE_CLOSED,
)
from raiden.tests.utils import assert_dicts_are_equal
from raiden.tests.utils.client import burn_all_eth
from raiden.tests.utils.smartcontracts import deploy_contract_web3
# pylint: disable=too-many-locals,unused-argument,too-many-lines
class CustomException(Exception):
    """Exception raised on purpose by tests to simulate an unhandled error."""
def assert_no_content_response(response):
    """Assert *response* is a 204 NO CONTENT with an empty body."""
    assert response is not None
    assert response.text == ''
    assert response.status_code == HTTPStatus.NO_CONTENT
def assert_response_with_code(response, status_code):
    """Assert *response* exists and carries exactly *status_code*."""
    assert response is not None
    assert response.status_code == status_code
def assert_response_with_error(response, status_code):
    """Assert *response* has *status_code* and a non-empty 'errors' JSON field."""
    assert response is not None
    assert response.status_code == status_code
    assert 'errors' in response.json()
    assert response.json()['errors'] != ''
def assert_proper_response(response, status_code=HTTPStatus.OK):
    """Assert *response* has *status_code* and a JSON content type."""
    assert response is not None
    assert response.status_code == status_code
    assert response.headers['Content-Type'] == 'application/json'
def api_url_for(api_backend, endpoint, **kwargs):
    """Build the URL for *endpoint* on the backend's flask app.

    url_for() expects binary addresses, so every '0x'-prefixed string
    value is converted to its canonical binary form first.
    """
    api_server, _ = api_backend
    canonical_kwargs = {
        key: to_canonical_address(val) if isinstance(val, str) and val.startswith('0x') else val
        for key, val in kwargs.items()
    }
    with api_server.flask_app.app_context():
        return url_for('v1_resources.{}'.format(endpoint), **canonical_kwargs)
def test_hex_converter():
    """HexAddressConverter rejects malformed input and decodes a valid address."""
    converter = HexAddressConverter(map=None)

    # Invalid hex, too-short address, missing 0x prefix: all must raise.
    for invalid in ('-', '0x1234', '414d72a6f6e28f4950117696081450d63d56c354'):
        with pytest.raises(Exception):
            converter.to_python(invalid)

    address = b'AMr\xa6\xf6\xe2\x8fIP\x11v\x96\x08\x14P\xd6=V\xc3T'
    assert converter.to_python('0x414D72a6f6E28F4950117696081450d63D56C354') == address
def test_address_field():
    """AddressField rejects malformed input and deserializes a valid address."""
    # pylint: disable=protected-access
    field = AddressField()
    attr = 'test'
    data = object()

    # Invalid hex, too-short address, missing 0x prefix: all must raise.
    for invalid in ('-', '0x1234', '414d72a6f6e28f4950117696081450d63d56c354'):
        with pytest.raises(Exception):
            field._deserialize(invalid, attr, data)

    address = b'AMr\xa6\xf6\xe2\x8fIP\x11v\x96\x08\x14P\xd6=V\xc3T'
    assert field._deserialize('0x414D72a6f6E28F4950117696081450d63D56C354', attr, data) == address
def test_url_with_invalid_address(rest_api_port_number, api_backend):
    """ Addresses require the leading 0x in the urls. """
    url_without_prefix = (
        'http://localhost:{port}/api/1/'
        'channels/ea674fdde714fd979de3edf0f56aa9716b898ec8'
    ).format(port=rest_api_port_number)
    response = grequests.patch(
        url_without_prefix,
        json={'state': 'CHANNEL_STATE_SETTLED'},
    ).send().response
    assert_response_with_code(response, HTTPStatus.NOT_FOUND)
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_payload_with_address_without_prefix(api_backend):
    """ Addresses require leading 0x in the payload. """
    # the partner address deliberately lacks its 0x prefix
    payload = {
        'partner_address': '61c808d82a3ac53231750dadc13c777b59310bd9',
        'token_address': '0xEA674fdDe714fd979de3EdF0F56AA9716B898ec8',
        'settle_timeout': 10,
    }
    response = grequests.put(
        api_url_for(api_backend, 'channelsresource'),
        json=payload,
    ).send().response
    assert_response_with_error(response, HTTPStatus.BAD_REQUEST)
@pytest.mark.xfail(
    strict=True,
    reason='Crashed app also crashes on teardown',
    raises=CustomException,
)
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_crash_on_unhandled_exception(api_backend):
    """ Crash when an unhandled exception happens on APIServer.

    The xfail(strict=True, raises=CustomException) marker means the test is
    REQUIRED to surface the exception raised inside the endpoint: the server
    must not swallow unhandled errors.
    """
    api_server, _ = api_backend
    # as we should not have unhandled exceptions in our endpoints, create one to test
    @api_server.flask_app.route('/error_endpoint', methods=['GET'])
    def error_endpoint():
        raise CustomException('This is an unhandled error')
    with api_server.flask_app.app_context():
        url = url_for('error_endpoint')
    request = grequests.get(url)
    request.send()
    # join the server; the unhandled exception is expected to be re-raised here
    # (NOTE(review): presumably api_server is a greenlet-like object — confirm)
    api_server.get(timeout=10)
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_payload_with_address_invalid_chars(api_backend):
    """ Addresses cannot have invalid characters in it. """
    # 'g' at the end is not a valid hex digit
    payload = {
        'partner_address': '0x61c808d82a3ac53231750dadc13c777b59310bdg',
        'token_address': '0xEA674fdDe714fd979de3EdF0F56AA9716B898ec8',
        'settle_timeout': 10,
    }
    response = grequests.put(
        api_url_for(api_backend, 'channelsresource'),
        json=payload,
    ).send().response
    assert_response_with_error(response, HTTPStatus.BAD_REQUEST)
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_payload_with_address_invalid_length(api_backend):
    """ Encoded addresses must have the right length. """
    # address is one byte (two hex digits) short of the 20-byte length
    payload = {
        'partner_address': '0x61c808d82a3ac53231750dadc13c777b59310b',
        'token_address': '0xEA674fdDe714fd979de3EdF0F56AA9716B898ec8',
        'settle_timeout': 10,
    }
    response = grequests.put(
        api_url_for(api_backend, 'channelsresource'),
        json=payload,
    ).send().response
    assert_response_with_error(response, HTTPStatus.BAD_REQUEST)
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_payload_with_address_not_eip55(api_backend):
    """ Provided addresses must be EIP55 encoded. """
    # all-lowercase address: valid hex but not checksummed
    payload = {
        'partner_address': '0xf696209d2ca35e6c88e5b99b7cda3abf316bed69',
        'token_address': '0xEA674fdDe714fd979de3EdF0F56AA9716B898ec8',
        'settle_timeout': 90,
    }
    response = grequests.put(
        api_url_for(api_backend, 'channelsresource'),
        json=payload,
    ).send().response
    assert_response_with_error(response, HTTPStatus.BAD_REQUEST)
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_api_query_our_address(api_backend):
    """ The address endpoint returns the node's own checksummed address. """
    response = grequests.get(
        api_url_for(api_backend, 'addressresource'),
    ).send().response
    assert_proper_response(response)
    api_server, _ = api_backend
    our_address = api_server.rest_api.raiden_api.address
    assert response.json() == {'our_address': to_checksum_address(our_address)}
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_api_get_channel_list(
    api_backend,
    token_addresses,
    reveal_timeout,
):
    """ GET /channels is empty at first and lists the channel after it is opened. """
    partner_address = '0x61C808D82A3Ac53231750daDc13c777b59310bD9'
    request = grequests.get(
        api_url_for(
            api_backend,
            'channelsresource',
        ),
    )
    response = request.send().response
    assert_proper_response(response, HTTPStatus.OK)
    # no channels have been opened yet
    assert response.json() == []
    # let's create a new channel
    token_address = token_addresses[0]
    settle_timeout = 1650
    channel_data_obj = {
        'partner_address': partner_address,
        'token_address': to_checksum_address(token_address),
        'settle_timeout': settle_timeout,
        'reveal_timeout': reveal_timeout,
    }
    request = grequests.put(
        api_url_for(
            api_backend,
            'channelsresource',
        ),
        json=channel_data_obj,
    )
    response = request.send().response
    assert_proper_response(response, HTTPStatus.CREATED)
    # the freshly opened channel must now show up in the list
    request = grequests.get(
        api_url_for(
            api_backend,
            'channelsresource',
        ),
    )
    response = request.send().response
    assert_proper_response(response, HTTPStatus.OK)
    channel_info = response.json()[0]
    assert channel_info['partner_address'] == partner_address
    assert channel_info['token_address'] == to_checksum_address(token_address)
    assert channel_info['total_deposit'] == 0
    assert 'token_network_identifier' in channel_info
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_api_channel_status_channel_nonexistant(
    api_backend,
    token_addresses,
):
    """ Querying a channel that was never opened yields 404 with an explanatory error. """
    partner_address = '0x61C808D82A3Ac53231750daDc13c777b59310bD9'
    token_address = token_addresses[0]
    response = grequests.get(
        api_url_for(
            api_backend,
            'channelsresourcebytokenandpartneraddress',
            token_address=token_address,
            partner_address=partner_address,
        ),
    ).send().response
    assert_proper_response(response, HTTPStatus.NOT_FOUND)
    expected_error = "Channel with partner '{}' for token '{}' could not be found.".format(
        to_checksum_address(partner_address),
        to_checksum_address(token_address),
    )
    assert response.json()['errors'] == expected_error
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_api_open_and_deposit_channel(
    api_backend,
    token_addresses,
    reveal_timeout,
):
    """ Open one channel without and one with an initial deposit, deposit into
    the first afterwards, and verify that opening fails with 402 once the
    node's ETH is gone.
    """
    # let's create a new channel
    first_partner_address = '0x61C808D82A3Ac53231750daDc13c777b59310bD9'
    token_address = token_addresses[0]
    settle_timeout = 1650
    channel_data_obj = {
        'partner_address': first_partner_address,
        'token_address': to_checksum_address(token_address),
        'settle_timeout': settle_timeout,
        'reveal_timeout': reveal_timeout,
    }
    request = grequests.put(
        api_url_for(
            api_backend,
            'channelsresource',
        ),
        json=channel_data_obj,
    )
    response = request.send().response
    assert_proper_response(response, HTTPStatus.CREATED)
    first_channel_id = 1
    response = response.json()
    # NOTE: aliases (and mutates) the request payload dict; it is not reused
    expected_response = channel_data_obj
    expected_response['balance'] = 0
    expected_response['state'] = CHANNEL_STATE_OPENED
    expected_response['channel_identifier'] = 1
    expected_response['token_network_identifier'] = assert_dicts_are_equal.IGNORE_VALUE
    expected_response['total_deposit'] = 0
    assert_dicts_are_equal(response, expected_response)
    token_network_identifier = response['token_network_identifier']
    # now let's open a channel and make a deposit too
    second_partner_address = '0x29FA6cf0Cce24582a9B20DB94Be4B6E017896038'
    total_deposit = 100
    channel_data_obj = {
        'partner_address': second_partner_address,
        'token_address': to_checksum_address(token_address),
        'settle_timeout': settle_timeout,
        'reveal_timeout': reveal_timeout,
        'total_deposit': total_deposit,
    }
    request = grequests.put(
        api_url_for(
            api_backend,
            'channelsresource',
        ),
        json=channel_data_obj,
    )
    response = request.send().response
    assert_proper_response(response, HTTPStatus.CREATED)
    second_channel_id = 2
    response = response.json()
    expected_response = channel_data_obj
    expected_response['balance'] = total_deposit
    expected_response['state'] = CHANNEL_STATE_OPENED
    expected_response['channel_identifier'] = second_channel_id
    expected_response['token_network_identifier'] = token_network_identifier
    expected_response['total_deposit'] = total_deposit
    assert_dicts_are_equal(response, expected_response)
    # assert depositing negative amount fails
    request = grequests.patch(
        api_url_for(
            api_backend,
            'channelsresourcebytokenandpartneraddress',
            token_address=token_address,
            partner_address=first_partner_address,
        ),
        json={'total_deposit': -1000},
    )
    response = request.send().response
    assert_proper_response(response, HTTPStatus.CONFLICT)
    # let's deposit on the first channel
    request = grequests.patch(
        api_url_for(
            api_backend,
            'channelsresourcebytokenandpartneraddress',
            token_address=token_address,
            partner_address=first_partner_address,
        ),
        json={'total_deposit': total_deposit},
    )
    response = request.send().response
    assert_proper_response(response)
    response = response.json()
    expected_response = {
        'channel_identifier': first_channel_id,
        'partner_address': first_partner_address,
        'token_address': to_checksum_address(token_address),
        'settle_timeout': settle_timeout,
        'reveal_timeout': reveal_timeout,
        'state': CHANNEL_STATE_OPENED,
        'balance': total_deposit,
        'total_deposit': total_deposit,
        'token_network_identifier': token_network_identifier,
    }
    assert_dicts_are_equal(response, expected_response)
    # let's try querying for the second channel
    request = grequests.get(
        api_url_for(
            api_backend,
            'channelsresourcebytokenandpartneraddress',
            token_address=token_address,
            partner_address=second_partner_address,
        ),
    )
    response = request.send().response
    assert_proper_response(response)
    response = response.json()
    expected_response = {
        'channel_identifier': second_channel_id,
        'partner_address': second_partner_address,
        'token_address': to_checksum_address(token_address),
        'settle_timeout': settle_timeout,
        'reveal_timeout': reveal_timeout,
        'state': CHANNEL_STATE_OPENED,
        'balance': total_deposit,
        'total_deposit': total_deposit,
        'token_network_identifier': token_network_identifier,
    }
    assert_dicts_are_equal(response, expected_response)
    # finally let's burn all eth and try to open another channel
    api_server, _ = api_backend
    burn_all_eth(api_server.rest_api.raiden_api.raiden)
    channel_data_obj = {
        'partner_address': '0xf3AF96F89b3d7CdcBE0C083690A28185Feb0b3CE',
        'token_address': to_checksum_address(token_address),
        'settle_timeout': settle_timeout,
        'reveal_timeout': reveal_timeout,
        # NOTE(review): 'balance' is not a field the other payloads use
        # ('total_deposit' is) — presumably ignored by the server; confirm
        'balance': 1,
    }
    request = grequests.put(
        api_url_for(
            api_backend,
            'channelsresource',
        ),
        json=channel_data_obj,
    )
    response = request.send().response
    assert_proper_response(response, HTTPStatus.PAYMENT_REQUIRED)
    response = response.json()
    assert 'The account balance is below the estimated amount' in response['errors']
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_api_open_close_and_settle_channel(
    api_backend,
    token_addresses,
    reveal_timeout,
):
    """ Open a channel via PUT, close it via PATCH, and verify the reported state. """
    # let's create a new channel
    partner_address = '0x61C808D82A3Ac53231750daDc13c777b59310bD9'
    token_address = token_addresses[0]
    settle_timeout = 1650
    channel_data_obj = {
        'partner_address': partner_address,
        'token_address': to_checksum_address(token_address),
        'settle_timeout': settle_timeout,
    }
    request = grequests.put(
        api_url_for(
            api_backend,
            'channelsresource',
        ),
        json=channel_data_obj,
    )
    response = request.send().response
    balance = 0
    assert_proper_response(response, status_code=HTTPStatus.CREATED)
    channel_identifier = 1
    response = response.json()
    # NOTE: aliases (and mutates) the request payload dict; it is not reused
    expected_response = channel_data_obj
    expected_response['balance'] = balance
    expected_response['state'] = CHANNEL_STATE_OPENED
    expected_response['reveal_timeout'] = reveal_timeout
    expected_response['channel_identifier'] = channel_identifier
    expected_response['token_network_identifier'] = assert_dicts_are_equal.IGNORE_VALUE
    expected_response['total_deposit'] = 0
    assert_dicts_are_equal(response, expected_response)
    token_network_identifier = response['token_network_identifier']
    # let's close the channel
    request = grequests.patch(
        api_url_for(
            api_backend,
            'channelsresourcebytokenandpartneraddress',
            token_address=token_address,
            partner_address=partner_address,
        ),
        json={'state': CHANNEL_STATE_CLOSED},
    )
    response = request.send().response
    assert_proper_response(response)
    expected_response = {
        'token_network_identifier': token_network_identifier,
        'channel_identifier': channel_identifier,
        'partner_address': partner_address,
        'token_address': to_checksum_address(token_address),
        'settle_timeout': settle_timeout,
        'reveal_timeout': reveal_timeout,
        'state': CHANNEL_STATE_CLOSED,
        'balance': balance,
        'total_deposit': balance,
    }
    assert_dicts_are_equal(response.json(), expected_response)
@pytest.mark.parametrize('number_of_nodes', [2])
@pytest.mark.parametrize('channels_per_node', [0])
def test_api_close_insufficient_eth(
    api_backend,
    token_addresses,
    reveal_timeout,
):
    """ Closing a channel must fail with 402 once the node has no ETH for the transaction. """
    # let's create a new channel
    partner_address = '0x61C808D82A3Ac53231750daDc13c777b59310bD9'
    token_address = token_addresses[0]
    settle_timeout = 1650
    channel_data_obj = {
        'partner_address': partner_address,
        'token_address': to_checksum_address(token_address),
        'settle_timeout': settle_timeout,
    }
    request = grequests.put(
        api_url_for(
            api_backend,
            'channelsresource',
        ),
        json=channel_data_obj,
    )
    response = request.send().response
    balance = 0
    assert_proper_response(response, status_code=HTTPStatus.CREATED)
    channel_identifier = 1
    response = response.json()
    # NOTE: aliases (and mutates) the request payload dict; it is not reused
    expected_response = channel_data_obj
    expected_response['balance'] = balance
    expected_response['state'] = CHANNEL_STATE_OPENED
    expected_response['reveal_timeout'] = reveal_timeout
    expected_response['channel_identifier'] = channel_identifier
    expected_response['token_network_identifier'] = assert_dicts_are_equal.IGNORE_VALUE
    expected_response['total_deposit'] = 0
    assert_dicts_are_equal(response, expected_response)
    # let's burn all eth and try to close the channel
    api_server, _ = api_backend
    burn_all_eth(api_server.rest_api.raiden_api.raiden)
    request = grequests.patch(
        api_url_for(
            api_backend,
            'channelsresourcebytokenandpartneraddress',
            token_address=token_address,
            partner_address=partner_address,
        ),
        json={'state': CHANNEL_STATE_CLOSED},
    )
    response = request.send().response
    assert_proper_response(response, HTTPStatus.PAYMENT_REQUIRED)
    response = response.json()
    assert 'Insufficient ETH' in response['errors']
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_api_open_channel_invalid_input(
    api_backend,
    token_addresses,
    reveal_timeout,
):
    """ Opening a channel with a settle timeout outside the allowed range is rejected. """
    channel_data_obj = {
        'partner_address': '0x61C808D82A3Ac53231750daDc13c777b59310bD9',
        'token_address': to_checksum_address(token_addresses[0]),
        'settle_timeout': TEST_SETTLE_TIMEOUT_MIN - 1,
        'reveal_timeout': reveal_timeout,
    }
    # first one below the minimum, then one above the maximum
    for bad_settle_timeout in (TEST_SETTLE_TIMEOUT_MIN - 1, TEST_SETTLE_TIMEOUT_MAX + 1):
        channel_data_obj['settle_timeout'] = bad_settle_timeout
        response = grequests.put(
            api_url_for(
                api_backend,
                'channelsresource',
            ),
            json=channel_data_obj,
        ).send().response
        assert_response_with_error(response, status_code=HTTPStatus.CONFLICT)
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_api_channel_state_change_errors(
    api_backend,
    token_addresses,
    reveal_timeout,
):
    """ Invalid PATCH payloads on a channel are rejected with the proper error codes. """
    partner_address = '0x61C808D82A3Ac53231750daDc13c777b59310bD9'
    token_address = token_addresses[0]
    settle_timeout = 1650
    channel_data_obj = {
        'partner_address': partner_address,
        'token_address': to_checksum_address(token_address),
        'settle_timeout': settle_timeout,
        'reveal_timeout': reveal_timeout,
    }
    request = grequests.put(
        api_url_for(
            api_backend,
            'channelsresource',
        ),
        json=channel_data_obj,
    )
    response = request.send().response
    assert_proper_response(response, HTTPStatus.CREATED)
    # let's try to set a random state
    request = grequests.patch(
        api_url_for(
            api_backend,
            'channelsresourcebytokenandpartneraddress',
            token_address=token_address,
            partner_address=partner_address,
        ),
        json=dict(state='inlimbo'),
    )
    response = request.send().response
    assert_response_with_error(response, HTTPStatus.BAD_REQUEST)
    # let's try to set both new state and balance
    request = grequests.patch(
        api_url_for(
            api_backend,
            'channelsresourcebytokenandpartneraddress',
            token_address=token_address,
            partner_address=partner_address,
        ),
        json=dict(state=CHANNEL_STATE_CLOSED, total_deposit=200),
    )
    response = request.send().response
    assert_response_with_error(response, HTTPStatus.CONFLICT)
    # let's try to patch with no arguments
    request = grequests.patch(
        api_url_for(
            api_backend,
            'channelsresourcebytokenandpartneraddress',
            token_address=token_address,
            partner_address=partner_address,
        ),
    )
    response = request.send().response
    assert_response_with_error(response, HTTPStatus.BAD_REQUEST)
    # ok now let's close the channel for real
    request = grequests.patch(
        api_url_for(
            api_backend,
            'channelsresourcebytokenandpartneraddress',
            token_address=token_address,
            partner_address=partner_address,
        ),
        json=dict(state=CHANNEL_STATE_CLOSED),
    )
    response = request.send().response
    assert_proper_response(response)
    # depositing to the (now closed) channel must fail
    request = grequests.patch(
        api_url_for(
            api_backend,
            'channelsresourcebytokenandpartneraddress',
            token_address=token_address,
            partner_address=partner_address,
        ),
        json=dict(total_deposit=500),
    )
    response = request.send().response
    assert_response_with_error(response, HTTPStatus.CONFLICT)
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
@pytest.mark.parametrize('number_of_tokens', [2])
def test_api_tokens(api_backend, blockchain_services, token_addresses):
    """ The token list endpoint returns every token that has a channel. """
    partner_address = '0x61C808D82A3Ac53231750daDc13c777b59310bD9'
    token_address1 = token_addresses[0]
    token_address2 = token_addresses[1]
    settle_timeout = 1650
    # open a channel for the first token
    channel_data_obj = {
        'partner_address': partner_address,
        'token_address': to_checksum_address(token_address1),
        'settle_timeout': settle_timeout,
    }
    request = grequests.put(
        api_url_for(
            api_backend,
            'channelsresource',
        ),
        json=channel_data_obj,
    )
    response = request.send().response
    assert_proper_response(response, HTTPStatus.CREATED)
    # open a channel for the second token (same partner)
    partner_address = '0x61C808D82A3Ac53231750daDc13c777b59310bD9'
    settle_timeout = 1650
    channel_data_obj = {
        'partner_address': partner_address,
        'token_address': to_checksum_address(token_address2),
        'settle_timeout': settle_timeout,
    }
    request = grequests.put(
        api_url_for(
            api_backend,
            'channelsresource',
        ),
        json=channel_data_obj,
    )
    response = request.send().response
    assert_proper_response(response, HTTPStatus.CREATED)
    # and now let's get the token list
    request = grequests.get(
        api_url_for(
            api_backend,
            'tokensresource',
        ),
    )
    response = request.send().response
    assert_proper_response(response)
    response = response.json()
    expected_response = [
        to_checksum_address(token_address1),
        to_checksum_address(token_address2),
    ]
    # order is not guaranteed, compare as sets
    assert set(response) == set(expected_response)
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_query_partners_by_token(api_backend, blockchain_services, token_addresses):
    """ The partners-by-token endpoint lists every partner we have a channel with. """
    first_partner_address = '0x61C808D82A3Ac53231750daDc13c777b59310bD9'
    second_partner_address = '0x29FA6cf0Cce24582a9B20DB94Be4B6E017896038'
    token_address = token_addresses[0]
    settle_timeout = 1650
    channel_data_obj = {
        'partner_address': first_partner_address,
        'token_address': to_checksum_address(token_address),
        'settle_timeout': settle_timeout,
    }
    request = grequests.put(
        api_url_for(
            api_backend,
            'channelsresource',
        ),
        json=channel_data_obj,
    )
    response = request.send().response
    assert_proper_response(response, HTTPStatus.CREATED)
    response = response.json()
    channel_data_obj['partner_address'] = second_partner_address
    request = grequests.put(
        api_url_for(
            api_backend,
            'channelsresource',
        ),
        json=channel_data_obj,
    )
    response = request.send().response
    assert_proper_response(response, HTTPStatus.CREATED)
    response = response.json()
    # open a third channel with yet another partner.
    # NOTE(review): the original intent seems to have been "a channel for
    # another token", but the same token_address is reused here; the
    # containment assertion below still holds either way — confirm intent.
    channel_data_obj['partner_address'] = '0xb07937AbA15304FBBB0Bf6454a9377a76E3dD39E'
    channel_data_obj['token_address'] = to_checksum_address(token_address)
    request = grequests.put(
        api_url_for(
            api_backend,
            'channelsresource',
        ),
        json=channel_data_obj,
    )
    response = request.send().response
    assert_proper_response(response, HTTPStatus.CREATED)
    # and now let's query our partners per token for the first token
    request = grequests.get(
        api_url_for(
            api_backend,
            'partnersresourcebytokenaddress',
            token_address=to_checksum_address(token_address),
        ),
    )
    response = request.send().response
    assert_proper_response(response)
    response = response.json()
    expected_response = [
        {
            'partner_address': first_partner_address,
            'channel': '/api/1/channels/{}/{}'.format(
                to_checksum_address(token_address),
                to_checksum_address(first_partner_address),
            ),
        }, {
            'partner_address': second_partner_address,
            'channel': '/api/1/channels/{}/{}'.format(
                to_checksum_address(token_address),
                to_checksum_address(second_partner_address),
            ),
        },
    ]
    # containment check only: the third partner may also appear in the response
    assert all(r in response for r in expected_response)
@pytest.mark.parametrize('number_of_nodes', [2])
def test_api_payments(api_backend, raiden_network, token_addresses):
    """ A successful payment is echoed back with initiator, target, token, amount and id. """
    _, app1 = raiden_network
    amount = 200
    identifier = 42
    token_address = token_addresses[0]
    target_address = app1.raiden.address
    api_server, _ = api_backend
    our_address = api_server.rest_api.raiden_api.address
    response = grequests.post(
        api_url_for(
            api_backend,
            'token_target_paymentresource',
            token_address=to_checksum_address(token_address),
            target_address=to_checksum_address(target_address),
        ),
        json={'amount': amount, 'identifier': identifier},
    ).send().response
    assert_proper_response(response)
    assert response.json() == {
        'initiator_address': to_checksum_address(our_address),
        'target_address': to_checksum_address(target_address),
        'token_address': to_checksum_address(token_address),
        'amount': amount,
        'identifier': identifier,
    }
@pytest.mark.parametrize('number_of_tokens', [0])
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_register_token(api_backend, token_amount, token_addresses, raiden_network):
    """ Registering a token succeeds once, conflicts the second time, and fails
    with 402 when the node has no ETH left.
    """
    app0 = raiden_network[0]
    # deploy two fresh tokens so neither is registered yet (number_of_tokens=0)
    new_token_address = deploy_contract_web3(
        CONTRACT_HUMAN_STANDARD_TOKEN,
        app0.raiden.chain.client,
        num_confirmations=None,
        constructor_arguments=(
            token_amount,
            2,
            'raiden',
            'Rd',
        ),
    )
    other_token_address = deploy_contract_web3(
        CONTRACT_HUMAN_STANDARD_TOKEN,
        app0.raiden.chain.client,
        num_confirmations=None,
        constructor_arguments=(
            token_amount,
            2,
            'raiden',
            'Rd',
        ),
    )
    register_request = grequests.put(api_url_for(
        api_backend,
        'registertokenresource',
        token_address=to_checksum_address(new_token_address),
    ))
    register_response = register_request.send().response
    assert_proper_response(register_response, status_code=HTTPStatus.CREATED)
    response_json = register_response.json()
    assert 'token_network_address' in response_json
    assert is_checksum_address(response_json['token_network_address'])
    # now try to reregister it and get the error
    conflict_request = grequests.put(api_url_for(
        api_backend,
        'registertokenresource',
        token_address=to_checksum_address(new_token_address),
    ))
    conflict_response = conflict_request.send().response
    assert_response_with_error(conflict_response, HTTPStatus.CONFLICT)
    # Burn all the eth and then make sure we get the appropriate API error
    burn_all_eth(app0.raiden)
    poor_request = grequests.put(api_url_for(
        api_backend,
        'registertokenresource',
        token_address=to_checksum_address(other_token_address),
    ))
    poor_response = poor_request.send().response
    assert_response_with_error(poor_response, HTTPStatus.PAYMENT_REQUIRED)
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
@pytest.mark.parametrize('number_of_tokens', [2])
def test_get_connection_managers_info(api_backend, token_addresses):
    """ The connections info endpoint grows by one entry per token we connect to. """
    # check that there are no registered tokens
    request = grequests.get(
        api_url_for(api_backend, 'connectionsinforesource'),
    )
    response = request.send().response
    result = response.json()
    assert len(result) == 0
    funds = 100
    token_address1 = to_checksum_address(token_addresses[0])
    connect_data_obj = {
        'funds': funds,
    }
    request = grequests.put(
        api_url_for(
            api_backend,
            'connectionsresource',
            token_address=token_address1,
        ),
        json=connect_data_obj,
    )
    response = request.send().response
    assert_no_content_response(response)
    # check that there now is one registered channel manager
    request = grequests.get(
        api_url_for(api_backend, 'connectionsinforesource'),
    )
    response = request.send().response
    result = response.json()
    assert isinstance(result, dict) and len(result.keys()) == 1
    assert token_address1 in result
    assert isinstance(result[token_address1], dict)
    assert set(result[token_address1].keys()) == {'funds', 'sum_deposits', 'channels'}
    # connect to the second token network as well
    funds = 100
    token_address2 = to_checksum_address(token_addresses[1])
    connect_data_obj = {
        'funds': funds,
    }
    request = grequests.put(
        api_url_for(
            api_backend,
            'connectionsresource',
            token_address=token_address2,
        ),
        json=connect_data_obj,
    )
    response = request.send().response
    assert_no_content_response(response)
    # check that there now are two registered channel managers
    request = grequests.get(
        api_url_for(api_backend, 'connectionsinforesource'),
    )
    response = request.send().response
    result = response.json()
    assert isinstance(result, dict) and len(result.keys()) == 2
    assert token_address2 in result
    assert isinstance(result[token_address2], dict)
    assert set(result[token_address2].keys()) == {'funds', 'sum_deposits', 'channels'}
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
@pytest.mark.parametrize('number_of_tokens', [2])
def test_connect_insufficient_reserve(api_backend, token_addresses):
    """ Connecting to a token network without enough ETH yields 402. """
    # Burn all eth and then try to connect to a token network
    api_server, _ = api_backend
    burn_all_eth(api_server.rest_api.raiden_api.raiden)
    token_address1 = to_checksum_address(token_addresses[0])
    response = grequests.put(
        api_url_for(
            api_backend,
            'connectionsresource',
            token_address=token_address1,
        ),
        json={'funds': 100},
    ).send().response
    assert_proper_response(response, HTTPStatus.PAYMENT_REQUIRED)
    assert 'The account balance is below the estimated amount' in response.json()['errors']
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_network_events(api_backend, token_addresses):
    """ Opening a channel produces at least one network-level blockchain event. """
    # open a channel so there is something to find in the event log
    channel_data_obj = {
        'partner_address': '0x61C808D82A3Ac53231750daDc13c777b59310bD9',
        'token_address': to_checksum_address(token_addresses[0]),
        'settle_timeout': 1650,
    }
    open_response = grequests.put(
        api_url_for(
            api_backend,
            'channelsresource',
        ),
        json=channel_data_obj,
    ).send().response
    assert_proper_response(open_response, status_code=HTTPStatus.CREATED)
    events_response = grequests.get(
        api_url_for(
            api_backend,
            'blockchaineventsnetworkresource',
            from_block=0,
        ),
    ).send().response
    assert_proper_response(events_response, status_code=HTTPStatus.OK)
    assert len(events_response.json()) > 0
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_token_events(api_backend, token_addresses):
    """ Opening a channel produces at least one token-level blockchain event. """
    token_address = token_addresses[0]
    # open a channel so there is something to find in the event log
    channel_data_obj = {
        'partner_address': '0x61C808D82A3Ac53231750daDc13c777b59310bD9',
        'token_address': to_checksum_address(token_address),
        'settle_timeout': 1650,
    }
    open_response = grequests.put(
        api_url_for(
            api_backend,
            'channelsresource',
        ),
        json=channel_data_obj,
    ).send().response
    assert_proper_response(open_response, status_code=HTTPStatus.CREATED)
    events_response = grequests.get(
        api_url_for(
            api_backend,
            'blockchaineventstokenresource',
            token_address=token_address,
            from_block=0,
        ),
    ).send().response
    assert_proper_response(events_response, status_code=HTTPStatus.OK)
    assert len(events_response.json()) > 0
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_channel_events(api_backend, token_addresses):
    """ Opening a channel produces at least one channel-level blockchain event. """
    partner_address = '0x61C808D82A3Ac53231750daDc13c777b59310bD9'
    token_address = token_addresses[0]
    # open a channel so there is something to find in the event log
    channel_data_obj = {
        'partner_address': partner_address,
        'token_address': to_checksum_address(token_address),
        'settle_timeout': 1650,
    }
    open_response = grequests.put(
        api_url_for(
            api_backend,
            'channelsresource',
        ),
        json=channel_data_obj,
    ).send().response
    assert_proper_response(open_response, status_code=HTTPStatus.CREATED)
    events_response = grequests.get(
        api_url_for(
            api_backend,
            'tokenchanneleventsresourceblockchain',
            partner_address=partner_address,
            token_address=token_address,
            from_block=0,
        ),
    ).send().response
    assert_proper_response(events_response, status_code=HTTPStatus.OK)
    assert len(events_response.json()) > 0
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_token_events_errors_for_unregistered_token(api_backend):
    """ Event queries for a token that was never registered return 404. """
    token_events_response = grequests.get(
        api_url_for(
            api_backend,
            'tokenchanneleventsresourceblockchain',
            token_address='0x61C808D82A3Ac53231750daDc13c777b59310bD9',
            from_block=5,
            to_block=20,
        ),
    ).send().response
    assert_response_with_error(token_events_response, status_code=HTTPStatus.NOT_FOUND)
    channel_events_response = grequests.get(
        api_url_for(
            api_backend,
            'channelblockchaineventsresource',
            token_address='0x61C808D82A3Ac53231750daDc13c777b59310bD9',
            partner_address='0x61C808D82A3Ac53231750daDc13c777b59313bD9',
            from_block=5,
            to_block=20,
        ),
    ).send().response
    assert_response_with_error(channel_events_response, status_code=HTTPStatus.NOT_FOUND)
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
@pytest.mark.parametrize('deposit', [50000])
def test_api_deposit_limit(
    api_backend,
    token_addresses,
    reveal_timeout,
):
    """ Depositing exactly the token deposit limit works; one unit above it is rejected. """
    # let's create a new channel and deposit exactly the limit amount
    first_partner_address = '0x61C808D82A3Ac53231750daDc13c777b59310bD9'
    token_address = token_addresses[0]
    settle_timeout = 1650
    balance_working = MAX_TOKENS_DEPLOY * (10 ** 2) # token has two digits
    channel_data_obj = {
        'partner_address': first_partner_address,
        'token_address': to_checksum_address(token_address),
        'settle_timeout': settle_timeout,
        'reveal_timeout': reveal_timeout,
        'total_deposit': balance_working,
    }
    request = grequests.put(
        api_url_for(
            api_backend,
            'channelsresource',
        ),
        json=channel_data_obj,
    )
    response = request.send().response
    assert_proper_response(response, HTTPStatus.CREATED)
    first_channel_identifier = 1
    response = response.json()
    # NOTE: aliases (and mutates) the request payload dict; it is not reused
    expected_response = channel_data_obj
    expected_response['balance'] = balance_working
    expected_response['state'] = CHANNEL_STATE_OPENED
    expected_response['channel_identifier'] = first_channel_identifier
    expected_response['token_network_identifier'] = assert_dicts_are_equal.IGNORE_VALUE
    expected_response['total_deposit'] = balance_working
    assert_dicts_are_equal(response, expected_response)
    # now let's open a channel and deposit a bit more than the limit
    second_partner_address = '0x29FA6cf0Cce24582a9B20DB94Be4B6E017896038'
    balance_failing = balance_working + 1 # one unit above the deposit limit
    channel_data_obj = {
        'partner_address': second_partner_address,
        'token_address': to_checksum_address(token_address),
        'settle_timeout': settle_timeout,
        'reveal_timeout': reveal_timeout,
        'total_deposit': balance_failing,
    }
    request = grequests.put(
        api_url_for(
            api_backend,
            'channelsresource',
        ),
        json=channel_data_obj,
    )
    response = request.send().response
    assert_proper_response(response, HTTPStatus.CONFLICT)
    response = response.json()
    assert response['errors'] == 'The deposit of 10001 is bigger than the current limit of 10000'
@pytest.mark.parametrize('number_of_nodes', [3])
def test_payment_events_endpoints(api_backend, raiden_network, token_addresses):
    """Payment events can be queried globally, per token, and per token+target."""
    _, app1, app2 = raiden_network
    identifier = 42
    token_address = token_addresses[0]
    api_server, _ = api_backend

    def send_payment(target_address, amount):
        # POST a payment to the given target; response checked via the
        # events endpoints below.
        request = grequests.post(
            api_url_for(
                api_backend,
                'token_target_paymentresource',
                token_address=to_checksum_address(token_address),
                target_address=to_checksum_address(target_address),
            ),
            json={'amount': amount, 'identifier': identifier},
        )
        request.send()

    # sending tokens to target 1, then a slightly smaller amount to target 2
    send_payment(app1.raiden.address, 200)
    send_payment(app2.raiden.address, 190)

    def query_events(url):
        # GET an events endpoint and return the decoded JSON body.
        response = grequests.get(url).send().response
        assert_proper_response(response, HTTPStatus.OK)
        return response.json()

    # test endpoint without (partner and token)
    events = query_events(api_url_for(api_backend, 'paymentresource'))
    assert len(events) == 2
    assert all(event['event'] == 'EventPaymentSentSuccess' for event in events)

    # test endpoint without partner
    events = query_events(api_url_for(
        api_backend,
        'token_paymentresource',
        token_address=token_address,
    ))
    assert len(events) == 2
    assert all(event['event'] == 'EventPaymentSentSuccess' for event in events)

    # test endpoint for token and partner
    events = query_events(api_url_for(
        api_backend,
        'token_target_paymentresource',
        token_address=token_address,
        target_address=app1.raiden.address,
    ))
    assert len(events) == 1
    assert events[0]['event'] == 'EventPaymentSentSuccess'
@pytest.mark.parametrize('number_of_nodes', [2])
def test_channel_events_raiden(api_backend, raiden_network, token_addresses):
    """A direct payment posted through the REST API succeeds."""
    _, app1 = raiden_network
    payment = {'amount': 200, 'identifier': 42}
    request = grequests.post(
        api_url_for(
            api_backend,
            'token_target_paymentresource',
            token_address=to_checksum_address(token_addresses[0]),
            target_address=to_checksum_address(app1.raiden.address),
        ),
        json=payment,
    )
    response = request.send().response
    assert_proper_response(response)
|
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2018, Arm Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Scheduler load tracking analysis module """
import operator
import itertools
import pandas as pd
from lisa.analysis.base import TraceAnalysisBase
from lisa.analysis.status import StatusAnalysis
from lisa.trace import requires_one_event_of, may_use_events, TaskID
from lisa.utils import deprecate
from lisa.datautils import df_refit_index, series_refit_index, df_filter_task_ids, df_split_signals
from lisa.conf import TypedList
class LoadTrackingAnalysis(TraceAnalysisBase):
    """
    Support for scheduler load tracking analysis

    :param trace: input Trace object
    :type trace: lisa.trace.Trace
    """

    name = 'load_tracking'

    _SCHED_PELT_SE_NAMES = [
        'sched_pelt_se',
        'sched_load_se',
        'sched_load_avg_task'
    ]
    """
    All the names that the per-task load tracking event ever had in various
    kernel versions (Android, mainline etc)
    """

    _SCHED_PELT_CFS_NAMES = [
        'sched_pelt_cfs',
        'sched_load_cfs_rq',
        'sched_load_avg_cpu',
    ]
    """
    All the names that the per-CPU load tracking event ever had in various
    kernel versions (Android, mainline etc)
    """

    @classmethod
    def _columns_renaming(cls, event):
        """
        Columns to rename to unify dataframes between trace event versions
        """
        if event in ['sched_load_avg_cpu', 'sched_load_avg_task']:
            return {
                "util_avg": "util",
                "load_avg": "load"
            }

        if event == 'sched_util_est_task':
            return {
                'est_enqueued': 'util_est_enqueued',
                'est_ewma': 'util_est_ewma',
            }

        return {}

    @classmethod
    def _columns_to_drop(cls, event):
        """
        The extra columns not shared between trace event versions
        """
        if event in [*cls._SCHED_PELT_CFS_NAMES, 'sched_load_se', 'sched_pelt_se']:
            return ['path', 'rbl_load', 'runnable']

        if event in ['sched_load_avg_task']:
            return ['load_sum', 'period_contrib', 'util_sum']

        return []

    def _df_uniformized_signal(self, event):
        """
        Dataframe of ``event`` normalized to a version-independent schema
        (columns renamed/dropped per :meth:`_columns_renaming` and
        :meth:`_columns_to_drop`).
        """
        df = self.trace.df_events(event)
        df = df.rename(columns=self._columns_renaming(event), copy=True)

        # Legacy sched_load_avg_* events don't have a `path` field.
        if not event.startswith('sched_load_avg_'):
            # Keep only root entities: "(null)" path for scheduling entities,
            # "/" for the root cfs_rq of each CPU.
            if event in self._SCHED_PELT_SE_NAMES:
                df = df[df.path == "(null)"]

            if event in self._SCHED_PELT_CFS_NAMES:
                df = df[df.path == "/"]

        to_drop = self._columns_to_drop(event)
        # errors='ignore': some event versions lack some of these columns
        df.drop(columns=to_drop, inplace=True, errors='ignore')

        return df

    def _df_either_event(self, events):
        """
        Uniformized dataframe of the first event of ``events`` available in
        the trace.

        :raises RuntimeError: if none of ``events`` is present.
        """
        for event in events:
            if event not in self.trace.available_events:
                continue

            return self._df_uniformized_signal(event)

        raise RuntimeError("Trace is missing one of either events: {}".format(events))

    @may_use_events(
        requires_one_event_of(*_SCHED_PELT_CFS_NAMES),
        'sched_util_est_cpu'
    )
    def df_cpus_signal(self, signal):
        """
        Get the load-tracking signals for the CPUs

        :returns: a :class:`pandas.DataFrame` with a column of the same name as
            the specified ``signal``, and additional context columns such as
            ``cpu``.

        :param signal: Signal name to get. Can be any of:

            * ``util``
            * ``load``
            * ``util_est_enqueued``

        :type signal: str

        :raises ValueError: if ``signal`` is not one of the supported names.
        """
        if signal in ('util', 'load'):
            df = self._df_either_event(self._SCHED_PELT_CFS_NAMES)
        elif signal == 'util_est_enqueued':
            df = self._df_uniformized_signal('sched_util_est_cpu')
        else:
            raise ValueError('Signal "{}" not supported'.format(signal))

        return df[['cpu', signal]]

    @deprecate(replaced_by=df_cpus_signal, deprecated_in='2.0', removed_in='2.1')
    @requires_one_event_of(*_SCHED_PELT_CFS_NAMES)
    def df_cpus_signals(self):
        """
        Get the load-tracking signals for the CPUs

        :returns: a :class:`pandas.DataFrame` with:

          * A ``util`` column (the average utilization of a CPU at time t)
          * A ``load`` column (the average load of a CPU at time t)
        """
        return self._df_either_event(self._SCHED_PELT_CFS_NAMES)

    @TraceAnalysisBase.cache
    @may_use_events(
        requires_one_event_of(*_SCHED_PELT_SE_NAMES),
        'sched_util_est_task'
    )
    def df_tasks_signal(self, signal):
        """
        Get the load-tracking signals for the tasks

        :returns: a :class:`pandas.DataFrame` with a column of the same name as
            the specified ``signal``, and additional context columns such as
            ``cpu``.

        :param signal: Signal name to get. Can be any of:

            * ``util``
            * ``load``
            * ``util_est_enqueued``
            * ``util_est_ewma``
            * ``required_capacity``

        :type signal: str

        :raises ValueError: if ``signal`` is not one of the supported names.
        """
        if signal in ('util', 'load'):
            df = self._df_either_event(self._SCHED_PELT_SE_NAMES)
        elif signal in ('util_est_enqueued', 'util_est_ewma'):
            df = self._df_uniformized_signal('sched_util_est_task')
        elif signal == 'required_capacity':
            # Add a column which represents the max capacity of the smallest
            # CPU which can accommodate the task utilization
            capacities = sorted(self.trace.plat_info["cpu-capacities"]['orig'].values())

            def fits_capacity(util):
                # Smallest capacity able to hold `util`, or the biggest one
                # if the task does not fit anywhere.
                for capacity in capacities:
                    if util <= capacity:
                        return capacity

                return capacities[-1]

            df = self._df_either_event(self._SCHED_PELT_SE_NAMES)
            df['required_capacity'] = df['util'].map(fits_capacity)
        else:
            raise ValueError('Signal "{}" not supported'.format(signal))

        # Select the available columns among
        columns = {'cpu', 'comm', 'pid', 'update_time', signal}
        columns = sorted(set(df.columns) & columns)
        return df[columns]

    @TraceAnalysisBase.cache
    @df_tasks_signal.used_events
    def df_task_signal(self, task, signal):
        """
        Same as :meth:`df_tasks_signal` but for one task only.

        :param task: The name or PID of the task, or a tuple ``(pid, comm)``
        :type task: str or int or tuple

        :param signal: See :meth:`df_tasks_signal`.
        """
        task_id = self.trace.get_task_id(task, update=False)
        df = self.df_tasks_signal(signal=signal)
        return df_filter_task_ids(df, [task_id])

    @deprecate(replaced_by=df_tasks_signal, deprecated_in='2.0', removed_in='2.1')
    @requires_one_event_of(*_SCHED_PELT_SE_NAMES)
    def df_tasks_signals(self):
        """
        Get the load-tracking signals for the tasks

        :returns: a :class:`pandas.DataFrame` with:

          * A ``util`` column (the average utilization of a task at time t)
          * A ``load`` column (the average load of a task at time t)

          If CPU capacity information is available:

          * A ``required_capacity`` column (the minimum available CPU capacity
            required to run this task without being CPU-bound)
        """
        df = self._df_either_event(self._SCHED_PELT_SE_NAMES)
        if "orig" in self.trace.plat_info['cpu-capacities']:
            df['required_capacity'] = self.df_tasks_signal('required_capacity')['required_capacity']
        return df

    @TraceAnalysisBase.cache
    @df_tasks_signal.used_events
    def df_top_big_tasks(self, util_threshold, min_samples=100):
        """
        Tasks which had 'utilization' samples bigger than the specified
        threshold

        :param util_threshold: utilization value above which samples are
            counted (e.g. the capacity of a little cluster)
        :type util_threshold: int

        :param min_samples: minimum number of samples over ``util_threshold``
            for a task to be kept
        :type min_samples: int

        :returns: a :class:`pandas.DataFrame` with:

          * Task PIDs as index
          * A ``samples`` column (The number of util samples above the threshold)
          * A ``comm`` column (the latest name seen for that PID)
        """
        df = self.df_tasks_signal('util')

        # Compute number of samples above threshold
        samples = df[df.util > util_threshold].groupby('pid').count()["util"]
        samples = samples[samples > min_samples]
        samples = samples.sort_values(ascending=False)

        top_df = pd.DataFrame(samples).rename(columns={"util": "samples"})

        def get_name(pid):
            # A PID may map to several comms over its lifetime; take the last
            return self.trace.get_task_pid_names(pid)[-1]
        top_df["comm"] = top_df.index.map(get_name)
        return top_df

    @TraceAnalysisBase.plot_method(return_axis=True)
    @may_use_events(
        StatusAnalysis.plot_overutilized.used_events,
        'cpu_capacity',
    )
    @df_cpus_signal.used_events
    def plot_cpus_signals(self, cpus=None, signals: TypedList[str]=['util', 'load'], axis=None, **kwargs):
        """
        Plot the CPU-related load-tracking signals

        :param cpus: list of CPUs to be plotted (defaults to all CPUs)
        :type cpus: list(int)

        :param signals: List of signals to plot.
        :type signals: list(str)
        """
        cpus = cpus or list(range(self.trace.cpus_count))
        window = self.trace.window

        def plotter(axes, local_fig):
            # With a single CPU, `axes` is one axis rather than a sequence
            axes = axes if len(cpus) > 1 else itertools.repeat(axes)
            for cpu, axis in zip(cpus, axes):
                # Add CPU utilization
                axis.set_title('CPU{}'.format(cpu))
                for signal in signals:
                    df = self.df_cpus_signal(signal)
                    df = df[df['cpu'] == cpu]
                    df = df_refit_index(df, window=window)
                    df[signal].plot(ax=axis, drawstyle='steps-post', alpha=0.4)

                self.trace.analysis.cpus.plot_orig_capacity(cpu, axis=axis)

                # Add capacities data if available
                if self.trace.has_events('cpu_capacity'):
                    df = self.trace.df_events('cpu_capacity')
                    df = df[df["__cpu"] == cpu]
                    if len(df):
                        data = df[['capacity']]
                        data = df_refit_index(data, window=window)
                        data.plot(ax=axis, style=['m', '--y'],
                                  drawstyle='steps-post')

                # Add overutilized signal to the plot
                plot_overutilized = self.trace.analysis.status.plot_overutilized
                if self.trace.has_events(plot_overutilized.used_events):
                    plot_overutilized(axis=axis)

                axis.set_ylim(0, 1100)
                axis.legend()

        return self.do_plot(plotter, nrows=len(cpus), sharex=True, axis=axis, **kwargs)

    @TraceAnalysisBase.plot_method()
    @df_task_signal.used_events
    def plot_task_signals(self, task: TaskID, axis, local_fig, signals: TypedList[str]=['util', 'load']):
        """
        Plot the task-related load-tracking signals

        :param task: The name or PID of the task, or a tuple ``(pid, comm)``
        :type task: str or int or tuple

        :param signals: List of signals to plot.
        :type signals: list(str)
        """
        window = self.trace.window
        task = self.trace.get_task_id(task, update=False)

        for signal in signals:
            df = self.df_task_signal(task, signal)
            df = df_refit_index(df, window=window)
            df[signal].plot(ax=axis, drawstyle='steps-post', alpha=0.4)

        plot_overutilized = self.trace.analysis.status.plot_overutilized
        if self.trace.has_events(plot_overutilized.used_events):
            plot_overutilized(axis=axis)

        axis.set_title('Load-tracking signals of task {}'.format(task))
        axis.legend()
        axis.grid(True)

    @TraceAnalysisBase.plot_method(return_axis=True)
    @df_tasks_signal.used_events
    def plot_task_required_capacity(self, task: TaskID, axis=None, **kwargs):
        """
        Plot the minimum required capacity of a task

        :param task: The name or PID of the task, or a tuple ``(pid, comm)``
        :type task: str or int or tuple
        """
        window = self.trace.window

        task_ids = self.trace.get_task_ids(task)
        df = self.df_tasks_signal('required_capacity')
        df = df_filter_task_ids(df, task_ids)
        df = df_refit_index(df, window=window)

        # Build task names (there could be multiple, during the task lifetime)
        task_name = 'Task ({})'.format(', '.join(map(str, task_ids)))

        def plotter(axis, local_fig):
            df["required_capacity"].plot(
                drawstyle='steps-post',
                ax=axis)

            axis.legend()
            axis.grid(True)

            if local_fig:
                axis.set_title(task_name)
                axis.set_ylim(0, 1100)
                axis.set_ylabel('Utilization')
                axis.set_xlabel('Time (s)')

        return self.do_plot(plotter, height=8, axis=axis, **kwargs)

    @TraceAnalysisBase.plot_method()
    @df_task_signal.used_events
    def plot_task_placement(self, task: TaskID, axis, local_fig):
        """
        Plot the CPU placement of the task

        :param task: The name or PID of the task, or a tuple ``(pid, comm)``
        :type task: str or int or tuple
        """
        task_id = self.trace.get_task_id(task, update=False)

        # Get all utilization update events
        df = self.df_task_signal(task_id, 'required_capacity').copy()

        cpu_capacities = self.trace.plat_info["cpu-capacities"]['orig']
        df['capacity'] = df['cpu'].map(cpu_capacities)

        def add_placement(df, comp, comp_str):
            placement = "CPU capacity {} required capacity".format(comp_str)
            condition = comp(df['capacity'], df['required_capacity'])
            df.loc[condition, 'placement'] = placement

        add_placement(df, operator.lt, '<')
        add_placement(df, operator.gt, '>')
        add_placement(df, operator.eq, '==')

        for cols, placement_df in df_split_signals(df, ['placement']):
            placement = cols['placement']
            # BUGFIX: plot only the samples of this placement category.
            # The original plotted the full `df["cpu"]` series for every
            # category, so all three labels showed identical data.
            series = placement_df["cpu"]
            series = series_refit_index(series, window=self.trace.window)
            series.plot(ax=axis, style="+", label=placement)

        plot_overutilized = self.trace.analysis.status.plot_overutilized
        if self.trace.has_events(plot_overutilized.used_events):
            plot_overutilized(axis=axis)

        if local_fig:
            axis.set_title('Utilization vs placement of task "{}"'.format(task))

        axis.grid(True)
        axis.legend()
# vim :set tabstop=4 shiftwidth=4 expandtab textwidth=80
lisa.analysis.load_tracking: Add missing type annotation
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2018, Arm Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Scheduler load tracking analysis module """
import operator
import itertools
import pandas as pd
from lisa.analysis.base import TraceAnalysisBase
from lisa.analysis.status import StatusAnalysis
from lisa.trace import requires_one_event_of, may_use_events, TaskID, CPU
from lisa.utils import deprecate
from lisa.datautils import df_refit_index, series_refit_index, df_filter_task_ids, df_split_signals
from lisa.conf import TypedList
class LoadTrackingAnalysis(TraceAnalysisBase):
    """
    Support for scheduler load tracking analysis

    :param trace: input Trace object
    :type trace: lisa.trace.Trace
    """

    name = 'load_tracking'

    _SCHED_PELT_SE_NAMES = [
        'sched_pelt_se',
        'sched_load_se',
        'sched_load_avg_task'
    ]
    """
    All the names that the per-task load tracking event ever had in various
    kernel versions (Android, mainline etc)
    """

    _SCHED_PELT_CFS_NAMES = [
        'sched_pelt_cfs',
        'sched_load_cfs_rq',
        'sched_load_avg_cpu',
    ]
    """
    All the names that the per-CPU load tracking event ever had in various
    kernel versions (Android, mainline etc)
    """

    @classmethod
    def _columns_renaming(cls, event):
        """
        Columns to rename to unify dataframes between trace event versions
        """
        if event in ['sched_load_avg_cpu', 'sched_load_avg_task']:
            return {
                "util_avg": "util",
                "load_avg": "load"
            }

        if event == 'sched_util_est_task':
            return {
                'est_enqueued': 'util_est_enqueued',
                'est_ewma': 'util_est_ewma',
            }

        return {}

    @classmethod
    def _columns_to_drop(cls, event):
        """
        The extra columns not shared between trace event versions
        """
        if event in [*cls._SCHED_PELT_CFS_NAMES, 'sched_load_se', 'sched_pelt_se']:
            return ['path', 'rbl_load', 'runnable']

        if event in ['sched_load_avg_task']:
            return ['load_sum', 'period_contrib', 'util_sum']

        return []

    def _df_uniformized_signal(self, event):
        """
        Dataframe of ``event`` normalized to a version-independent schema
        (columns renamed/dropped per :meth:`_columns_renaming` and
        :meth:`_columns_to_drop`).
        """
        df = self.trace.df_events(event)
        df = df.rename(columns=self._columns_renaming(event), copy=True)

        # Legacy sched_load_avg_* events don't have a `path` field.
        if not event.startswith('sched_load_avg_'):
            # Keep only root entities: "(null)" path for scheduling entities,
            # "/" for the root cfs_rq of each CPU.
            if event in self._SCHED_PELT_SE_NAMES:
                df = df[df.path == "(null)"]

            if event in self._SCHED_PELT_CFS_NAMES:
                df = df[df.path == "/"]

        to_drop = self._columns_to_drop(event)
        # errors='ignore': some event versions lack some of these columns
        df.drop(columns=to_drop, inplace=True, errors='ignore')

        return df

    def _df_either_event(self, events):
        """
        Uniformized dataframe of the first event of ``events`` available in
        the trace.

        :raises RuntimeError: if none of ``events`` is present.
        """
        for event in events:
            if event not in self.trace.available_events:
                continue

            return self._df_uniformized_signal(event)

        raise RuntimeError("Trace is missing one of either events: {}".format(events))

    @may_use_events(
        requires_one_event_of(*_SCHED_PELT_CFS_NAMES),
        'sched_util_est_cpu'
    )
    def df_cpus_signal(self, signal):
        """
        Get the load-tracking signals for the CPUs

        :returns: a :class:`pandas.DataFrame` with a column of the same name as
            the specified ``signal``, and additional context columns such as
            ``cpu``.

        :param signal: Signal name to get. Can be any of:

            * ``util``
            * ``load``
            * ``util_est_enqueued``

        :type signal: str

        :raises ValueError: if ``signal`` is not one of the supported names.
        """
        if signal in ('util', 'load'):
            df = self._df_either_event(self._SCHED_PELT_CFS_NAMES)
        elif signal == 'util_est_enqueued':
            df = self._df_uniformized_signal('sched_util_est_cpu')
        else:
            raise ValueError('Signal "{}" not supported'.format(signal))

        return df[['cpu', signal]]

    @deprecate(replaced_by=df_cpus_signal, deprecated_in='2.0', removed_in='2.1')
    @requires_one_event_of(*_SCHED_PELT_CFS_NAMES)
    def df_cpus_signals(self):
        """
        Get the load-tracking signals for the CPUs

        :returns: a :class:`pandas.DataFrame` with:

          * A ``util`` column (the average utilization of a CPU at time t)
          * A ``load`` column (the average load of a CPU at time t)
        """
        return self._df_either_event(self._SCHED_PELT_CFS_NAMES)

    @TraceAnalysisBase.cache
    @may_use_events(
        requires_one_event_of(*_SCHED_PELT_SE_NAMES),
        'sched_util_est_task'
    )
    def df_tasks_signal(self, signal):
        """
        Get the load-tracking signals for the tasks

        :returns: a :class:`pandas.DataFrame` with a column of the same name as
            the specified ``signal``, and additional context columns such as
            ``cpu``.

        :param signal: Signal name to get. Can be any of:

            * ``util``
            * ``load``
            * ``util_est_enqueued``
            * ``util_est_ewma``
            * ``required_capacity``

        :type signal: str

        :raises ValueError: if ``signal`` is not one of the supported names.
        """
        if signal in ('util', 'load'):
            df = self._df_either_event(self._SCHED_PELT_SE_NAMES)
        elif signal in ('util_est_enqueued', 'util_est_ewma'):
            df = self._df_uniformized_signal('sched_util_est_task')
        elif signal == 'required_capacity':
            # Add a column which represents the max capacity of the smallest
            # CPU which can accommodate the task utilization
            capacities = sorted(self.trace.plat_info["cpu-capacities"]['orig'].values())

            def fits_capacity(util):
                # Smallest capacity able to hold `util`, or the biggest one
                # if the task does not fit anywhere.
                for capacity in capacities:
                    if util <= capacity:
                        return capacity

                return capacities[-1]

            df = self._df_either_event(self._SCHED_PELT_SE_NAMES)
            df['required_capacity'] = df['util'].map(fits_capacity)
        else:
            raise ValueError('Signal "{}" not supported'.format(signal))

        # Select the available columns among
        columns = {'cpu', 'comm', 'pid', 'update_time', signal}
        columns = sorted(set(df.columns) & columns)
        return df[columns]

    @TraceAnalysisBase.cache
    @df_tasks_signal.used_events
    def df_task_signal(self, task, signal):
        """
        Same as :meth:`df_tasks_signal` but for one task only.

        :param task: The name or PID of the task, or a tuple ``(pid, comm)``
        :type task: str or int or tuple

        :param signal: See :meth:`df_tasks_signal`.
        """
        task_id = self.trace.get_task_id(task, update=False)
        df = self.df_tasks_signal(signal=signal)
        return df_filter_task_ids(df, [task_id])

    @deprecate(replaced_by=df_tasks_signal, deprecated_in='2.0', removed_in='2.1')
    @requires_one_event_of(*_SCHED_PELT_SE_NAMES)
    def df_tasks_signals(self):
        """
        Get the load-tracking signals for the tasks

        :returns: a :class:`pandas.DataFrame` with:

          * A ``util`` column (the average utilization of a task at time t)
          * A ``load`` column (the average load of a task at time t)

          If CPU capacity information is available:

          * A ``required_capacity`` column (the minimum available CPU capacity
            required to run this task without being CPU-bound)
        """
        df = self._df_either_event(self._SCHED_PELT_SE_NAMES)
        if "orig" in self.trace.plat_info['cpu-capacities']:
            df['required_capacity'] = self.df_tasks_signal('required_capacity')['required_capacity']
        return df

    @TraceAnalysisBase.cache
    @df_tasks_signal.used_events
    def df_top_big_tasks(self, util_threshold, min_samples=100):
        """
        Tasks which had 'utilization' samples bigger than the specified
        threshold

        :param util_threshold: utilization value above which samples are
            counted (e.g. the capacity of a little cluster)
        :type util_threshold: int

        :param min_samples: minimum number of samples over ``util_threshold``
            for a task to be kept
        :type min_samples: int

        :returns: a :class:`pandas.DataFrame` with:

          * Task PIDs as index
          * A ``samples`` column (The number of util samples above the threshold)
          * A ``comm`` column (the latest name seen for that PID)
        """
        df = self.df_tasks_signal('util')

        # Compute number of samples above threshold
        samples = df[df.util > util_threshold].groupby('pid').count()["util"]
        samples = samples[samples > min_samples]
        samples = samples.sort_values(ascending=False)

        top_df = pd.DataFrame(samples).rename(columns={"util": "samples"})

        def get_name(pid):
            # A PID may map to several comms over its lifetime; take the last
            return self.trace.get_task_pid_names(pid)[-1]
        top_df["comm"] = top_df.index.map(get_name)
        return top_df

    @TraceAnalysisBase.plot_method(return_axis=True)
    @may_use_events(
        StatusAnalysis.plot_overutilized.used_events,
        'cpu_capacity',
    )
    @df_cpus_signal.used_events
    def plot_cpus_signals(self, cpus: TypedList[CPU]=None, signals: TypedList[str]=['util', 'load'], axis=None, **kwargs):
        """
        Plot the CPU-related load-tracking signals

        :param cpus: list of CPUs to be plotted (defaults to all CPUs)
        :type cpus: list(int)

        :param signals: List of signals to plot.
        :type signals: list(str)
        """
        cpus = cpus or list(range(self.trace.cpus_count))
        window = self.trace.window

        def plotter(axes, local_fig):
            # With a single CPU, `axes` is one axis rather than a sequence
            axes = axes if len(cpus) > 1 else itertools.repeat(axes)
            for cpu, axis in zip(cpus, axes):
                # Add CPU utilization
                axis.set_title('CPU{}'.format(cpu))
                for signal in signals:
                    df = self.df_cpus_signal(signal)
                    df = df[df['cpu'] == cpu]
                    df = df_refit_index(df, window=window)
                    df[signal].plot(ax=axis, drawstyle='steps-post', alpha=0.4)

                self.trace.analysis.cpus.plot_orig_capacity(cpu, axis=axis)

                # Add capacities data if available
                if self.trace.has_events('cpu_capacity'):
                    df = self.trace.df_events('cpu_capacity')
                    df = df[df["__cpu"] == cpu]
                    if len(df):
                        data = df[['capacity']]
                        data = df_refit_index(data, window=window)
                        data.plot(ax=axis, style=['m', '--y'],
                                  drawstyle='steps-post')

                # Add overutilized signal to the plot
                plot_overutilized = self.trace.analysis.status.plot_overutilized
                if self.trace.has_events(plot_overutilized.used_events):
                    plot_overutilized(axis=axis)

                axis.set_ylim(0, 1100)
                axis.legend()

        return self.do_plot(plotter, nrows=len(cpus), sharex=True, axis=axis, **kwargs)

    @TraceAnalysisBase.plot_method()
    @df_task_signal.used_events
    def plot_task_signals(self, task: TaskID, axis, local_fig, signals: TypedList[str]=['util', 'load']):
        """
        Plot the task-related load-tracking signals

        :param task: The name or PID of the task, or a tuple ``(pid, comm)``
        :type task: str or int or tuple

        :param signals: List of signals to plot.
        :type signals: list(str)
        """
        window = self.trace.window
        task = self.trace.get_task_id(task, update=False)

        for signal in signals:
            df = self.df_task_signal(task, signal)
            df = df_refit_index(df, window=window)
            df[signal].plot(ax=axis, drawstyle='steps-post', alpha=0.4)

        plot_overutilized = self.trace.analysis.status.plot_overutilized
        if self.trace.has_events(plot_overutilized.used_events):
            plot_overutilized(axis=axis)

        axis.set_title('Load-tracking signals of task {}'.format(task))
        axis.legend()
        axis.grid(True)

    @TraceAnalysisBase.plot_method(return_axis=True)
    @df_tasks_signal.used_events
    def plot_task_required_capacity(self, task: TaskID, axis=None, **kwargs):
        """
        Plot the minimum required capacity of a task

        :param task: The name or PID of the task, or a tuple ``(pid, comm)``
        :type task: str or int or tuple
        """
        window = self.trace.window

        task_ids = self.trace.get_task_ids(task)
        df = self.df_tasks_signal('required_capacity')
        df = df_filter_task_ids(df, task_ids)
        df = df_refit_index(df, window=window)

        # Build task names (there could be multiple, during the task lifetime)
        task_name = 'Task ({})'.format(', '.join(map(str, task_ids)))

        def plotter(axis, local_fig):
            df["required_capacity"].plot(
                drawstyle='steps-post',
                ax=axis)

            axis.legend()
            axis.grid(True)

            if local_fig:
                axis.set_title(task_name)
                axis.set_ylim(0, 1100)
                axis.set_ylabel('Utilization')
                axis.set_xlabel('Time (s)')

        return self.do_plot(plotter, height=8, axis=axis, **kwargs)

    @TraceAnalysisBase.plot_method()
    @df_task_signal.used_events
    def plot_task_placement(self, task: TaskID, axis, local_fig):
        """
        Plot the CPU placement of the task

        :param task: The name or PID of the task, or a tuple ``(pid, comm)``
        :type task: str or int or tuple
        """
        task_id = self.trace.get_task_id(task, update=False)

        # Get all utilization update events
        df = self.df_task_signal(task_id, 'required_capacity').copy()

        cpu_capacities = self.trace.plat_info["cpu-capacities"]['orig']
        df['capacity'] = df['cpu'].map(cpu_capacities)

        def add_placement(df, comp, comp_str):
            placement = "CPU capacity {} required capacity".format(comp_str)
            condition = comp(df['capacity'], df['required_capacity'])
            df.loc[condition, 'placement'] = placement

        add_placement(df, operator.lt, '<')
        add_placement(df, operator.gt, '>')
        add_placement(df, operator.eq, '==')

        for cols, placement_df in df_split_signals(df, ['placement']):
            placement = cols['placement']
            # BUGFIX: plot only the samples of this placement category.
            # The original plotted the full `df["cpu"]` series for every
            # category, so all three labels showed identical data.
            series = placement_df["cpu"]
            series = series_refit_index(series, window=self.trace.window)
            series.plot(ax=axis, style="+", label=placement)

        plot_overutilized = self.trace.analysis.status.plot_overutilized
        if self.trace.has_events(plot_overutilized.used_events):
            plot_overutilized(axis=axis)

        if local_fig:
            axis.set_title('Utilization vs placement of task "{}"'.format(task))

        axis.grid(True)
        axis.legend()
# vim :set tabstop=4 shiftwidth=4 expandtab textwidth=80
|
# Read five integers from the user and print them as a list.
lista = [int(input("Digite um numero inteiro")) for _ in range(5)]
print(lista)
Update ipc_lista4.01.py
# Nickso Patrick Façanha Calheiros - 1615310059
# Read five integers from the user and print them as a list.
lista = [int(input("Digite um numero inteiro")) for _ in range(5)]
print(lista)
|
# -*- coding: utf-8 -*-
from expects import expect
from mamba.settings import Settings
IRRELEVANT_SLOW_TEST_THRESHOLD = 'irrelevant slow test threeshold'
IRRELEVANT_ENABLE_CODE_COVERAGE = 'irrelevant enable code coverage'
IRRELEVANT_ENABLE_FILE_WATCHER = 'irrelevant enable file watcher'
# Behavioural spec (mamba DSL) for mamba.settings.Settings.
# `description`/`before`/`context`/`it` and `self` are injected by the mamba
# runner; this file is executed by mamba, not by plain Python.
with description(Settings):
    with before('each'):
        # Fresh Settings instance for every example.
        self.subject = Settings()
    with context('when loading defaults'):
        with it('has 75 millis as slow test threshold'):
            expect(self.subject).to.have.property('slow_test_threshold').to.be.equal(0.075)
        with it('has code coverage disabled by default'):
            expect(self.subject).to.have.property('enable_code_coverage').to.be.false
        with it('has file watcher disabled by default'):
            expect(self.subject).to.have.property('enable_file_watcher').to.be.false
    with context('when setting custom values'):
        # Setters are checked with sentinel values; the attributes are plain
        # properties, so the same object must be read back.
        with it('sets slow test threshold'):
            self.subject.slow_test_threshold = IRRELEVANT_SLOW_TEST_THRESHOLD
            expect(self.subject).to.have.property('slow_test_threshold').to.be.equal(IRRELEVANT_SLOW_TEST_THRESHOLD)
        with it('sets code coverage'):
            self.subject.enable_code_coverage = IRRELEVANT_ENABLE_CODE_COVERAGE
            expect(self.subject).to.have.property('enable_code_coverage').to.be.equal(IRRELEVANT_ENABLE_CODE_COVERAGE)
        with it('sets file watcher'):
            self.subject.enable_file_watcher = IRRELEVANT_ENABLE_FILE_WATCHER
            expect(self.subject).to.have.property('enable_file_watcher').to.be.equal(IRRELEVANT_ENABLE_FILE_WATCHER)
Refactor for removing settings creation
Let mamba do it for you! :metal:
# -*- coding: utf-8 -*-
from expects import expect
from mamba.settings import Settings
IRRELEVANT_SLOW_TEST_THRESHOLD = 'irrelevant slow test threeshold'
IRRELEVANT_ENABLE_CODE_COVERAGE = 'irrelevant enable code coverage'
IRRELEVANT_ENABLE_FILE_WATCHER = 'irrelevant enable file watcher'
# Behavioural spec (mamba DSL) for mamba.settings.Settings.
# `description`/`context`/`it` and `self` are injected by the mamba runner.
# NOTE(review): no `before('each')` block — assumes the mamba runner creates
# `self.subject` from the described class automatically; confirm the runner
# version in use supports this.
with description(Settings):
    with context('when loading defaults'):
        with it('has 75 millis as slow test threshold'):
            expect(self.subject).to.have.property('slow_test_threshold').to.be.equal(0.075)
        with it('has code coverage disabled by default'):
            expect(self.subject).to.have.property('enable_code_coverage').to.be.false
        with it('has file watcher disabled by default'):
            expect(self.subject).to.have.property('enable_file_watcher').to.be.false
    with context('when setting custom values'):
        # Setters are checked with sentinel values and read back.
        with it('sets slow test threshold'):
            self.subject.slow_test_threshold = IRRELEVANT_SLOW_TEST_THRESHOLD
            expect(self.subject).to.have.property('slow_test_threshold').to.be.equal(IRRELEVANT_SLOW_TEST_THRESHOLD)
        with it('sets code coverage'):
            self.subject.enable_code_coverage = IRRELEVANT_ENABLE_CODE_COVERAGE
            expect(self.subject).to.have.property('enable_code_coverage').to.be.equal(IRRELEVANT_ENABLE_CODE_COVERAGE)
        with it('sets file watcher'):
            self.subject.enable_file_watcher = IRRELEVANT_ENABLE_FILE_WATCHER
            expect(self.subject).to.have.property('enable_file_watcher').to.be.equal(IRRELEVANT_ENABLE_FILE_WATCHER)
|
# Package version string.
VERSION = "0.4"
Upped version to 0.5
# Package version string.
VERSION = "0.5"
|
##########################################################################
#
# Copyright (c) 2007, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
from IECore import *
class TestVectorDataFilterOp(unittest.TestCase):

    """Checks VectorDataFilterOp for every invert/clip combination."""

    def test(self):
        source = IntVectorData([1, 2, 3, 4, 5, 6])
        mask = BoolVectorData([0, 1, 0, 1])
        # clip=True discards the elements beyond the end of the filter.
        filtered = VectorDataFilterOp()(input=source, filter=mask, invert=False, clip=True)
        self.assertEqual(filtered, IntVectorData([2, 4]))
        filtered = VectorDataFilterOp()(input=source, filter=mask, invert=True, clip=True)
        self.assertEqual(filtered, IntVectorData([1, 3]))
        # clip=False keeps the unfiltered tail untouched.
        filtered = VectorDataFilterOp()(input=source, filter=mask, invert=False, clip=False)
        self.assertEqual(filtered, IntVectorData([2, 4, 5, 6]))
        filtered = VectorDataFilterOp()(input=source, filter=mask, invert=True, clip=False)
        self.assertEqual(filtered, IntVectorData([1, 3, 5, 6]))


if __name__ == "__main__":
    unittest.main()
# Adding test.
##########################################################################
#
# Copyright (c) 2007, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import random
from IECore import *
class TestVectorDataFilterOp( unittest.TestCase ) :
    # Exercises VectorDataFilterOp both in copying mode (test) and with
    # copyInput=False, i.e. filtering the input vector in place.

    def test( self ) :
        i = IntVectorData( [ 1, 2, 3, 4, 5, 6 ] )
        f = BoolVectorData( [ 0, 1, 0, 1 ] )
        # clip=True drops the elements past the end of the filter.
        ii = VectorDataFilterOp()( input = i, filter = f, invert = False, clip = True )
        self.assertEqual( ii, IntVectorData( [ 2, 4 ] ) )
        ii = VectorDataFilterOp()( input = i, filter = f, invert = True, clip = True )
        self.assertEqual( ii, IntVectorData( [ 1, 3 ] ) )
        # clip=False keeps the unfiltered tail.
        ii = VectorDataFilterOp()( input = i, filter = f, invert = False, clip = False )
        self.assertEqual( ii, IntVectorData( [ 2, 4, 5, 6 ] ) )
        ii = VectorDataFilterOp()( input = i, filter = f, invert = True, clip = False )
        self.assertEqual( ii, IntVectorData( [ 1, 3, 5, 6 ] ) )

    def testOperateInPlace( self ) :
        f = BoolVectorData( [ 0, 1, 0, 1 ] )
        i = IntVectorData( [ 1, 2, 3, 4, 5, 6 ] )
        VectorDataFilterOp()( input = i, copyInput = False, filter = f, invert = False, clip = True )
        self.assertEqual( i, IntVectorData( [ 2, 4 ] ) )
        i = IntVectorData( [ 1, 2, 3, 4, 5, 6 ] )
        VectorDataFilterOp()( input = i, copyInput = False, filter = f, invert = True, clip = True )
        self.assertEqual( i, IntVectorData( [ 1, 3 ] ) )
        i = IntVectorData( [ 1, 2, 3, 4, 5, 6 ] )
        VectorDataFilterOp()( input = i, copyInput = False, filter = f, invert = False, clip = False )
        self.assertEqual( i, IntVectorData( [ 2, 4, 5, 6 ] ) )
        i = IntVectorData( [ 1, 2, 3, 4, 5, 6 ] )
        VectorDataFilterOp()( input = i, copyInput = False, filter = f, invert = True, clip = False )
        self.assertEqual( i, IntVectorData( [ 1, 3, 5, 6 ] ) )
        # Randomised stress: after in-place filtering, the vector length must
        # equal the number of 1s in the mask.
        # NOTE(review): indentation was lost in this chunk; this loop is
        # assumed to belong to testOperateInPlace — confirm upstream.
        for i in range( 0, 1000 ) :
            m = BoolVectorData()
            v = V3fVectorData()
            n = 0
            for j in range( 0, random.randint( 0, 1000 ) ) :
                m.append( random.randint( 0,1 ) )
                n += m[-1]
                v.append( V3f( 0 ) )
            VectorDataFilterOp()( input = v, copyInput = False, filter = m )
            self.assertEqual( len( v ), n )

if __name__ == "__main__":
    unittest.main()
|
#!/usr/bin/python
"""
Diaphora, a diffing plugin for IDA
Copyright (c) 2015, Joxean Koret
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
KNOWN BUGS:
[ ] The choosers aren't updated when importing stuff.
TODO (for future versions):
[ ] Heuristics based on the call graph. This is why BinDiff was/is the
best one.
[ ] Heuristics based on switches (SPP with get_switch_info_ex(x).ncases?).
[ ] Instruction-level comment porting.
[ ] Import all names (global variables, etc...).
"""
import os
import sys
import time
import json
import decimal
import sqlite3
import traceback
from hashlib import md5
from cStringIO import StringIO
from difflib import SequenceMatcher, HtmlDiff
from pygments import highlight
from pygments.lexers import NasmLexer, CppLexer
from pygments.formatters import HtmlFormatter
from idc import *
from idaapi import *
from idautils import *
from PySide import QtGui
from others.tarjan_sort import strongly_connected_components, robust_topological_sort
from jkutils.kfuzzy import CKoretFuzzyHashing
from jkutils.factor import (FACTORS_CACHE, difference, difference_ratio,
primesbelow as primes)
#-----------------------------------------------------------------------
# Plugin identity strings.
VERSION_VALUE = "1.0.3"
COPYRIGHT_VALUE="Copyright(c) 2015 Joxean Koret"
COMMENT_VALUE="Diaphora diffing plugin for IDA version %s" % VERSION_VALUE

# Constants unexported in IDA Python
# PRTYPE_* print-type flag (0x0008); name suggests "print with semicolons"
# — confirm against the IDA SDK headers.
PRTYPE_SEMI=0x0008

# Used to clean-up the pseudo-code and assembly dumps in order to get
# better comparison ratios
CMP_REPS = ["loc_", "sub_", "qword_", "dword_", "byte_", "word_", "off_",
"unk_", "stru_", "dbl_"]

# Messages
# Both messages use IDA's "AUTOHIDE DATABASE" prefix plus HTML mark-up.
MSG_RELAXED_RATIO_ENABLED = """AUTOHIDE DATABASE\n<b>Relaxed ratio calculations</b> will be enabled. It will ignore many small
modifications to functions and will match more functions with higher ratios. Enable this option if you're only interested in the
new functionality. Disable it for patch diffing if you're interested in small modifications (like buffer sizes).
<br><br>
This is automatically done for diffing big databases (more than 20,000 functions in the database).<br><br>
You can disable it by un-checking the 'Relaxed calculations of differences ratios' option."""

MSG_FUNCTION_SUMMARIES_ONLY = """AUTOHIDE DATABASE\n<b>Do not export basic blocks or instructions</b> will be enabled.<br>
It will not export the information relative to basic blocks or<br>
instructions and 'Diff assembly in a graph' will not be available.
<br><br>
This is automatically done for exporting huge databases with<br>
more than 100,000 functions.<br><br>
You can disable it by un-checking the 'Do not export basic blocks<br>
or instructions' option."""
#-----------------------------------------------------------------------
def log(msg):
    """Write *msg* to IDA's output window, prefixed with a timestamp."""
    stamped = "[%s] %s\n" % (time.asctime(), msg)
    Message(stamped)
#-----------------------------------------------------------------------
def log_refresh(msg, show=False):
    """Update IDA's wait box with *msg* and echo it to the log.

    When show is True a new wait box is opened; otherwise the text of the
    already-visible one is replaced.
    """
    update_box = show_wait_box if show else replace_wait_box
    update_box(msg)
    log(msg)
#-----------------------------------------------------------------------
def quick_ratio(buf1, buf2):
    """Return a fast upper bound on the line-wise similarity of two buffers.

    Wraps difflib.SequenceMatcher.quick_ratio() over the buffers split into
    lines. Returns 0 for None/empty input or on any internal error.
    """
    try:
        # Bug fix: the original tested `buf1 == ""` twice and never buf2,
        # so an empty second buffer slipped through to the matcher.
        if buf1 is None or buf2 is None or buf1 == "" or buf2 == "":
            return 0
        s = SequenceMatcher(None, buf1.split("\n"), buf2.split("\n"))
        return s.quick_ratio()
    except:
        # Best-effort: similarity heuristics must never abort the diff.
        print("quick_ratio: %s" % str(sys.exc_info()[1]))
        return 0
#-----------------------------------------------------------------------
def real_quick_ratio(buf1, buf2):
    """Return the cheapest (length-only) similarity bound for two buffers.

    Wraps difflib.SequenceMatcher.real_quick_ratio() over the buffers split
    into lines. Returns 0 for None/empty input or on any internal error.
    """
    try:
        # Bug fix: the original tested `buf1 == ""` twice and never buf2,
        # so an empty second buffer slipped through to the matcher.
        if buf1 is None or buf2 is None or buf1 == "" or buf2 == "":
            return 0
        s = SequenceMatcher(None, buf1.split("\n"), buf2.split("\n"))
        return s.real_quick_ratio()
    except:
        # Best-effort: similarity heuristics must never abort the diff.
        print("real_quick_ratio: %s" % str(sys.exc_info()[1]))
        return 0
#-----------------------------------------------------------------------
def ast_ratio(ast1, ast2):
    """Compare two AST prime-product signatures, returning a ratio in [0, 1].

    Equal signatures are a perfect match; a missing signature on either side
    scores 0; otherwise the factor-based difference ratio is used.
    """
    if ast1 == ast2:
        return 1.0
    if ast1 is None or ast2 is None:
        return 0
    value1 = decimal.Decimal(ast1)
    value2 = decimal.Decimal(ast2)
    return difference_ratio(value1, value2)
#-----------------------------------------------------------------------
class CHtmlViewer(PluginForm):
    """IDA plugin form that renders a static HTML document in a browser widget.

    Usage: viewer.Show(html_text, title). Show() stashes the text and defers
    widget construction to OnCreate/PopulateForm.
    """

    def OnCreate(self, form):
        # Bug fix: the original reset self.browser/self.layout to None *after*
        # calling PopulateForm(), discarding the references it had just
        # created. Initialise them first, then build the form.
        self.browser = None
        self.layout = None
        self.parent = self.FormToPySideWidget(form)
        self.PopulateForm()
        return 1

    def PopulateForm(self):
        """Build the layout: a single read-only QTextBrowser with self.text."""
        self.layout = QtGui.QVBoxLayout()
        self.browser = QtGui.QTextBrowser()
        # Commented for now
        #self.browser.setLineWrapMode(QtGui.QTextEdit.NoWrap)
        self.browser.setHtml(self.text)
        self.browser.setReadOnly(True)
        self.browser.setFontWeight(12)
        self.layout.addWidget(self.browser)
        self.parent.setLayout(self.layout)

    def Show(self, text, title):
        """Remember *text* and display the form under *title*."""
        self.text = text
        return PluginForm.Show(self, title)
#-----------------------------------------------------------------------
class CChooser(Choose2):
    """Multi-select chooser listing Diaphora's match results.

    Titles starting with "Unmatched in" get a 3-column layout
    (line/address/name); every other title gets the full 7-column matched
    layout including the ratio and a description of the matching heuristic.
    """

    class Item:
        # One result row; for "Unmatched" lists ea2/name2 stay None.
        def __init__(self, ea, name, ea2 = None, name2 = None, desc="100% equal", ratio = 0):
            self.ea = ea
            self.vfname = name
            self.ea2 = ea2
            self.vfname2 = name2
            self.description = desc
            self.ratio = ratio
            self.cmd_import_selected = None
            self.cmd_import_all = None
            self.cmd_import_all_funcs = None

        def __str__(self):
            return '%08x' % self.ea

    def __init__(self, title, bindiff, show_commands=True):
        # Unmatched lists only need line/address/name columns.
        if title.startswith("Unmatched in"):
            Choose2.__init__(self, title, [ ["Line", 8], ["Address", 10], ["Name", 20] ], Choose2.CH_MULTI)
        else:
            Choose2.__init__(self, title, [ ["Line", 8], ["Address", 10], ["Name", 20], ["Address 2", 10], ["Name 2", 20], ["Ratio", 5], ["Description", 30] ], Choose2.CH_MULTI)
        # NOTE(review): for "Unmatched in primary" the listed addresses belong
        # to the *other* database, hence primary=False — confirm against the
        # show_asm() caller.
        if title == "Unmatched in primary":
            self.primary = False
        else:
            self.primary = True
        self.n = 0
        self.items = []
        self.icon = 41
        self.bindiff = bindiff
        self.show_commands = show_commands
        # Context-menu command ids; registered lazily in show().
        self.cmd_diff_asm = None
        self.cmd_diff_graph = None
        self.cmd_diff_c = None
        self.cmd_import_selected = None
        self.cmd_import_all = None
        self.cmd_import_all_funcs = None
        self.cmd_show_asm = None
        self.cmd_show_pseudo = None
        self.cmd_highlight_functions = None
        self.cmd_unhighlight_functions = None
        self.selected_items = []

    def OnClose(self):
        """space holder"""
        return True

    def OnEditLine(self, n):
        """space holder"""

    def OnInsertLine(self):
        pass

    def OnSelectLine(self, n):
        # Double-click: jump to the address in the current database, or show
        # the other database's assembly for secondary lists.
        item = self.items[int(n)]
        if self.primary:
            try:
                jump_ea = int(item[1], 16)
                # Only jump for valid addresses
                if isEnabled(jump_ea):
                    jumpto(jump_ea)
            except:
                print "OnSelectLine", sys.exc_info()[1]
        else:
            self.bindiff.show_asm(self.items[n], self.primary)

    def OnGetLine(self, n):
        try:
            return self.items[n]
        except:
            print "OnGetLine", sys.exc_info()[1]

    def OnGetSize(self):
        return len(self.items)

    def OnDeleteLine(self, n):
        try:
            del self.items[n]
            self.n -= 1
        except:
            pass
        return True

    def OnRefresh(self, n):
        return n

    def add_item(self, item):
        # Rows are stored as lists of strings matching the column layout
        # chosen in __init__.
        if self.title.startswith("Unmatched in"):
            self.items.append(["%05lu" % self.n, "%08x" % int(item.ea), item.vfname])
        else:
            self.items.append(["%05lu" % self.n, "%08x" % int(item.ea), item.vfname, "%08x" % int(item.ea2), item.vfname2, "%.3f" % item.ratio, item.description])
        self.n += 1

    def show(self, force=False):
        """Display the chooser and register its right-click commands once."""
        t = self.Show()
        if t < 0:
            return False
        if self.show_commands and (self.cmd_diff_asm is None or force):
            # create aditional actions handlers
            self.cmd_diff_asm = self.AddCommand("Diff assembly")
            self.cmd_diff_c = self.AddCommand("Diff pseudo-code")
            self.cmd_diff_graph = self.AddCommand("Diff assembly in a graph")
            self.cmd_import_selected = self.AddCommand("Import selected")
            self.cmd_import_all = self.AddCommand("Import *all* functions")
            self.cmd_import_all_funcs = self.AddCommand("Import *all* data for sub_* functions")
            self.cmd_highlight_functions = self.AddCommand("Highlight matches")
            self.cmd_unhighlight_functions = self.AddCommand("Unhighlight matches")
        elif not self.show_commands and (self.cmd_show_asm is None or force):
            self.cmd_show_asm = self.AddCommand("Show assembly")
            self.cmd_show_pseudo = self.AddCommand("Show pseudo-code")
        return True

    def get_color(self):
        # Highlight colour per result category (used by the "Highlight
        # matches" command).
        if self.title.startswith("Best"):
            return 0xffff99
        elif self.title.startswith("Partial"):
            return 0x99ff99
        elif self.title.startswith("Unreliable"):
            return 0x9999ff

    def OnCommand(self, n, cmd_id):
        # Aditional right-click-menu commands handles
        if cmd_id == self.cmd_import_all:
            if askyn_c(1, "HIDECANCEL\nDo you really want to import all matched functions, comments, prototypes and definitions?") == 1:
                self.bindiff.import_all(self.items)
        elif cmd_id == self.cmd_import_all_funcs:
            if askyn_c(1, "HIDECANCEL\nDo you really want to import all IDA named matched functions, comments, prototypes and definitions?") == 1:
                self.bindiff.import_all_auto(self.items)
        elif cmd_id == self.cmd_import_selected:
            # Single selection imports directly; multi-selection asks first.
            if len(self.selected_items) <= 1:
                self.bindiff.import_one(self.items[n])
            else:
                if askyn_c(1, "HIDECANCEL\nDo you really want to import all selected IDA named matched functions, comments, prototypes and definitions?") == 1:
                    self.bindiff.import_selected(self.items, self.selected_items)
        elif cmd_id == self.cmd_diff_c:
            self.bindiff.show_pseudo_diff(self.items[n])
        elif cmd_id == self.cmd_diff_asm:
            self.bindiff.show_asm_diff(self.items[n])
        elif cmd_id == self.cmd_show_asm:
            self.bindiff.show_asm(self.items[n], self.primary)
        elif cmd_id == self.cmd_show_pseudo:
            self.bindiff.show_pseudo(self.items[n], self.primary)
        elif cmd_id == self.cmd_highlight_functions:
            if askyn_c(1, "HIDECANCEL\nDo you want to change the background color of each matched function?") == 1:
                color = self.get_color()
                for item in self.items:
                    ea = int(item[1], 16)
                    if not SetColor(ea, CIC_FUNC, color):
                        print "Error setting color for %x" % ea
                Refresh()
        elif cmd_id == self.cmd_unhighlight_functions:
            # Restore the default (white) function background.
            for item in self.items:
                ea = int(item[1], 16)
                if not SetColor(ea, CIC_FUNC, 0xFFFFFF):
                    print "Error setting color for %x" % ea
            Refresh()
        elif cmd_id == self.cmd_diff_graph:
            item = self.items[n]
            ea1 = int(item[1], 16)
            name1 = item[2]
            ea2 = int(item[3], 16)
            name2 = item[4]
            log("Diff graph for 0x%x - 0x%x" % (ea1, ea2))
            self.bindiff.graph_diff(ea1, name1, ea2, name2)
        return True

    def OnSelectionChange(self, sel_list):
        self.selected_items = sel_list

    def OnGetLineAttr(self, n):
        # Shade matched rows from red (low ratio) towards green (high ratio);
        # the byte order of the packed value presumably matches IDA's BGR
        # colour convention — confirm.
        if not self.title.startswith("Unmatched"):
            item = self.items[n]
            ratio = float(item[5])
            red = int(255 * (1 - ratio))
            green = int(128 * ratio)
            color = int("0x00%02x%02x" % (green, red), 16)
            return [color, 0]
        return [0xFFFFFF, 0]
#-----------------------------------------------------------------------
class CBinDiffExporterSetup(Form):
    """Options dialog shown before exporting and/or diffing.

    The layout string uses IDA's Form mark-up; each {name} placeholder is
    bound to the control declared under the same key in ``args``.
    """

    def __init__(self):
        s = r"""Diaphora BinDiff
Please select the path to the SQLite database to save the current IDA database and the path of the SQLite database to diff against.
If no SQLite diff database is selected, it will just export the current IDA database to SQLite format. Leave the 2nd field empty if you are
exporting the first database.
SQLite databases: Export filter limits:
<#Select a file to export the current IDA database to SQLite format#Export IDA database to SQLite :{iFileSave}> <#Minimum address to find functions to export#From address:{iMinEA}>
<#Select the SQLite database to diff against #SQLite database to diff against:{iFileOpen}> <#Maximum address to find functions to export#To address :{iMaxEA}>
<Use the decompiler if available:{rUseDecompiler}>
<#Enable if you want neither sub_* functions nor library functions to be exported#Export only non-IDA generated functions:{rNonIdaSubs}>
<#Export only function summaries, not all instructions. Showing differences in a graph between functions will not be available.#Do not export instructions and basic blocks:{rFuncSummariesOnly}>
<Use probably unreliable methods:{rUnreliable}>
<Recommended to disable with databases with more than 5.000 functions#Use slow heuristics:{rSlowHeuristics}>
<#Enable this option if you aren't interested in small changes#Relaxed calculations of differences ratios:{rRelaxRatio}>
<Use experimental heuristics:{rExperimental}>
<#Enable this option to ignore sub_* names for the 'Same name' heuristic.#Ignore automatically generated names:{rIgnoreSubNames}>
<#Enable this option to ignore all function names for the 'Same name' heuristic.#Ignore all function names:{rIgnoreAllNames}>{cGroup1}>
NOTE: Don't select IDA database files (.IDB, .I64) as only SQLite databases are considered.
"""
        args = {'iFileSave': Form.FileInput(save=True, swidth=40),
                'iFileOpen': Form.FileInput(open=True, swidth=40),
                'iMinEA': Form.NumericInput(tp=Form.FT_ADDR, swidth=22),
                'iMaxEA': Form.NumericInput(tp=Form.FT_ADDR, swidth=22),
                'cGroup1' : Form.ChkGroupControl(("rUseDecompiler",
                                                  "rUnreliable",
                                                  "rNonIdaSubs",
                                                  "rSlowHeuristics",
                                                  "rRelaxRatio",
                                                  "rExperimental",
                                                  "rFuncSummariesOnly",
                                                  "rIgnoreSubNames",
                                                  "rIgnoreAllNames"))}
        Form.__init__(self, s, args)

    def set_options(self, opts):
        """Copy a BinDiffOptions-like object into the form's controls."""
        if opts.file_out is not None:
            self.iFileSave.value = opts.file_out
        if opts.file_in is not None:
            self.iFileOpen.value = opts.file_in
        self.rUseDecompiler.checked = opts.use_decompiler
        self.rUnreliable.checked = opts.unreliable
        self.rSlowHeuristics.checked = opts.slow
        self.rRelaxRatio.checked = opts.relax
        self.rExperimental.checked = opts.experimental
        self.iMinEA.value = opts.min_ea
        self.iMaxEA.value = opts.max_ea
        # The checkbox is the *negation* of ida_subs ("export only non-IDA").
        self.rNonIdaSubs.checked = opts.ida_subs == False
        self.rIgnoreSubNames.checked = opts.ignore_sub_names
        self.rIgnoreAllNames.checked = opts.ignore_all_names
        self.rFuncSummariesOnly.checked = opts.func_summaries_only

    def get_options(self):
        """Build a BinDiffOptions from the current state of the controls."""
        opts = dict(
            file_out = self.iFileSave.value,
            file_in = self.iFileOpen.value,
            use_decompiler = self.rUseDecompiler.checked,
            unreliable = self.rUnreliable.checked,
            slow = self.rSlowHeuristics.checked,
            relax = self.rRelaxRatio.checked,
            experimental = self.rExperimental.checked,
            min_ea = self.iMinEA.value,
            max_ea = self.iMaxEA.value,
            ida_subs = self.rNonIdaSubs.checked == False,
            ignore_sub_names = self.rIgnoreSubNames.checked,
            ignore_all_names = self.rIgnoreAllNames.checked,
            func_summaries_only = self.rFuncSummariesOnly.checked
        )
        return BinDiffOptions(**opts)
#-----------------------------------------------------------------------
try:
    class CAstVisitor(ctree_visitor_t):
        """Hex-Rays ctree visitor folding every visited opcode into a product
        of small primes (order-independent), presumably compared later via
        ast_ratio()/difference_ratio — confirm at the call sites."""

        def __init__(self, cfunc):
            self.primes = primes(1024)
            ctree_visitor_t.__init__(self, CV_FAST)
            self.cfunc = cfunc
            # Running product; multiplied by one prime per visited node.
            self.primes_hash = 1
            return

        def visit_expr(self, expr):
            # Returning 0 tells the ctree visitor to continue traversal.
            try:
                self.primes_hash *= self.primes[expr.op]
            except:
                traceback.print_exc()
            return 0

        def visit_insn(self, ins):
            try:
                self.primes_hash *= self.primes[ins.op]
            except:
                traceback.print_exc()
            return 0
except:
    # It seems it may cause "problems" with trial versions... may be it
    # causes problems too with versions without the decompiler???
    class CAstVisitor:
        pass
#-----------------------------------------------------------------------
class timeraction_t(object):
    """One-shot IDA timer that invokes *func* (optionally with *args*) once
    after *interval* milliseconds, then unregisters itself by returning -1.
    """

    def __init__(self, func, args, interval):
        self.func = func
        self.args = args
        self.interval = interval
        self.obj = idaapi.register_timer(self.interval, self)
        if self.obj is None:
            # Modernised from the Python-2-only "raise RuntimeError, msg" form.
            raise RuntimeError("Failed to register timer")

    def __call__(self):
        if self.args is not None:
            self.func(self.args)
        else:
            self.func()
        # -1 tells IDA not to re-arm the timer.
        return -1
#-----------------------------------------------------------------------
class uitimercallback_t(object):
    """One-shot IDA UI timer that focuses graph *g* and runs "GraphZoomFit".

    Always returns -1 from __call__ so the timer fires exactly once.
    """

    def __init__(self, g, interval):
        self.interval = interval
        self.obj = idaapi.register_timer(self.interval, self)
        if self.obj is None:
            # Modernised from the Python-2-only "raise RuntimeError, msg" form.
            raise RuntimeError("Failed to register timer")
        self.g = g

    def __call__(self):
        if not "GetTForm" in dir(self.g):
            #log("Notice: IDA 6.6 doesn't support GetTForm, as so, it isn't possible to change the zoom.")
            return -1
        f = self.g.GetTForm()
        switchto_tform(f, 1)
        process_ui_action("GraphZoomFit", 0)
        return -1
#-----------------------------------------------------------------------
class CDiffGraphViewer(GraphViewer):
    """Graph view rendering one side of an assembly diff.

    ``g`` is a (blocks, relations) pair: g[0] maps a block address to its
    rows, g[1] maps a block address to its successor addresses. ``colours``
    maps a block address to the background colour for its node.
    """

    def __init__(self, title, g, colours):
        try:
            GraphViewer.__init__(self, title, False)
            self.graph = g[0]
            self.relations = g[1]
            self.nodes = {}
            self.colours = colours
        except:
            Warning("CDiffGraphViewer: OnInit!!! " + str(sys.exc_info()[1]))

    def OnRefresh(self):
        try:
            self.Clear()
            self.nodes = {}
            for key in self.graph:
                self.nodes[key] = self.AddNode([key, self.graph[key]])
            # Relations may reference blocks absent from the graph dict; add
            # empty placeholder nodes so every edge can still be drawn.
            for key in self.relations:
                if not key in self.nodes:
                    self.nodes[key] = self.AddNode([key, [[0, 0, ""]]])
                parent_node = self.nodes[key]
                for child in self.relations[key]:
                    if not child in self.nodes:
                        self.nodes[child] = self.AddNode([child, [[0, 0, ""]]])
                    child_node = self.nodes[child]
                    self.AddEdge(parent_node, child_node)
            return True
        except:
            print "GraphViewer Error:", sys.exc_info()[1]
            return True

    def OnGetText(self, node_id):
        try:
            ea, rows = self[node_id]
            if ea in self.colours:
                colour = self.colours[ea]
            else:
                colour = 0xFFFFFF
            ret = []
            for row in rows:
                # Only the third field of each row is displayed; the row
                # layout presumably is [addr, ?, text] — confirm at producer.
                ret.append(row[2])
            label = "\n".join(ret)
            return (label, colour)
        except:
            print "GraphViewer.OnGetText:", sys.exc_info()[1]
            return ("ERROR", 0x000000)

    def Show(self):
        return GraphViewer.Show(self)
#-----------------------------------------------------------------------
# Presumably set to the active CBinDiff session while a diff is being shown
# — confirm at the assignment site.
g_bindiff = None

def show_choosers():
    """Re-display the chooser lists of the active diff session, if any."""
    global g_bindiff
    if g_bindiff is not None:
        g_bindiff.show_choosers(True)
#-----------------------------------------------------------------------
# Hard limits applied per each ~20,000 functions; see the LIMITS section in
# CBinDiff.__init__ for how they are consumed.
MAX_PROCESSED_ROWS = 1000000
TIMEOUT_LIMIT = 60 * 2

#-----------------------------------------------------------------------
# Fix for people using IDASkins with very h4x0r $tYl3z like the
# Consonance color scheme
HtmlDiff._styles = """
table.diff {
font-family:Courier;
border:medium;
background-color:#ffffff;
color:#000000
}
.diff_header {background-color:#e0e0e0}
td.diff_header {text-align:right}
.diff_next {background-color:#c0c0c0}
.diff_add {background-color:#aaffaa}
.diff_chg {background-color:#ffff77}
.diff_sub {background-color:#ffaaaa}"""
#-----------------------------------------------------------------------
class CBinDiff:
def __init__(self, db_name):
    """Create a diffing session backed by the SQLite database *db_name*.

    Opens (and creates, if needed) the database and initialises the tuning
    knobs that control which functions are exported and how aggressively
    the heuristics run.
    """
    self.names = dict(Names())
    self.primes = primes(1024*1024)
    self.db_name = db_name
    self.open_db()
    # Addresses already matched on each side, to avoid re-matching.
    self.matched1 = set()
    self.matched2 = set()
    self.total_functions1 = None
    self.total_functions2 = None
    self.equal_callgraph = False
    self.kfh = CKoretFuzzyHashing()
    # With this block size we're sure it will only apply to functions
    # somehow big
    self.kfh.bsize = 32
    self.pseudo = {}
    self.pseudo_hash = {}
    self.unreliable = False
    self.relaxed_ratio = False
    self.experimental = False
    self.slow_heuristics = False
    self.use_decompiler_always = True
    # Chooser windows; created lazily when results are shown.
    self.best_chooser = None
    self.partial_chooser = None
    self.unreliable_chooser = None
    self.unmatched_second = None
    self.unmatched_primary = None
    self.last_diff_db = None
    ####################################################################
    # LIMITS
    #
    # Do not run heuristics for more than 2 minutes per each 20.000
    # functions.
    self.timeout = TIMEOUT_LIMIT
    # It's typical in SQL queries to get a cartesian product of the
    # results in the functions tables. Do not process more than this
    # value per each 20k functions.
    self.max_processed_rows = MAX_PROCESSED_ROWS
    # Limits to filter the functions to export
    self.min_ea = MinEA()
    self.max_ea = MaxEA()
    # Export only non IDA automatically generated function names? I.e.,
    # excluding these starting with sub_*
    self.ida_subs = True
    # Export only function summaries instead of also exporting both the
    # basic blocks and all instructions used by functions?
    self.function_summaries_only = False
    # Ignore IDA's automatically generated sub_* names for heuristics
    # like the 'Same name'?
    self.ignore_sub_names = True
    # Ignore any and all function names for the 'Same name' heuristic?
    self.ignore_all_names = True
    ####################################################################
def __del__(self):
    """Best-effort cleanup: detach any attached diff database and close.

    All errors are swallowed on purpose — destructors must never raise.
    """
    if self.db is not None:
        try:
            if self.last_diff_db is not None:
                # Bug fix: the original wrote `with self.db.cursor():` without
                # binding the cursor, so `cur` was an undefined name and the
                # detach silently failed inside the bare except. sqlite3
                # cursors are not context managers; use one explicitly.
                cur = self.db.cursor()
                cur.execute('detach "%s"' % self.last_diff_db)
                cur.close()
        except:
            pass
        self.db_close()
def open_db(self):
    """Open (or create) the SQLite database and ensure the schema exists."""
    self.db = sqlite3.connect(self.db_name)
    # Return plain str objects for TEXT columns instead of unicode.
    self.db.text_factory = str
    self.create_schema()
def db_cursor(self):
    """Return a new cursor, lazily re-opening the database if it was closed."""
    if self.db is None:
        self.open_db()
    return self.db.cursor()
def db_close(self):
    """Close the database connection and mark it as closed."""
    self.db.close()
    self.db = None
def create_schema(self):
    """Create Diaphora's SQLite schema (tables + indices) when missing.

    Every statement uses IF NOT EXISTS, so calling this on an existing
    database is harmless. The ``version`` table is seeded with the current
    VERSION_VALUE the first time the schema is built.
    """
    cur = self.db_cursor()
    # Required by the ON DELETE CASCADE clauses in the relation tables.
    cur.execute("PRAGMA foreign_keys = ON")

    # Tables (and the indices tied to them), in the original creation order.
    ddl = (
        """ create table if not exists functions (
id integer primary key,
name varchar(255),
address text unique,
nodes integer,
edges integer,
indegree integer,
outdegree integer,
size integer,
instructions integer,
mnemonics text,
names text,
prototype text,
cyclomatic_complexity integer,
primes_value text,
comment text,
mangled_function text,
bytes_hash text,
pseudocode text,
pseudocode_lines integer,
pseudocode_hash1 text,
pseudocode_primes text,
function_flags integer,
assembly text,
prototype2 text,
pseudocode_hash2 text,
pseudocode_hash3 text,
strongly_connected integer,
loops integer,
rva text unique,
tarjan_topological_sort text,
strongly_connected_spp text,
clean_assembly text,
clean_pseudo text,
mnemonics_spp text) """,
        """ create table if not exists program (
id integer primary key,
callgraph_primes text,
callgraph_all_primes text,
md5sum text
) """,
        """ create table if not exists program_data (
id integer primary key,
name varchar(255),
type varchar(255),
value text
)""",
        """ create table if not exists version (value text) """,
        """ create table if not exists instructions (
id integer primary key,
address text unique,
disasm text,
mnemonic text,
comment1 text,
comment2 text) """,
        "create index if not exists idx_instructions_address on instructions (address)",
        """ create table if not exists basic_blocks (
id integer primary key,
num integer,
address text unique)""",
        """ create table if not exists bb_relations (
id integer primary key,
parent_id integer not null references basic_blocks(id) ON DELETE CASCADE,
child_id integer not null references basic_blocks(id) ON DELETE CASCADE)""",
        "create index if not exists idx_bb_relations on bb_relations(parent_id, child_id)",
        """ create table if not exists bb_instructions (
id integer primary key,
basic_block_id integer references basic_blocks(id) on delete cascade,
instruction_id integer references instructions(id) on delete cascade)""",
        "create index if not exists idx_bb_instructions on bb_instructions (basic_block_id, instruction_id)",
        """ create table if not exists function_bblocks (
id integer primary key,
function_id integer not null references functions(id) on delete cascade,
basic_block_id integer not null references basic_blocks(id) on delete cascade)""",
        "create index if not exists id_function_blocks on function_bblocks (function_id, basic_block_id)",
    )
    for stmt in ddl:
        cur.execute(stmt)

    # Record the exporter version exactly once, on first creation.
    cur.execute("select 1 from version")
    if not cur.fetchone():
        cur.execute("insert into main.version values ('%s')" % VERSION_VALUE)

    # Search indices over the functions table used by the diffing heuristics.
    function_indices = (
        "create index if not exists idx_assembly on functions(assembly)",
        "create index if not exists idx_bytes_hash on functions(bytes_hash)",
        "create index if not exists idx_pseudocode on functions(pseudocode)",
        "create index if not exists idx_name on functions(name)",
        "create index if not exists idx_mangled_name on functions(mangled_function)",
        "create index if not exists idx_names on functions(names)",
        "create index if not exists idx_asm_pseudo on functions(assembly, pseudocode)",
        "create index if not exists idx_nodes_edges_instructions on functions(nodes, edges, instructions)",
        "create index if not exists idx_composite1 on functions(nodes, edges, mnemonics, names, cyclomatic_complexity, prototype2, indegree, outdegree)",
        "create index if not exists idx_composite2 on functions(instructions, mnemonics, names)",
        "create index if not exists idx_composite3 on functions(nodes, edges, cyclomatic_complexity)",
        "create index if not exists idx_composite4 on functions(pseudocode_lines, pseudocode)",
        "create index if not exists idx_composite5 on functions(pseudocode_lines, pseudocode_primes)",
        "create index if not exists idx_composite6 on functions(names, mnemonics)",
        "create index if not exists idx_pseudocode_hash1 on functions(pseudocode_hash1)",
        "create index if not exists idx_pseudocode_hash2 on functions(pseudocode_hash2)",
        "create index if not exists idx_pseudocode_hash3 on functions(pseudocode_hash3)",
        "create index if not exists idx_pseudocode_hash on functions(pseudocode_hash1, pseudocode_hash2, pseudocode_hash3)",
        "create index if not exists idx_strongly_connected on functions(strongly_connected)",
        "create index if not exists idx_strongly_connected_spp on functions(strongly_connected_spp)",
        "create index if not exists idx_loops on functions(loops)",
        "create index if not exists idx_rva on functions(rva)",
        "create index if not exists idx_tarjan_topological_sort on functions(tarjan_topological_sort)",
        "create index if not exists idx_mnemonics_spp on functions(mnemonics_spp)",
    )
    for stmt in function_indices:
        cur.execute(stmt)
    cur.close()
def add_program_data(self, type_name, key, value):
    """Insert one (name, type, value) row into the program_data table."""
    cur = self.db_cursor()
    row = (key, type_name, value)
    cur.execute("insert into main.program_data (name, type, value) values (?, ?, ?)", row)
    cur.close()
def read_function(self, f, discard=False):
    """Extract every feature stored for the function at address @f
    (names, control-flow graph metrics, hashes, assembly, pseudo-code
    signatures, basic block layout).

    Returns False when the function must be skipped, otherwise a tuple
    ordered exactly as save_function() expects it."""
    name = GetFunctionName(int(f))
    true_name = name
    demangled_name = Demangle(name, INF_SHORT_DN)
    if demangled_name is not None:
        name = demangled_name

    f = int(f)
    func = get_func(f)
    flow = FlowChart(func)
    size = func.endEA - func.startEA

    if not self.ida_subs:
        # Unnamed function, ignore it...
        if name.startswith("sub_") or name.startswith("j_") or name.startswith("unknown"):
            return False

        # Already recognized runtime's function?
        flags = GetFunctionFlags(f)
        if flags & FUNC_LIB or flags == -1:
            return False

    nodes = 0
    edges = 0
    instructions = 0
    mnems = []
    dones = {}  # basic block ids already seen while walking edges
    names = set()
    bytes_hash = []
    outdegree = 0
    indegree = len(list(CodeRefsTo(f, 1)))
    assembly = {}
    basic_blocks_data = {}
    bb_relations = {}
    bb_topo_num = {}
    bb_topological = {}
    mnemonics_spp = 1
    cpu_ins_list = GetInstructionList()
    image_base = self.get_base_address()
    for block in flow:
        nodes += 1
        instructions_data = []
        # Addresses are stored relative to the image base so different
        # load addresses/ASLR do not break comparisons.
        block_ea = block.startEA - image_base
        idx = len(bb_topological)
        bb_topological[idx] = []
        bb_topo_num[block_ea] = idx

        for x in list(Heads(block.startEA, block.endEA)):
            mnem = GetMnem(x)
            disasm = GetDisasm(x)
            if mnem in cpu_ins_list:
                mnemonics_spp += self.primes[cpu_ins_list.index(mnem)]

            try:
                assembly[block_ea].append(disasm)
            except KeyError:
                if nodes == 1:
                    assembly[block_ea] = [disasm]
                else:
                    assembly[block_ea] = ["loc_%x:" % x, disasm]

            instructions += 1
            bytes_hash.append(chr(Byte(x)))
            outdegree += len(list(CodeRefsFrom(x, 0)))
            mnems.append(mnem)
            op_value = GetOperandValue(x, 1)
            if op_value == BADADDR:
                op_value = GetOperandValue(x, 0)

            if op_value != BADADDR and op_value in self.names:
                tmp_name = self.names[op_value]
                # BUGFIX: demangle the *referenced* symbol; this used to
                # demangle `name` (the function being exported) instead.
                demangled_name = Demangle(tmp_name, INF_SHORT_DN)
                if demangled_name is not None:
                    tmp_name = demangled_name
                if not tmp_name.startswith("sub_"):
                    names.add(tmp_name)

            ins_cmt1 = GetCommentEx(x, 0)
            ins_cmt2 = GetCommentEx(x, 1)
            instructions_data.append([x - image_base, mnem, disasm, ins_cmt1, ins_cmt2])

        basic_blocks_data[block_ea] = instructions_data
        bb_relations[block_ea] = []
        for succ_block in block.succs():
            succ_base = succ_block.startEA - image_base
            bb_relations[block_ea].append(succ_base)
            edges += 1
            indegree += 1
            # BUGFIX: key consistently on the block id; it was checked
            # with .id but stored with the block object, so the check
            # could never match.
            if succ_block.id not in dones:
                dones[succ_block.id] = 1

        for pred_block in block.preds():
            try:
                bb_relations[pred_block.startEA - image_base].append(block.startEA - image_base)
            except KeyError:
                bb_relations[pred_block.startEA - image_base] = [block.startEA - image_base]

            edges += 1
            outdegree += 1
            # BUGFIX: this loop iterates predecessors; it used to
            # test/store the leftover `succ_block` of the previous loop.
            if pred_block.id not in dones:
                dones[pred_block.id] = 1

    # Build the topological successor lists using the block indexes.
    for block in flow:
        block_ea = block.startEA - image_base
        for succ_block in block.succs():
            succ_base = succ_block.startEA - image_base
            bb_topological[bb_topo_num[block_ea]].append(bb_topo_num[succ_base])

    strongly_connected_spp = 0
    try:
        strongly_connected = strongly_connected_components(bb_relations)
        bb_topological = robust_topological_sort(bb_topological)
        bb_topological = json.dumps(bb_topological)
        strongly_connected_spp = 1
        for item in strongly_connected:
            val = len(item)
            if val > 1:
                strongly_connected_spp *= self.primes[val]
    except:
        # XXX: FIXME: The original implementation that we're using is
        # recursive and can fail. We really need to create our own non
        # recursive version.
        strongly_connected = []
        bb_topological = None

    loops = 0
    for sc in strongly_connected:
        if len(sc) > 1:
            loops += 1
        else:
            # A single-node SCC is a loop only when it points to itself.
            if sc[0] in bb_relations and sc[0] in bb_relations[sc[0]]:
                loops += 1

    keys = assembly.keys()
    keys.sort()
    asm = []
    for key in keys:
        asm.extend(assembly[key])
    asm = "\n".join(asm)

    cc = edges - nodes + 2  # McCabe's cyclomatic complexity
    proto = self.guess_type(f)
    proto2 = GetType(f)
    prime = str(self.primes[cc])
    comment = GetFunctionCmt(f, 1)
    bytes_hash = md5("".join(bytes_hash)).hexdigest()
    function_flags = GetFunctionFlags(f)
    pseudo = None
    pseudo_hash1 = None
    pseudo_hash2 = None
    pseudo_hash3 = None
    pseudo_lines = 0
    pseudocode_primes = None
    if f in self.pseudo:
        pseudo = "\n".join(self.pseudo[f])
        pseudo_lines = len(self.pseudo[f])
        pseudo_hash1, pseudo_hash2, pseudo_hash3 = self.kfh.hash_bytes(pseudo).split(";")
        if pseudo_hash1 == "":
            pseudo_hash1 = None
        if pseudo_hash2 == "":
            pseudo_hash2 = None
        if pseudo_hash3 == "":
            pseudo_hash3 = None
        pseudocode_primes = str(self.pseudo_hash[f])

    clean_assembly = self.get_cmp_asm_lines(asm)
    clean_pseudo = self.get_cmp_pseudo_lines(pseudo)
    rva = f - self.get_base_address()
    return (name, nodes, edges, indegree, outdegree, size, instructions, mnems, names,
            proto, cc, prime, f, comment, true_name, bytes_hash, pseudo, pseudo_lines,
            pseudo_hash1, pseudocode_primes, function_flags, asm, proto2,
            pseudo_hash2, pseudo_hash3, len(strongly_connected), loops, rva, bb_topological,
            strongly_connected_spp, clean_assembly, clean_pseudo, mnemonics_spp,
            basic_blocks_data, bb_relations)
def get_base_address(self):
    """Return the image base of the currently loaded IDB."""
    return idaapi.get_imagebase()
def get_instruction_id(self, addr):
    """Return the rowid of the instruction stored at @addr, or None
    when it has not been inserted yet."""
    cur = self.db_cursor()
    cur.execute("select id from instructions where address = ?", (str(addr),))
    row = cur.fetchone()
    cur.close()
    return row[0] if row is not None else None
def get_bb_id(self, addr):
    """Return the rowid of the basic block stored at @addr, or None
    when it has not been inserted yet."""
    cur = self.db_cursor()
    cur.execute("select id from basic_blocks where address = ?", (str(addr),))
    row = cur.fetchone()
    cur.close()
    return row[0] if row is not None else None
def save_function(self, props):
    """Persist one function (the tuple returned by read_function()) into
    the primary database: the functions row itself plus, unless only
    summaries were requested, its instructions, basic blocks and all the
    relations between them."""
    cur = self.db_cursor()
    new_props = []
    # The last two elements (basic_blocks_data, bb_relations) are not
    # columns of the functions table; they are handled separately below.
    for prop in props[:len(props)-2]:
        # XXX: Fixme! This is a hack for 64 bit architectures kernels
        if type(prop) is long and prop > 0xFFFFFFFF:
            prop = str(prop)

        # Lists/sets (mnemonics, names...) are serialised as JSON.
        if type(prop) is list or type(prop) is set:
            new_props.append(json.dumps(list(prop)))
        else:
            new_props.append(prop)

    sql = """insert into main.functions (name, nodes, edges, indegree, outdegree, size,
             instructions, mnemonics, names, prototype,
             cyclomatic_complexity, primes_value, address,
             comment, mangled_function, bytes_hash, pseudocode,
             pseudocode_lines, pseudocode_hash1, pseudocode_primes,
             function_flags, assembly, prototype2, pseudocode_hash2,
             pseudocode_hash3, strongly_connected, loops, rva,
             tarjan_topological_sort, strongly_connected_spp,
             clean_assembly, clean_pseudo, mnemonics_spp)
             values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?,
             ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?,
             ?, ?, ?)"""
    cur.execute(sql, new_props)
    func_id = cur.lastrowid

    if not self.function_summaries_only:
        bb_data, bb_relations = props[len(props)-2:]
        instructions_ids = {}
        sql = """insert into main.instructions (address, mnemonic, disasm, comment1, comment2)
                 values (?, ?, ?, ?, ?)"""
        # Bind hot lookups to locals; this runs once per instruction.
        self_get_instruction_id = self.get_instruction_id
        cur_execute = cur.execute
        for key in bb_data:
            for insn in bb_data[key]:
                addr, mnem, disasm, cmt1, cmt2 = insn
                # Instructions are shared between functions: reuse the
                # existing row when the address was already inserted.
                db_id = self_get_instruction_id(str(addr))
                if db_id is None:
                    cur_execute(sql, (str(addr), mnem, disasm, cmt1, cmt2))
                    db_id = cur.lastrowid
                instructions_ids[addr] = db_id

        num = 0
        bb_ids = {}
        sql1 = "insert into main.basic_blocks (num, address) values (?, ?)"
        sql2 = "insert into main.bb_instructions (basic_block_id, instruction_id) values (?, ?)"
        self_get_bb_id = self.get_bb_id
        for key in bb_data:
            # Insert each basic block
            num += 1
            ins_ea = str(key)
            last_bb_id = self_get_bb_id(ins_ea)
            if last_bb_id is None:
                cur_execute(sql1, (num, ins_ea))
                last_bb_id = cur.lastrowid
            bb_ids[ins_ea] = last_bb_id

            # Insert relations between basic blocks and instructions
            for insn in bb_data[key]:
                ins_id = instructions_ids[insn[0]]
                cur_execute(sql2, (last_bb_id, ins_id))

        # Insert relations between basic blocks
        sql = "insert into main.bb_relations (parent_id, child_id) values (?, ?)"
        for key in bb_relations:
            for bb in bb_relations[key]:
                bb = str(bb)
                key = str(key)
                cur_execute(sql, (bb_ids[key], bb_ids[bb]))

        # And finally insert the functions to basic blocks relations
        sql = "insert into main.function_bblocks (function_id, basic_block_id) values (?, ?)"
        for key in bb_ids:
            bb_id = bb_ids[key]
            cur_execute(sql, (func_id, bb_id))
    cur.close()
def save_callgraph(self, primes, all_primes, md5sum):
    """Store the whole-program call graph signature and the input file
    MD5 in main.program."""
    sql = ("insert into main.program (callgraph_primes, callgraph_all_primes, md5sum)"
           " values (?, ?, ?)")
    cur = self.db_cursor()
    cur.execute(sql, (primes, all_primes, md5sum))
    cur.close()
def export_structures(self):
    """Save every local type (structs and enums) of the IDB into the
    program_data table via add_program_data()."""
    # It seems that GetMaxLocalType, sometimes, can return negative
    # numbers, according to one beta-tester. My guess is that it's a bug
    # in IDA. However, as we cannot reproduce, at least handle this
    # condition.
    local_types = GetMaxLocalType()
    if (local_types & 0x80000000) != 0:
        log("Warning: GetMaxLocalType returned a negative number (0x%x)!" % local_types)
        return

    # Local type ordinals are 1-based in IDA.
    for i in range(local_types):
        try:
            name = GetLocalTypeName(i+1)
            definition = GetLocalType(i+1, PRTYPE_MULTI|PRTYPE_TYPE|PRTYPE_SEMI|PRTYPE_PRAGMA)
            type_name = "struct"
            if definition.startswith("enum"):
                type_name = "enum"
            self.add_program_data(type_name, name, definition)
        except:
            # Best-effort export: a single broken local type must not
            # abort the whole process. NOTE(review): the bare except also
            # hides real bugs — consider logging the exception.
            pass
def get_til_names(self):
    """Parse the .til file next to the IDB and return the list of local
    type-library names, or None when none could be extracted.

    NOTE(review): this scrapes the raw binary header by looking for the
    literal marker 'Local type definitions' followed by a comma-separated,
    NUL-terminated list — an undocumented format; verify on new IDA
    versions."""
    idb_path = GetIdbPath()
    filename, ext = os.path.splitext(idb_path)
    til_path = "%s.til" % filename

    with open(til_path, "rb") as f:
        line = f.readline()
        pos = line.find("Local type definitions")
        if pos > -1:
            tmp = line[pos+len("Local type definitions")+1:]
            pos = tmp.find("\x00")
            if pos > -1:
                defs = tmp[:pos].split(",")
                return defs
    return None
def export_til(self):
    """Record every type library referenced by the IDB in program_data."""
    til_names = self.get_til_names()
    if til_names is None:
        return
    for til in til_names:
        self.add_program_data("til", til, None)
def do_export(self):
    """Export every function in [min_ea, max_ea], then the structures and
    type libraries, updating the IDA wait box with progress."""
    i = 0
    callgraph_primes = 1
    callgraph_all_primes = {}
    func_list = list(Functions(self.min_ea, self.max_ea))
    total_funcs = len(func_list)
    t = time.time()

    for func in func_list:
        i += 1
        # Refresh the progress/ETA line every 100 functions.
        if i % 100 == 0 or i == 1:
            line = "Exported %d function(s) out of %d total.\nElapsed %d second(s), remaining ~%d second(s)"
            elapsed = time.time() - t
            remaining = (elapsed / i) * (total_funcs - i)
            replace_wait_box(line % (i, total_funcs, int(elapsed), int(remaining)))

        props = self.read_function(func)
        if props == False:
            continue

        # props[11] is the function's primes signature; the product of
        # all of them is the whole-program callgraph signature.
        ret = props[11]
        callgraph_primes *= decimal.Decimal(ret)
        try:
            callgraph_all_primes[ret] += 1
        except KeyError:
            callgraph_all_primes[ret] = 1
        self.save_function(props)

    md5sum = GetInputFileMD5()
    self.save_callgraph(str(callgraph_primes), json.dumps(callgraph_all_primes), md5sum)
    self.export_structures()
    self.export_til()
def export(self):
    """Run the full export under a wait box, then commit, run SQLite's
    'analyze' to refresh the planner statistics, and close the DB."""
    try:
        show_wait_box("Exporting database")
        self.do_export()
    finally:
        hide_wait_box()

    self.db.commit()

    cur = self.db_cursor()
    cur.execute("analyze")
    cur.close()

    self.db_close()
def import_til(self):
    """Load into IDA every type library recorded in the diff database,
    then wait for auto-analysis to settle."""
    log("Importing type libraries...")
    cur = self.db_cursor()
    cur.execute("select name from diff.program_data where type = 'til'")
    rows = cur.fetchall()
    cur.close()
    for row in rows:
        LoadTil(row[0])
    Wait()
def get_valid_definition(self, defs):
    """ Try to get a valid structure definition by removing (yes) the
    invalid characters typically found in IDA's generated structs."""
    cleaned = defs
    for bad in ("?", "@", "$"):
        cleaned = cleaned.replace(bad, "_")
    return cleaned
def import_definitions(self):
    """Import struct/enum definitions from the diff database into IDA.

    First declares empty placeholders ('struct X;') so cross-references
    between types can resolve, then repeatedly (up to 10 passes) parses
    the full definitions until they stick."""
    cur = self.db_cursor()
    sql = "select type, name, value from diff.program_data where type in ('structure', 'struct', 'enum')"
    cur.execute(sql)
    rows = cur.fetchall()

    new_rows = set()
    for row in rows:
        # Only import types IDA does not know about yet.
        if GetStrucIdByName(row[1]) == BADADDR:
            type_name = "struct"
            if row[0] == "enum":
                type_name = "enum"
            new_rows.add(row)
            # Forward-declare the type; errors are ignored on purpose.
            ret = ParseTypes("%s %s;" % (type_name, row[1]))
            if ret != 0:
                pass

    # Multiple passes because a definition may depend on one that is
    # parsed later in the same set.
    for i in xrange(10):
        for row in new_rows:
            if GetStrucIdByName(row[1]) == BADADDR:
                definition = self.get_valid_definition(row[2])
                ret = ParseTypes(definition)
                if ret != 0:
                    pass

    cur.close()
    Wait()
def import_one(self, item):
    """Import a single matched pair from the chooser: optionally all
    TILs/definitions first, then the selected function's metadata, and
    re-export the updated function into the primary database."""
    ret = askyn_c(1, "AUTOHIDE DATABASE\nDo you want to import all the type libraries, structs and enumerations?")

    if ret == 1:
        # Import all the type libraries from the diff database
        self.import_til()
        # Import all the struct and enum definitions
        self.import_definitions()
    elif ret == -1:
        # Cancelled: import nothing at all.
        return

    # Import just the selected item (also reached when the user answered
    # 'No' above). item[1]/item[3] are hex addresses of both functions.
    ea1 = str(int(item[1], 16))
    ea2 = str(int(item[3], 16))
    self.do_import_one(ea1, ea2, True)

    # Refresh the primary DB row with the newly imported name/comments.
    new_func = self.read_function(str(ea1))
    self.delete_function(ea1)
    self.save_function(new_func)

    self.db.commit()
def prettify_asm(self, asm_source):
    """Indent every assembly line with a tab, leaving 'loc_*' labels
    flush left, so listings read like a classic disassembly."""
    lines = asm_source.split("\n")
    return "\n".join(l if l.startswith("loc_") else "\t" + l for l in lines)
def show_asm_diff(self, item):
    """Show an HTML side-by-side diff of the assembly of the two matched
    functions in chooser row @item."""
    cur = self.db_cursor()
    # Column 4 (the literal 1/2) orders the rows: primary first, diff
    # second.
    sql = """select *
             from (
             select prototype, assembly, name, 1
             from functions
             where address = ?
             and assembly is not null
             union select prototype, assembly, name, 2
             from diff.functions
             where address = ?
             and assembly is not null)
             order by 4 asc"""
    ea1 = str(int(item[1], 16))
    ea2 = str(int(item[3], 16))
    cur.execute(sql, (ea1, ea2))
    rows = cur.fetchall()
    if len(rows) != 2:
        Warning("Sorry, there is no assembly available for either the first or the second database.")
    else:
        row1 = rows[0]
        row2 = rows[1]

        html_diff = HtmlDiff()
        asm1 = self.prettify_asm(row1[1])
        asm2 = self.prettify_asm(row2[1])
        buf1 = "%s proc near\n%s\n%s endp" % (row1[2], asm1, row1[2])
        buf2 = "%s proc near\n%s\n%s endp" % (row2[2], asm2, row2[2])
        src = html_diff.make_file(buf1.split("\n"), buf2.split("\n"))

        title = "Diff assembler %s - %s" % (row1[2], row2[2])
        cdiffer = CHtmlViewer()
        cdiffer.Show(src, title)
    cur.close()
def show_asm(self, item, primary):
    """Display the syntax-highlighted assembly of one function; @primary
    selects the main or the diff database."""
    cur = self.db_cursor()
    if primary:
        db = "main"
    else:
        db = "diff"
    ea = str(int(item[1], 16))
    sql = "select prototype, assembly, name from %s.functions where address = ?"
    # Only the attached-database name is interpolated; the address stays
    # a bound parameter.
    sql = sql % db
    cur.execute(sql, (ea, ))
    row = cur.fetchone()
    if row is None:
        Warning("Sorry, there is no assembly available for the selected function.")
    else:
        fmt = HtmlFormatter()
        fmt.noclasses = True
        fmt.linenos = True
        asm = self.prettify_asm(row[1])
        final_asm = "; %s\n%s proc near\n%s\n%s endp\n"
        final_asm = final_asm % (row[0], row[2], asm, row[2])
        src = highlight(final_asm, NasmLexer(), fmt)
        title = "Assembly for %s" % row[2]
        cdiffer = CHtmlViewer()
        cdiffer.Show(src, title)
    cur.close()
def get_cmp_asm_lines(self, asm):
    """Normalise a whole assembly listing, line by line, through
    get_cmp_asm() for fuzzy comparison."""
    normalise = self.get_cmp_asm
    result = []
    for raw_line in StringIO(asm).readlines():
        result.append(normalise(raw_line.strip("\n")))
    return "\n".join(result)
def get_cmp_pseudo_lines(self, pseudo):
    """Normalise pseudo-code for fuzzy comparison: drop '//' comments and
    replace auto-generated names, locals and arguments with fixed
    placeholders. None is passed through unchanged."""
    if pseudo is None:
        return pseudo

    # Remove all the comments
    cleaned = re.sub(" // .*", "", pseudo)
    # Now, replace sub_, byte_, word_, dword_, loc_, etc...
    for rep in CMP_REPS:
        cleaned = re.sub(rep + "[a-f0-9A-F]+", rep + "XXXX", cleaned)
    # Auto-numbered locals (v1, v2...) and arguments (a1, a2...).
    cleaned = re.sub("v[0-9]+", "vXXX", cleaned)
    return re.sub("a[0-9]+", "aXXX", cleaned)
def get_cmp_asm(self, asm):
    """Normalise one assembly line for fuzzy comparison: strip trailing
    comments and replace address-derived names and offsets with fixed
    placeholders. None is passed through unchanged."""
    if asm is None:
        return asm

    # Ignore the comments in the assembly dump.
    tmp = asm.split(";")[0]
    # BUGFIX: the second split used to run on `asm` again, silently
    # discarding the result of the ';' strip above.
    tmp = tmp.split(" # ")[0]
    # Now, replace sub_, byte_, word_, dword_, loc_, etc...
    for rep in CMP_REPS:
        tmp = re.sub(rep + "[a-f0-9A-F]+", "XXXX", tmp)
    reps = ["\+[a-f0-9A-F]+h\+"]
    for rep in reps:
        tmp = re.sub(rep, "+XXXX+", tmp)
    tmp = re.sub("\.\.[a-f0-9A-F]{8}", "XXX", tmp)
    return tmp
def compare_graphs_pass(self, bblocks1, bblocks2, colours1, colours2, is_second = False):
    """One matching pass between the basic blocks of two functions.

    Perfect matches (identical normalised assembly) are coloured white in
    both graphs and removed from further consideration; partial matches
    (same mnemonic sequence, different operands) are coloured light cyan
    but only during the first pass (@is_second False). Returns the two
    updated colour maps."""
    dones1 = set()
    dones2 = set()

    # Now compare each basic block from the first function to all the
    # basic blocks in the 2nd function
    for key1 in bblocks1:
        if key1 in dones1:
            continue

        for key2 in bblocks2:
            if key2 in dones2:
                continue

            # Same number of instructions?
            if len(bblocks1[key1]) == len(bblocks2[key2]):
                mod = False       # any instruction differs after cleaning
                partial = True    # all mnemonics match so far
                i = 0
                for ins1 in bblocks1[key1]:
                    ins2 = bblocks2[key2][i]

                    # Same mnemonic? The change can be only partial
                    if ins1[1] != ins2[1]:
                        partial = False

                    # Try to compare the assembly after doing some cleaning
                    cmp_asm1 = self.get_cmp_asm(ins1[2])
                    cmp_asm2 = self.get_cmp_asm(ins2[2])
                    if cmp_asm1 != cmp_asm2:
                        mod = True
                        if not partial:
                            continue
                    i += 1

                if not mod:
                    # Perfect match, we discovered a basic block equal in both
                    # functions
                    colours1[key1] = 0xffffff
                    colours2[key2] = 0xffffff
                    dones1.add(key1)
                    dones2.add(key2)
                    break
                elif not is_second and partial:
                    # Partial match, we discovered a basic block with the same
                    # mnemonics but something changed
                    #
                    # NOTE:
                    # Do not add the partial matches to the dones lists, as we
                    # can have complete matches after a partial match!
                    colours1[key1] = 0xCCffff
                    colours2[key2] = 0xCCffff
                    break
    return colours1, colours2
def compare_graphs(self, g1, ea1, g2, ea2):
    """Build the colour maps for two function graphs: white for identical
    blocks, light cyan for partial matches and light violet for blocks
    with no counterpart."""
    bblocks1 = g1[0]
    bblocks2 = g2[0]

    # Consider, by default, all blocks added, news
    colours1 = dict((key, 0xCCCCFF) for key in bblocks1)
    colours2 = dict((key, 0xCCCCFF) for key in bblocks2)

    # First pass marks perfect and partial matches, the second pass only
    # perfect ones (see compare_graphs_pass).
    colours1, colours2 = self.compare_graphs_pass(bblocks1, bblocks2, colours1, colours2, False)
    colours1, colours2 = self.compare_graphs_pass(bblocks1, bblocks2, colours1, colours2, True)
    return colours1, colours2
def graph_diff(self, ea1, name1, ea2, name2):
    """Open two coloured, docked graph viewers diffing the control-flow
    graphs of the matched functions @ea1/@ea2."""
    g1 = self.get_graph(str(ea1), True)
    g2 = self.get_graph(str(ea2))

    # ({}, {}) is what get_graph returns when nothing was stored.
    if g1 == ({}, {}) or g2 == ({}, {}):
        Warning("Sorry, graph information is not available for one of the databases.")
        return False

    colours = self.compare_graphs(g1, ea1, g2, ea2)

    title1 = "Graph for %s (primary)" % name1
    title2 = "Graph for %s (secondary)" % name2
    graph1 = CDiffGraphViewer(title1, g1, colours[0])
    graph2 = CDiffGraphViewer(title2, g2, colours[1])
    graph1.Show()
    graph2.Show()

    set_dock_pos(title1, title2, DP_RIGHT)
    # NOTE(review): these timer callbacks appear to force a deferred
    # refresh/fit of both viewers — confirm against CDiffGraphViewer.
    uitimercallback_t(graph1, 100)
    uitimercallback_t(graph2, 100)
def get_graph(self, ea1, primary=False):
    """Load the stored control-flow graph of the function at address @ea1
    from the main (@primary True) or diff database.

    Returns (bb_blocks, bb_relations): a dict mapping basic block address
    to its [address, mnemonic, disasm] instruction rows, and a dict
    mapping parent block address to the set of child block addresses."""
    if primary:
        db = "main"
    else:
        db = "diff"
    cur = self.db_cursor()
    dones = set()
    # All instructions of all basic blocks of the function, ordered by
    # basic block address.
    sql = """ select bb.address, ins.address, ins.mnemonic, ins.disasm
              from %s.function_bblocks fb,
              %s.bb_instructions bbins,
              %s.instructions ins,
              %s.basic_blocks bb,
              %s.functions f
              where ins.id = bbins.instruction_id
              and bbins.basic_block_id = bb.id
              and bb.id = fb.basic_block_id
              and f.id = fb.function_id
              and f.address = ?
              order by bb.address asc""" % (db, db, db, db, db)
    cur.execute(sql, (ea1,))
    bb_blocks = {}
    for row in cur.fetchall():
        bb_ea = str(int(row[0]))
        ins_ea = str(int(row[1]))
        mnem = row[2]
        dis = row[3]

        # Instructions can be shared between blocks; keep the first
        # occurrence only.
        if ins_ea in dones:
            continue
        dones.add(ins_ea)

        try:
            bb_blocks[bb_ea].append([ins_ea, mnem, dis])
        except KeyError:
            bb_blocks[bb_ea] = [ [ins_ea, mnem, dis] ]

    # Parent/child address pairs for every edge of the function.
    sql = """ select (select address
                      from %s.basic_blocks
                      where id = bbr.parent_id),
                     (select address
                      from %s.basic_blocks
                      where id = bbr.child_id)
              from %s.bb_relations bbr,
              %s.function_bblocks fbs,
              %s.basic_blocks bbs,
              %s.functions f
              where f.id = fbs.function_id
              and bbs.id = fbs.basic_block_id
              and fbs.basic_block_id = bbr.child_id
              and f.address = ?
              order by 1 asc, 2 asc""" % (db, db, db, db, db, db)
    cur.execute(sql, (ea1, ))
    rows = cur.fetchall()

    bb_relations = {}
    for row in rows:
        bb_ea1 = str(row[0])
        bb_ea2 = str(row[1])
        try:
            bb_relations[bb_ea1].add(bb_ea2)
        except KeyError:
            bb_relations[bb_ea1] = set([bb_ea2])

    cur.close()
    return bb_blocks, bb_relations
def show_pseudo(self, item, primary):
    """Display the syntax-highlighted pseudo-code of one function;
    @primary selects the main or the diff database."""
    cur = self.db_cursor()
    if primary:
        db = "main"
    else:
        db = "diff"
    ea = str(int(item[1], 16))
    sql = "select prototype, pseudocode, name from %s.functions where address = ?"
    # Only the attached-database name is interpolated; the address stays
    # a bound parameter.
    sql = sql % db
    cur.execute(sql, (str(ea), ))
    row = cur.fetchone()
    if row is None:
        Warning("Sorry, there is no pseudo-code available for the selected function.")
    else:
        fmt = HtmlFormatter()
        fmt.noclasses = True
        fmt.linenos = True
        func = "%s\n%s" % (row[0], row[1])
        src = highlight(func, CppLexer(), fmt)
        title = "Pseudo-code for %s" % row[2]
        cdiffer = CHtmlViewer()
        cdiffer.Show(src, title)
    cur.close()
def show_pseudo_diff(self, item):
    """Show an HTML side-by-side diff of the pseudo-code of the two
    matched functions in chooser row @item."""
    cur = self.db_cursor()
    # Column 4 (the literal 1/2) orders the rows: primary first, diff
    # second.
    sql = """select *
             from (
             select prototype, pseudocode, name, 1
             from functions
             where address = ?
             and pseudocode is not null
             union select prototype, pseudocode, name, 2
             from diff.functions
             where address = ?
             and pseudocode is not null)
             order by 4 asc"""
    ea1 = str(int(item[1], 16))
    ea2 = str(int(item[3], 16))
    cur.execute(sql, (ea1, ea2))
    rows = cur.fetchall()
    if len(rows) != 2:
        Warning("Sorry, there is no pseudo-code available for either the first or the second database.")
    else:
        row1 = rows[0]
        row2 = rows[1]

        html_diff = HtmlDiff()
        buf1 = row1[0] + "\n" + row1[1]
        buf2 = row2[0] + "\n" + row2[1]
        src = html_diff.make_file(buf1.split("\n"), buf2.split("\n"))

        title = "Diff pseudo-code %s - %s" % (row1[2], row2[2])
        cdiffer = CHtmlViewer()
        cdiffer.Show(src, title)
    cur.close()
def delete_function(self, ea):
    """Remove the function stored at address @ea from the primary DB."""
    sql = "delete from functions where address = ?"
    cur = self.db_cursor()
    cur.execute(sql, (ea, ))
    cur.close()
def do_import_one(self, ea1, ea2, force = False):
    """Copy prototype, comment, name and flags of the diff-database
    function at @ea2 onto the local function at @ea1.

    Names are only applied when meaningful (not 'sub_*') unless @force;
    on a name clash up to 10 numbered variants are tried."""
    cur = self.db_cursor()
    sql = "select prototype, comment, mangled_function, function_flags from diff.functions where address = ?"
    cur.execute(sql, (ea2,))
    row = cur.fetchone()
    if row is not None:
        proto = row[0]
        comment = row[1]
        name = row[2]
        flags = row[3]

        ea1 = int(ea1)
        if not name.startswith("sub_") or force:
            if not MakeNameEx(ea1, name, SN_NOWARN|SN_NOCHECK):
                # Name already taken: try name_0 .. name_9.
                for i in xrange(10):
                    if MakeNameEx(ea1, "%s_%d" % (name, i), SN_NOWARN|SN_NOCHECK):
                        break

        # "int()" is the placeholder prototype; do not overwrite with it.
        if proto is not None and proto != "int()":
            SetType(ea1, proto)

        if comment is not None and comment != "":
            SetFunctionCmt(ea1, comment, 1)

        if flags is not None:
            SetFunctionFlags(ea1, flags)
    cur.close()
def import_selected(self, items, selected):
    """Import TILs, struct/enum definitions and the chooser rows whose
    1-based indexes are listed in @selected."""
    # Import all the type libraries from the diff database
    self.import_til()
    # Import all the struct and enum definitions
    self.import_definitions()
    # Chooser indexes are 1-based.
    self.import_items([items[idx - 1] for idx in selected])
def import_items(self, items):
    """Import names/comments/prototypes for every matched pair in @items,
    then re-export each touched function into the primary database."""
    to_import = set()
    # Import all the function names and comments
    for item in items:
        ea1 = str(int(item[1], 16))
        ea2 = str(int(item[3], 16))
        self.do_import_one(ea1, ea2)
        to_import.add(ea1)

    try:
        show_wait_box("Updating primary database...")
        for ea in to_import:
            ea = str(ea)
            # Re-read and replace the row so the primary DB reflects the
            # imported metadata.
            new_func = self.read_function(ea)
            self.delete_function(ea)
            self.save_function(new_func)
        self.db.commit()
    finally:
        hide_wait_box()
def do_import_all(self, items):
    """Import type libraries, struct/enum definitions and then every
    single item of the chooser."""
    self.import_til()
    self.import_definitions()
    self.import_items(items)
def do_import_all_auto(self, items):
    """Like do_import_all() but restricted to the items whose primary
    function still carries an IDA auto-generated 'sub_*' name."""
    self.import_til()
    self.import_definitions()
    # item[2] is the primary-database function name.
    subs_only = [item for item in items if item[2].startswith("sub_")]
    self.import_items(subs_only)
def re_diff(self):
    """Close every result chooser and re-run the diff, optionally
    clearing the matched sets so all matches are shown again."""
    self.best_chooser.Close()
    self.partial_chooser.Close()
    if self.unreliable_chooser is not None:
        self.unreliable_chooser.Close()
    if self.unmatched_primary is not None:
        self.unmatched_primary.Close()
    if self.unmatched_second is not None:
        self.unmatched_second.Close()

    # Yes (1): keep matched1/matched2 so only new matches appear.
    # No (0): forget previous matches. Cancel (-1): abort.
    ret = askyn_c(1, "Do you want to show only the new matches?")
    if ret == -1:
        return
    elif ret == 0:
        self.matched1 = set()
        self.matched2 = set()

    self.diff(self.last_diff_db)
def import_all(self, items):
    """Import every item in @items and offer to re-launch the diff; any
    failure is logged instead of propagating into IDA."""
    try:
        self.do_import_all(items)

        msg = "AUTOHIDE DATABASE\nHIDECANCEL\nAll functions were imported. Do you want to relaunch the diffing process?"
        if askyn_c(1, msg) == 1:
            self.db.execute("detach diff")
            # We cannot run that code here or otherwise IDA will crash corrupting the stack
            timeraction_t(self.re_diff, None, 1000)
    except:
        log("import_all(): %s" % str(sys.exc_info()[1]))
        traceback.print_exc()
def import_all_auto(self, items):
    """Import every 'sub_*' item in @items; any failure is logged instead
    of propagating into IDA."""
    try:
        self.do_import_all_auto(items)
    except:
        # BUGFIX: the log line wrongly labelled errors as "import_all()".
        log("import_all_auto(): %s" % str(sys.exc_info()[1]))
        traceback.print_exc()
def equal_db(self):
    """Return True when both databases describe the same binary: either
    the recorded MD5 sums match or the functions tables are identical."""
    cur = self.db_cursor()
    sql = "select count(*) from program p, diff.program dp where p.md5sum = dp.md5sum"
    cur.execute(sql)
    row = cur.fetchone()
    same_md5 = row[0] == 1
    if same_md5:
        log("Same MD5 in both databases")
        # BUGFIX: this path used to return `row[0] == 0` with row[0] == 1,
        # i.e. False for two identical binaries.
        ret = True
    else:
        # Different binaries may still have identical exported functions.
        sql = "select count(*) from (select * from functions except select * from diff.functions) x"
        cur.execute(sql)
        row = cur.fetchone()
        ret = row[0] == 0
    cur.close()
    return ret
def check_callgraph(self):
    """Compare the whole-program callgraph prime signatures of both
    databases; report either a structural 100% match or the percentage
    difference."""
    cur = self.db_cursor()
    sql = """select callgraph_primes, callgraph_all_primes from program
             union all
             select callgraph_primes, callgraph_all_primes from diff.program"""
    cur.execute(sql)
    rows = cur.fetchall()
    if len(rows) == 2:
        cg1 = decimal.Decimal(rows[0][0])
        cg_factors1 = json.loads(rows[0][1])
        cg2 = decimal.Decimal(rows[1][0])
        cg_factors2 = json.loads(rows[1][1])

        if cg1 == cg2:
            self.equal_callgraph = True
            log("Callgraph signature for both databases is equal, the programs seem to be 100% equal structurally")
            Warning("Callgraph signature for both databases is equal, the programs seem to be 100% equal structurally")
        else:
            # Pre-seed the factorization cache so difference() does not
            # have to re-factor these huge numbers.
            FACTORS_CACHE[cg1] = cg_factors1
            FACTORS_CACHE[cg2] = cg_factors2
            diff = difference(cg1, cg2)
            total = sum(cg_factors1.values())
            percent = diff * 100. / total
            log("Callgraphs from both programs differ in %f%%" % percent)

    cur.close()
def find_equal_matches(self):
    """Run the 'certain match' heuristics: fully identical rows, equal
    assembly/pseudo-code, bytes hash + names, cleaned-up code and
    same-address matches. Also records the total function counts used by
    all_functions_matched()."""
    cur = self.db_cursor()
    # Start by calculating the total number of functions in both databases
    sql = """select count(*) total1 from functions
             union all
             select count(*) total2 from diff.functions"""
    cur.execute(sql)
    rows = cur.fetchall()
    if len(rows) != 2:
        Warning("Malformed database, only %d rows!" % len(rows))
        raise Exception("Malformed database!")

    self.total_functions1 = rows[0][0]
    self.total_functions2 = rows[1][0]

    # Functions whose whole row is byte-identical in both databases.
    sql = "select address, mangled_function from (select * from functions intersect select * from diff.functions) x"
    cur.execute(sql)
    rows = cur.fetchall()
    choose = self.best_chooser
    if len(rows) > 0:
        for row in rows:
            name = row[1]
            ea = LocByName(name)
            ea2 = row[0]
            choose.add_item(CChooser.Item(ea, name, ea2, name, "100% equal", 1))
            self.matched1.add(name)
            self.matched2.add(name)

    if self.equal_callgraph and not self.ignore_all_names:
        self.find_same_name(self.partial_chooser)

    sql = """select f.address, f.name, df.address, df.name, 'Equal pseudo-code' description
             from functions f,
             diff.functions df
             where f.pseudocode = df.pseudocode
             and df.pseudocode is not null
             and f.pseudocode_lines >= 5
             union
             select f.address, f.name, df.address, df.name, 'Equal assembly' description
             from functions f,
             diff.functions df
             where f.assembly = df.assembly
             and df.assembly is not null
             """
    log_refresh("Finding with heuristic 'Equal assembly or pseudo-code'")
    self.add_matches_from_query(sql, choose)

    sql = """ select distinct f.address ea, f.name name1, df.address ea2, df.name name2,
              'Bytes hash and names' description,
              f.pseudocode, df.pseudocode,
              f.assembly, df.assembly,
              f.pseudocode_primes, df.pseudocode_primes
              from functions f,
              diff.functions df
              where f.names = df.names
              and f.bytes_hash = df.bytes_hash
              and f.names != '[]'"""
    log_refresh("Finding with heuristic 'Bytes hash and names'")
    self.add_matches_from_query_ratio(sql, self.best_chooser, self.partial_chooser, self.unreliable_chooser)

    sql = """ select distinct f.address ea, f.name name1, df.address ea2, df.name name2,
              'Same cleaned up assembly or pseudo-code' description,
              f.pseudocode, df.pseudocode,
              f.assembly, df.assembly,
              f.pseudocode_primes, df.pseudocode_primes
              from functions f,
              diff.functions df
              where f.clean_assembly = df.clean_assembly
              or f.clean_pseudo = df.clean_pseudo"""
    log_refresh("Finding with heuristic 'Same cleaned up assembly or pseudo-code'")
    self.add_matches_from_query_ratio(sql, self.best_chooser, self.partial_chooser, self.unreliable_chooser)

    sql = """select f.address, f.name, df.address, df.name, 'Same address, nodes, edges and mnemonics' description,
             f.pseudocode, df.pseudocode,
             f.assembly, df.assembly,
             f.pseudocode_primes, df.pseudocode_primes
             from functions f,
             diff.functions df
             where f.rva = df.rva
             and f.instructions = df.instructions
             and f.nodes = df.nodes
             and f.edges = df.edges
             and f.mnemonics = df.mnemonics"""
    log_refresh("Finding with heuristic 'Same address, nodes, edges and mnemonics'")
    self.add_matches_from_query_ratio(sql, self.best_chooser, self.partial_chooser, None)
    cur.close()
def decompile_and_get(self, ea):
    """Decompile the function at @ea with Hex-Rays, caching its AST
    primes hash in self.pseudo_hash and its body lines in self.pseudo.

    Returns the first pseudo-code line (the prototype) or False when the
    decompiler is unavailable or fails."""
    if not init_hexrays_plugin():
        return False

    f = get_func(ea)
    if f is None:
        return False

    cfunc = decompile(f);
    if cfunc is None:
        # Failed to decompile
        return False

    # Walk the C AST accumulating the primes product signature.
    visitor = CAstVisitor(cfunc)
    visitor.apply_to(cfunc.body, None)
    self.pseudo_hash[ea] = visitor.primes_hash

    sv = cfunc.get_pseudocode();
    self.pseudo[ea] = []
    first_line = None
    for sline in sv:
        line = tag_remove(sline.line);
        if line.startswith("//"):
            continue
        # Keep the first real line (the prototype) out of the body cache.
        if first_line is None:
            first_line = line
        else:
            self.pseudo[ea].append(line)
    return first_line
def guess_type(self, ea):
    """Return the best prototype guess for the function at @ea: IDA's
    GuessType(), refined by the decompiler when use_decompiler_always is
    enabled and decompilation succeeds."""
    t = GuessType(ea)
    if not self.use_decompiler_always:
        return t
    else:
        try:
            ret = self.decompile_and_get(ea)
            if ret:
                t = ret
        except:
            # Best-effort: fall back to GuessType()'s answer on failure.
            log("Cannot decompile 0x%x: %s" % (ea, str(sys.exc_info()[1])))
    return t
def ast_ratio(self, ast1, ast2):
    """Similarity of two AST primes signatures, or 0 when relaxed ratio
    calculations are disabled."""
    if self.relaxed_ratio:
        # Defer to the module-level ast_ratio() helper.
        return ast_ratio(ast1, ast2)
    return 0
def check_ratio(self, ast1, ast2, pseudo1, pseudo2, asm1, asm2):
    """Compute the similarity ratio (0.0-1.0) of two functions, taking
    the best score among AST primes, cleaned pseudo-code and cleaned
    assembly comparisons. In relaxed mode faster/rougher comparisons and
    1-decimal rounding are used."""
    fratio = quick_ratio
    decimal_values = "{0:.2f}"
    if self.relaxed_ratio:
        fratio = real_quick_ratio
        decimal_values = "{0:.1f}"

    v3 = 0
    ast_done = False
    # Small ASTs are cheap to compare exactly; a perfect AST match wins
    # outright.
    if self.relaxed_ratio and ast1 is not None and ast2 is not None and max(len(ast1), len(ast2)) < 16:
        ast_done = True
        v3 = self.ast_ratio(ast1, ast2)
        if v3 == 1:
            return 1.0

    v1 = 0
    if pseudo1 is not None and pseudo2 is not None and pseudo1 != "" and pseudo2 != "":
        tmp1 = self.get_cmp_pseudo_lines(pseudo1)
        tmp2 = self.get_cmp_pseudo_lines(pseudo2)
        if tmp1 == "" or tmp2 == "":
            log("Error cleaning pseudo-code!")
            print tmp1
            print tmp2
        else:
            v1 = fratio(tmp1, tmp2)
            v1 = float(decimal_values.format(v1))
            if v1 == 1.0:
                # If real_quick_ratio returns 1 try again with quick_ratio
                # because it can result in false positives. If real_quick_ratio
                # says 'different', there is no point in continuing.
                if fratio == real_quick_ratio:
                    v1 = quick_ratio(tmp1, tmp2)
                    if v1 == 1.0:
                        return 1.0

    tmp_asm1 = self.get_cmp_asm_lines(asm1)
    tmp_asm2 = self.get_cmp_asm_lines(asm2)
    v2 = fratio(tmp_asm1, tmp_asm2)
    v2 = float(decimal_values.format(v2))
    if v2 == 1:
        # Actually, same as the quick_ratio/real_quick_ratio check done
        # with the pseudo-code
        if fratio == real_quick_ratio:
            v2 = quick_ratio(tmp_asm1, tmp_asm2)
            if v2 == 1.0:
                return 1.0

    # Large ASTs are compared with the (fuzzy) ratio function instead.
    if self.relaxed_ratio and not ast_done:
        v3 = fratio(ast1, ast2)
        v3 = float(decimal_values.format(v3))
        if v3 == 1:
            return 1.0

    r = max(v1, v2, v3)
    return r
def all_functions_matched(self):
    """True when every function of either database is already matched."""
    matched_all_1 = len(self.matched1) == self.total_functions1
    matched_all_2 = len(self.matched2) == self.total_functions2
    return matched_all_1 or matched_all_2
def add_matches_from_query_ratio(self, sql, best, partial, unreliable=None):
    """Run @sql (selecting ea, name1, ea2, name2, desc, pseudo1, pseudo2,
    asm1, asm2, ast1, ast2) and file each still-unmatched pair into a
    chooser by similarity ratio:

      1.0        -> self.best_chooser
      [0.5, 1.0) -> @partial
      < 0.5      -> @unreliable when given, otherwise @partial

    Stops after self.max_processed_rows rows (0 = unlimited) or when
    self.timeout seconds have elapsed."""
    if self.all_functions_matched():
        return

    cur = self.db_cursor()
    cur.execute(sql)

    i = 0
    t = time.time()
    while self.max_processed_rows == 0 or (self.max_processed_rows != 0 and i < self.max_processed_rows):
        if time.time() - t > self.timeout:
            log("Timeout")
            break

        i += 1
        if i % 50000 == 0:
            log("Processed %d rows..." % i)

        row = cur.fetchone()
        if row is None:
            break

        ea = str(row[0])
        name1 = row[1]
        ea2 = row[2]
        name2 = row[3]
        desc = row[4]
        pseudo1 = row[5]
        pseudo2 = row[6]
        asm1 = row[7]
        asm2 = row[8]
        ast1 = row[9]
        ast2 = row[10]

        # Skip pairs already matched by a previous (stronger) heuristic.
        if name1 in self.matched1 or name2 in self.matched2:
            continue

        r = self.check_ratio(ast1, ast2, pseudo1, pseudo2, asm1, asm2)
        if r == 1:
            self.best_chooser.add_item(CChooser.Item(ea, name1, ea2, name2, desc, r))
            self.matched1.add(name1)
            self.matched2.add(name2)
        elif r >= 0.5:
            partial.add_item(CChooser.Item(ea, name1, ea2, name2, desc, r))
            self.matched1.add(name1)
            self.matched2.add(name2)
        # BUGFIX: this threshold was written "r < 5", which is vacuously
        # true here; any ratio reaching this branch is already < 0.5.
        elif r < 0.5 and unreliable is not None:
            unreliable.add_item(CChooser.Item(ea, name1, ea2, name2, desc, r))
            self.matched1.add(name1)
            self.matched2.add(name2)
        else:
            partial.add_item(CChooser.Item(ea, name1, ea2, name2, desc, r))
            self.matched1.add(name1)
            self.matched2.add(name2)
    cur.close()
def add_matches_from_query_ratio_max(self, sql, best, partial, val):
    """Like add_matches_from_query_ratio() but with a caller-supplied
    threshold: perfect matches go to self.best_chooser (unless @best is
    already it), ratios above @val go to @best, and the rest to @partial
    when it is not None."""
    if self.all_functions_matched():
        return

    cur = self.db_cursor()
    cur.execute(sql)

    i = 0
    t = time.time()
    while self.max_processed_rows == 0 or (self.max_processed_rows != 0 and i < self.max_processed_rows):
        if time.time() - t > self.timeout:
            log("Timeout")
            break

        i += 1
        if i % 50000 == 0:
            log("Processed %d rows..." % i)

        row = cur.fetchone()
        if row is None:
            break

        ea = str(row[0])
        name1 = row[1]
        ea2 = row[2]
        name2 = row[3]
        desc = row[4]
        pseudo1 = row[5]
        pseudo2 = row[6]
        asm1 = row[7]
        asm2 = row[8]
        ast1 = row[9]
        ast2 = row[10]

        # Skip pairs already matched by a previous (stronger) heuristic.
        if name1 in self.matched1 or name2 in self.matched2:
            continue

        r = self.check_ratio(ast1, ast2, pseudo1, pseudo2, asm1, asm2)
        if r == 1 and best != self.best_chooser:
            self.best_chooser.add_item(CChooser.Item(ea, name1, ea2, name2, desc, r))
            self.matched1.add(name1)
            self.matched2.add(name2)
        elif r > val:
            best.add_item(CChooser.Item(ea, name1, ea2, name2, desc, r))
            self.matched1.add(name1)
            self.matched2.add(name2)
        elif partial is not None:
            partial.add_item(CChooser.Item(ea, name1, ea2, name2, desc, r))
            self.matched1.add(name1)
            self.matched2.add(name2)
    cur.close()
def add_matches_from_query(self, sql, choose):
""" Warning: use this *only* if the ratio is known to be 1.00 """
if self.all_functions_matched():
return
cur = self.db_cursor()
cur.execute(sql)
i = 0
while 1:
i += 1
if i % 1000 == 0:
log("Processed %d rows..." % i)
row = cur.fetchone()
if row is None:
break
ea = str(row[0])
name1 = row[1]
ea2 = row[2]
name2 = row[3]
desc = row[4]
if name1 in self.matched1 or name2 in self.matched2:
continue
choose.add_item(CChooser.Item(ea, name1, ea2, name2, desc, 1))
self.matched1.add(name1)
self.matched2.add(name2)
cur.close()
def search_small_differences(self, choose):
cur = self.db_cursor()
# Same basic blocks, edges, mnemonics, etc... but different names
sql = """ select distinct f.address ea, f.name name1, df.name name2,
f.names, df.names, df.address ea2
from functions f,
diff.functions df
where f.nodes = df.nodes
and f.edges = df.edges
and f.mnemonics = df.mnemonics
and f.cyclomatic_complexity = df.cyclomatic_complexity
and f.names != '[]'"""
cur.execute(sql)
rows = cur.fetchall()
for row in rows:
ea = str(row[0])
name1 = row[1]
name2 = row[2]
if name1 in self.matched1 or name2 in self.matched2:
continue
s1 = set(json.loads(row[3]))
s2 = set(json.loads(row[4]))
total = max(len(s1), len(s2))
commons = len(s1.intersection(s2))
ratio = (commons * 1.) / total
if ratio >= 0.5:
ea2 = row[5]
choose.add_item(CChooser.Item(ea, name1, ea2, name2, "Nodes, edges, complexity and mnemonics with small differences", ratio))
self.matched1.add(name1)
self.matched2.add(name2)
cur.close()
return
  def find_same_name(self, choose):
    """Heuristic 'Same name': match functions sharing a name in both databases.

    Pairs whose mangled name (mangled_function) or plain name (name) is equal
    on both sides are compared with check_ratio(); perfect matches go to the
    best chooser, everything else to @choose. All four name forms are added
    to the matched sets so later heuristics skip them.
    """
    cur = self.db_cursor()
    sql = """select f.address, f.mangled_function, d.address, f.name, d.name, d.mangled_function,
                    f.pseudocode, d.pseudocode,
                    f.assembly, d.assembly,
                    f.pseudocode_primes, d.pseudocode_primes
               from functions f,
                    diff.functions d
              where (d.mangled_function = f.mangled_function
                 or d.name = f.name)"""
    log_refresh("Finding with heuristic 'Same name'")
    cur.execute(sql)
    rows = cur.fetchall()
    cur.close()

    if len(rows) > 0 and not self.all_functions_matched():
      for row in rows:
        ea = row[0]
        name = row[1]       # f.mangled_function
        ea2 = row[2]
        name1 = row[3]      # f.name
        name2 = row[4]      # d.name
        name2_1 = row[5]    # d.mangled_function
        # Skip the pair if any of its name forms was already matched.
        if name in self.matched1 or name1 in self.matched1 or \
           name2 in self.matched2 or name2_1 in self.matched2:
          continue

        if self.ignore_sub_names and name.startswith("sub_"):
          continue

        ast1 = row[10]
        ast2 = row[11]
        pseudo1 = row[6]
        pseudo2 = row[7]
        asm1 = row[8]
        asm2 = row[9]
        ratio = self.check_ratio(ast1, ast2, pseudo1, pseudo2, asm1, asm2)
        # NOTE(review): both branches use the description "Perfect match,
        # same name" even when ratio < 1.0 — looks intentional (same-name
        # match) but the wording is misleading; confirm upstream.
        if float(ratio) == 1.0:
          self.best_chooser.add_item(CChooser.Item(ea, name, ea2, name, "Perfect match, same name", 1))
        else:
          choose.add_item(CChooser.Item(ea, name, ea2, name, "Perfect match, same name", ratio))

        self.matched1.add(name)
        self.matched1.add(name1)
        self.matched2.add(name2)
        self.matched2.add(name2_1)
  def find_matches(self):
    """Run the main set of matching heuristics.

    Each heuristic is a SQL query joining the primary ("functions") and
    secondary ("diff.functions") tables; candidate pairs are handed to
    add_matches_from_query_ratio() / add_matches_from_query_ratio_max(),
    which place them into the best/partial/unreliable choosers depending
    on the computed similarity ratio. Heuristics marked with
    self.slow_heuristics use broader (more expensive) queries.
    """
    choose = self.partial_chooser

    # Name-based matching is pointless if names are ignored or the call
    # graphs were found to be equal.
    if not self.equal_callgraph and not self.ignore_all_names:
      self.find_same_name(choose)

    sql = """select f.address, f.name, df.address, df.name,
                    'All attributes' description,
                    f.pseudocode, df.pseudocode,
                    f.assembly, df.assembly,
                    f.pseudocode_primes, df.pseudocode_primes
               from functions f,
                    diff.functions df
              where f.nodes = df.nodes
                and f.edges = df.edges
                and f.indegree = df.indegree
                and f.outdegree = df.outdegree
                and f.size = df.size
                and f.instructions = df.instructions
                and f.mnemonics = df.mnemonics
                and f.names = df.names
                and f.prototype2 = df.prototype2
                and f.cyclomatic_complexity = df.cyclomatic_complexity
                and f.primes_value = df.primes_value
                and f.bytes_hash = df.bytes_hash
                and f.pseudocode_hash1 = df.pseudocode_hash1
                and f.pseudocode_primes = df.pseudocode_primes
                and f.pseudocode_hash2 = df.pseudocode_hash2
                and f.pseudocode_hash3 = df.pseudocode_hash3
                and f.strongly_connected = df.strongly_connected
                and f.loops = df.loops
                and f.tarjan_topological_sort = df.tarjan_topological_sort
                and f.strongly_connected_spp = df.strongly_connected_spp
              union
             select f.address, f.name, df.address, df.name,
                    'Most attributes' description,
                    f.pseudocode, df.pseudocode,
                    f.assembly, df.assembly,
                    f.pseudocode_primes, df.pseudocode_primes
               from functions f,
                    diff.functions df
              where f.nodes = df.nodes
                and f.edges = df.edges
                and f.indegree = df.indegree
                and f.outdegree = df.outdegree
                and f.size = df.size
                and f.instructions = df.instructions
                and f.mnemonics = df.mnemonics
                and f.names = df.names
                and f.prototype2 = df.prototype2
                and f.cyclomatic_complexity = df.cyclomatic_complexity
                and f.primes_value = df.primes_value
                and f.bytes_hash = df.bytes_hash
                and f.strongly_connected = df.strongly_connected
                and f.loops = df.loops
                and f.tarjan_topological_sort = df.tarjan_topological_sort
                and f.strongly_connected_spp = df.strongly_connected_spp"""
    log_refresh("Finding with heuristic 'All or most attributes'")
    self.add_matches_from_query_ratio(sql, self.best_chooser, self.partial_chooser)

    sql = """select f.address, f.name, df.address, df.name,
                    'Same address, nodes, edges and primes (re-ordered instructions)' description,
                    f.pseudocode, df.pseudocode,
                    f.assembly, df.assembly,
                    f.pseudocode_primes, df.pseudocode_primes
               from functions f,
                    diff.functions df
              where f.rva = df.rva
                and f.instructions = df.instructions
                and f.nodes = df.nodes
                and f.edges = df.edges
                and f.primes_value = df.primes_value
                and f.nodes > 3"""
    log_refresh("Finding with heuristic 'Same address, nodes, edges and primes (re-ordered instructions)'")
    self.add_matches_from_query_ratio_max(sql, self.partial_chooser, self.unreliable_chooser, 0.5)

    sql = """ select distinct f.address ea, f.name name1, df.address ea2, df.name name2,
                     'Import names hash',
                     f.pseudocode, df.pseudocode,
                     f.assembly, df.assembly,
                     f.pseudocode_primes, df.pseudocode_primes
                from functions f,
                     diff.functions df
               where f.names = df.names
                 and f.names != '[]'
                 and f.nodes = df.nodes
                 and f.edges = df.edges
                 and f.instructions = df.instructions"""
    log_refresh("Finding with heuristic 'Import names hash'")
    self.add_matches_from_query_ratio(sql, self.best_chooser, self.partial_chooser)

    sql = """ select f.address ea, f.name name1, df.address ea2, df.name name2,
                     'Nodes, edges, complexity, mnemonics, names, prototype2, in-degree and out-degree',
                     f.pseudocode, df.pseudocode,
                     f.assembly, df.assembly,
                     f.pseudocode_primes, df.pseudocode_primes
                from functions f,
                     diff.functions df
               where f.nodes = df.nodes
                 and f.edges = df.edges
                 and f.mnemonics = df.mnemonics
                 and f.names = df.names
                 and f.cyclomatic_complexity = df.cyclomatic_complexity
                 and f.prototype2 = df.prototype2
                 and f.indegree = df.indegree
                 and f.outdegree = df.outdegree
                 and f.nodes > 3
                 and f.edges > 3
                 and f.names != '[]'
               union
              select f.address ea, f.name name1, df.address ea2, df.name name2,
                     'Nodes, edges, complexity, mnemonics, names and prototype2' description,
                     f.pseudocode, df.pseudocode,
                     f.assembly, df.assembly,
                     f.pseudocode_primes, df.pseudocode_primes
                from functions f,
                     diff.functions df
               where f.nodes = df.nodes
                 and f.edges = df.edges
                 and f.mnemonics = df.mnemonics
                 and f.names = df.names
                 and f.names != '[]'
                 and f.cyclomatic_complexity = df.cyclomatic_complexity
                 and f.prototype2 = df.prototype2"""
    log_refresh("Finding with heuristic 'Nodes, edges, complexity, mnemonics, names, prototype, in-degree and out-degree'")
    self.add_matches_from_query_ratio(sql, self.partial_chooser, self.partial_chooser)

    sql = """ select f.address ea, f.name name1, df.address ea2, df.name name2,
                     'Mnemonics and names' description,
                     f.pseudocode, df.pseudocode,
                     f.assembly, df.assembly,
                     f.pseudocode_primes, df.pseudocode_primes
                from functions f,
                     diff.functions df
               where f.mnemonics = df.mnemonics
                 and f.instructions = df.instructions
                 and f.names = df.names
                 and f.names != '[]'"""
    log_refresh("Finding with heuristic 'Mnemonics and names'")
    self.add_matches_from_query_ratio(sql, choose, choose)

    sql = """ select f.address ea, f.name name1, df.address ea2, df.name name2,
                     'Mnemonics small-primes-product' description,
                     f.pseudocode, df.pseudocode,
                     f.assembly, df.assembly,
                     f.pseudocode_primes, df.pseudocode_primes
                from functions f,
                     diff.functions df
               where f.mnemonics_spp = df.mnemonics_spp
                 and f.instructions = df.instructions
                 and df.instructions > 5"""
    log_refresh("Finding with heuristic 'Mnemonics small-primes-product'")
    self.add_matches_from_query_ratio(sql, choose, choose)

    # Search using some of the previous criterias but calculating the
    # edit distance
    log_refresh("Finding with heuristic 'Small names difference'")
    self.search_small_differences(choose)

    # Slow mode compares all three fuzzy pseudo-code hashes; fast mode
    # only the first one.
    if self.slow_heuristics:
      sql = """select distinct f.address, f.name, df.address, df.name, 'Pseudo-code fuzzy hash' description,
                      f.pseudocode, df.pseudocode,
                      f.assembly, df.assembly,
                      f.pseudocode_primes, df.pseudocode_primes
                 from functions f,
                      diff.functions df
                where df.pseudocode_hash1 = f.pseudocode_hash1
                   or df.pseudocode_hash2 = f.pseudocode_hash2
                   or df.pseudocode_hash3 = f.pseudocode_hash3"""
      log_refresh("Finding with heuristic 'Pseudo-code fuzzy hashes'")
      self.add_matches_from_query_ratio(sql, self.best_chooser, choose)
    else:
      sql = """select distinct f.address, f.name, df.address, df.name, 'Pseudo-code fuzzy hash' description,
                      f.pseudocode, df.pseudocode,
                      f.assembly, df.assembly,
                      f.pseudocode_primes, df.pseudocode_primes
                 from functions f,
                      diff.functions df
                where df.pseudocode_hash1 = f.pseudocode_hash1"""
      log_refresh("Finding with heuristic 'Pseudo-code fuzzy hash'")
      self.add_matches_from_query_ratio(sql, self.best_chooser, choose)

    sql = """select distinct f.address, f.name, df.address, df.name, 'Similar pseudo-code and names' description,
                    f.pseudocode, df.pseudocode,
                    f.pseudocode, df.pseudocode,
                    f.pseudocode_primes, df.pseudocode_primes
               from functions f,
                    diff.functions df
              where f.pseudocode_lines = df.pseudocode_lines
                and f.names = df.names
                and df.names != '[]'
                and df.pseudocode_lines > 5
                and df.pseudocode is not null
                and f.pseudocode is not null"""
    log_refresh("Finding with heuristic 'Similar pseudo-code and names'")
    self.add_matches_from_query_ratio(sql, self.best_chooser, self.partial_chooser, self.unreliable_chooser)

    if self.slow_heuristics:
      sql = """select distinct f.address, f.name, df.address, df.name, 'Similar pseudo-code' description,
                      f.pseudocode, df.pseudocode,
                      f.pseudocode, df.pseudocode,
                      f.pseudocode_primes, df.pseudocode_primes
                 from functions f,
                      diff.functions df
                where f.pseudocode_lines = df.pseudocode_lines
                  and df.pseudocode_lines > 5
                  and df.pseudocode is not null
                  and f.pseudocode is not null"""
      log_refresh("Finding with heuristic 'Similar pseudo-code'")
      self.add_matches_from_query_ratio_max(sql, choose, self.unreliable_chooser, 0.6)

    sql = """select distinct f.address, f.name, df.address, df.name, 'Pseudo-code fuzzy AST hash' description,
                    f.pseudocode, df.pseudocode,
                    f.assembly, df.assembly,
                    f.pseudocode_primes, df.pseudocode_primes
               from functions f,
                    diff.functions df
              where df.pseudocode_primes = f.pseudocode_primes
                and f.pseudocode_lines > 5
                and length(f.pseudocode_primes) >= 35"""
    log_refresh("Finding with heuristic 'Pseudo-code fuzzy AST hash'")
    self.add_matches_from_query_ratio(sql, self.best_chooser, choose)

    if self.slow_heuristics:
      sql = """ select distinct f.address, f.name, df.address, df.name, 'Partial pseudo-code fuzzy hash' description,
                       f.pseudocode, df.pseudocode,
                       f.assembly, df.assembly,
                       f.pseudocode_primes, df.pseudocode_primes
                  from functions f,
                       diff.functions df
                 where substr(df.pseudocode_hash1, 1, 16) = substr(f.pseudocode_hash1, 1, 16)
                    or substr(df.pseudocode_hash2, 1, 16) = substr(f.pseudocode_hash2, 1, 16)
                    or substr(df.pseudocode_hash3, 1, 16) = substr(f.pseudocode_hash3, 1, 16)"""
      log_refresh("Finding with heuristic 'Partial pseudo-code fuzzy hash'")
      self.add_matches_from_query_ratio_max(sql, choose, self.unreliable_chooser, 0.5)

    sql = """select f.address, f.name, df.address, df.name,
                    'Topological sort hash' description,
                    f.pseudocode, df.pseudocode,
                    f.assembly, df.assembly,
                    f.pseudocode_primes, df.pseudocode_primes
               from functions f,
                    diff.functions df
              where f.strongly_connected = df.strongly_connected
                and f.tarjan_topological_sort = df.tarjan_topological_sort
                and f.strongly_connected > 3"""
    log_refresh("Finding with heuristic 'Topological sort hash'")
    self.add_matches_from_query_ratio(sql, self.best_chooser, self.partial_chooser, self.unreliable_chooser)

    sql = """ select f.address, f.name, df.address, df.name, 'Same high complexity, prototype and names' description,
                     f.pseudocode, df.pseudocode,
                     f.assembly, df.assembly,
                     f.pseudocode_primes, df.pseudocode_primes
                from functions f,
                     diff.functions df
               where f.names = df.names
                 and f.cyclomatic_complexity = df.cyclomatic_complexity
                 and f.cyclomatic_complexity >= 20
                 and f.prototype2 = df.prototype2
                 and df.names != '[]'"""
    log_refresh("Finding with heuristic 'Same high complexity, prototype and names'")
    self.add_matches_from_query_ratio(sql, choose, choose)

    sql = """ select f.address, f.name, df.address, df.name, 'Same high complexity and names' description,
                     f.pseudocode, df.pseudocode,
                     f.assembly, df.assembly,
                     f.pseudocode_primes, df.pseudocode_primes
                from functions f,
                     diff.functions df
               where f.names = df.names
                 and f.cyclomatic_complexity = df.cyclomatic_complexity
                 and f.cyclomatic_complexity >= 15
                 and df.names != '[]'"""
    log_refresh("Finding with heuristic 'Same high complexity and names'")
    self.add_matches_from_query_ratio_max(sql, choose, self.unreliable_chooser, 0.5)

    # Same heuristic in both branches; slow mode accepts smaller strongly
    # connected components (> 1 instead of > 3).
    if self.slow_heuristics:
      sql = """select f.address, f.name, df.address, df.name, 'Strongly connected components' description,
                      f.pseudocode, df.pseudocode,
                      f.assembly, df.assembly,
                      f.pseudocode_primes, df.pseudocode_primes
                 from functions f,
                      diff.functions df
                where f.strongly_connected = df.strongly_connected
                  and df.strongly_connected > 1
                  and f.nodes > 5 and df.nodes > 5
                  and f.strongly_connected_spp > 1
                  and df.strongly_connected_spp > 1"""
      log_refresh("Finding with heuristic 'Strongly connected components'")
      self.add_matches_from_query_ratio_max(sql, self.partial_chooser, None, 0.80)
    else:
      sql = """select f.address, f.name, df.address, df.name, 'Strongly connected components' description,
                      f.pseudocode, df.pseudocode,
                      f.assembly, df.assembly,
                      f.pseudocode_primes, df.pseudocode_primes
                 from functions f,
                      diff.functions df
                where f.strongly_connected = df.strongly_connected
                  and df.strongly_connected > 3
                  and f.nodes > 5 and df.nodes > 5
                  and f.strongly_connected_spp > 1
                  and df.strongly_connected_spp > 1"""
      log_refresh("Finding with heuristic 'Strongly connected components'")
      self.add_matches_from_query_ratio_max(sql, self.partial_chooser, None, 0.80)

    if self.slow_heuristics:
      sql = """select f.address, f.name, df.address, df.name, 'Loop count' description,
                      f.pseudocode, df.pseudocode,
                      f.assembly, df.assembly,
                      f.pseudocode_primes, df.pseudocode_primes
                 from functions f,
                      diff.functions df
                where f.loops = df.loops
                  and df.loops > 1
                  and f.nodes > 3 and df.nodes > 3"""
      log_refresh("Finding with heuristic 'Loop count'")
      self.add_matches_from_query_ratio_max(sql, self.partial_chooser, None, 0.49)

    sql = """ select f.address, f.name, df.address, df.name, 'Strongly connected components small-primes-product' description,
                     f.pseudocode, df.pseudocode,
                     f.assembly, df.assembly,
                     f.pseudocode_primes, df.pseudocode_primes
                from functions f,
                     diff.functions df
               where f.strongly_connected_spp = df.strongly_connected_spp
                 and df.strongly_connected_spp > 1"""
    log_refresh("Finding with heuristic 'Strongly connected components small-primes-product'")
    self.add_matches_from_query_ratio(sql, self.best_chooser, self.partial_chooser, self.unreliable_chooser)

    sql = """ select f.address, f.name, df.address, df.name, 'Same names and order' description,
                     f.pseudocode, df.pseudocode,
                     f.assembly, df.assembly,
                     f.pseudocode_primes, df.pseudocode_primes
                from functions f,
                     diff.functions df
               where f.names = df.names
                 and df.names != '[]'"""
    log_refresh("Finding with heuristic 'Same names and order'")
    self.add_matches_from_query_ratio(sql, choose, choose)

    sql = """select f.address, f.name, df.address, df.name,
                    'Same nodes, edges and strongly connected components' description,
                    f.pseudocode, df.pseudocode,
                    f.assembly, df.assembly,
                    f.pseudocode_primes, df.pseudocode_primes
               from functions f,
                    diff.functions df
              where f.nodes = df.nodes
                and f.edges = df.edges
                and f.strongly_connected = df.strongly_connected
                and df.nodes > 4"""
    log_refresh("Finding with heuristic 'Same nodes, edges and strongly connected components'")
    self.add_matches_from_query_ratio(sql, self.best_chooser, choose, self.unreliable_chooser)
  def find_experimental_matches(self):
    """Run the experimental matching heuristics (enabled via self.experimental).

    These target small functions and low-complexity cases where the ratio
    calculation is less reliable, so results default to the partial or
    unreliable choosers.
    """
    choose = self.unreliable_chooser

    if self.slow_heuristics:
      sql = """select distinct f.address, f.name, df.address, df.name, 'Similar small pseudo-code' description,
                      f.pseudocode, df.pseudocode,
                      f.pseudocode, df.pseudocode,
                      f.pseudocode_primes, df.pseudocode_primes
                 from functions f,
                      diff.functions df
                where f.pseudocode_lines = df.pseudocode_lines
                  and df.pseudocode_lines <= 5
                  and df.pseudocode is not null
                  and f.pseudocode is not null"""
      log_refresh("Finding with heuristic 'Similar small pseudo-code'")
      self.add_matches_from_query_ratio_max(sql, self.partial_chooser, choose, 0.49)

    sql = """select distinct f.address, f.name, df.address, df.name, 'Small pseudo-code fuzzy AST hash' description,
                    f.pseudocode, df.pseudocode,
                    f.assembly, df.assembly,
                    f.pseudocode_primes, df.pseudocode_primes
               from functions f,
                    diff.functions df
              where df.pseudocode_primes = f.pseudocode_primes
                and f.pseudocode_lines <= 5"""
    log_refresh("Finding with heuristic 'Small pseudo-code fuzzy AST hash'")
    self.add_matches_from_query_ratio(sql, self.partial_chooser, choose)

    sql = """select f.address, f.name, df.address, df.name, 'Equal small pseudo-code' description,
                    f.pseudocode, df.pseudocode,
                    f.assembly, df.assembly,
                    f.pseudocode_primes, df.pseudocode_primes
               from functions f,
                    diff.functions df
              where f.pseudocode = df.pseudocode
                and df.pseudocode is not null
                and f.pseudocode_lines < 5"""
    log_refresh("Finding with heuristic 'Equal small pseudo-code'")
    self.add_matches_from_query_ratio(sql, self.best_chooser, self.partial_chooser)

    # NOTE(review): the description string says "Same high complexity,
    # prototype and names" but the query uses cyclomatic_complexity < 20 and
    # the log message says "low" — the description looks copy-pasted from
    # the high-complexity heuristic; confirm upstream.
    sql = """ select f.address, f.name, df.address, df.name, 'Same high complexity, prototype and names' description,
                     f.pseudocode, df.pseudocode,
                     f.assembly, df.assembly,
                     f.pseudocode_primes, df.pseudocode_primes
                from functions f,
                     diff.functions df
               where f.names = df.names
                 and f.cyclomatic_complexity = df.cyclomatic_complexity
                 and f.cyclomatic_complexity < 20
                 and f.prototype2 = df.prototype2
                 and df.names != '[]'"""
    log_refresh("Finding with heuristic 'Same low complexity, prototype and names'")
    self.add_matches_from_query_ratio_max(sql, self.partial_chooser, choose, 0.5)

    sql = """ select f.address, f.name, df.address, df.name, 'Same low complexity and names' description,
                     f.pseudocode, df.pseudocode,
                     f.assembly, df.assembly,
                     f.pseudocode_primes, df.pseudocode_primes
                from functions f,
                     diff.functions df
               where f.names = df.names
                 and f.cyclomatic_complexity = df.cyclomatic_complexity
                 and f.cyclomatic_complexity < 15
                 and df.names != '[]'"""
    log_refresh("Finding with heuristic 'Same low complexity and names'")
    self.add_matches_from_query_ratio_max(sql, self.partial_chooser, choose, 0.5)

    if self.slow_heuristics:
      # For large databases (>25k functions) it may cause, for a reason,
      # the following error: OperationalError: database or disk is full
      sql = """ select f.address, f.name, df.address, df.name,
                       'Same graph' description,
                       f.pseudocode, df.pseudocode,
                       f.assembly, df.assembly,
                       f.pseudocode_primes, df.pseudocode_primes
                  from functions f,
                       diff.functions df
                 where f.nodes = df.nodes
                   and f.edges = df.edges
                   and f.indegree = df.indegree
                   and f.outdegree = df.outdegree
                   and f.cyclomatic_complexity = df.cyclomatic_complexity
                   and f.strongly_connected = df.strongly_connected
                   and f.loops = df.loops
                   and f.tarjan_topological_sort = df.tarjan_topological_sort
                   and f.strongly_connected_spp = df.strongly_connected_spp
                 order by
                       case when f.size = df.size then 1 else 0 end +
                       case when f.instructions = df.instructions then 1 else 0 end +
                       case when f.mnemonics = df.mnemonics then 1 else 0 end +
                       case when f.names = df.names then 1 else 0 end +
                       case when f.prototype2 = df.prototype2 then 1 else 0 end +
                       case when f.primes_value = df.primes_value then 1 else 0 end +
                       case when f.bytes_hash = df.bytes_hash then 1 else 0 end +
                       case when f.pseudocode_hash1 = df.pseudocode_hash1 then 1 else 0 end +
                       case when f.pseudocode_primes = df.pseudocode_primes then 1 else 0 end +
                       case when f.pseudocode_hash2 = df.pseudocode_hash2 then 1 else 0 end +
                       case when f.pseudocode_hash3 = df.pseudocode_hash3 then 1 else 0 end DESC"""
      log_refresh("Finding with heuristic 'Same graph'")
      self.add_matches_from_query_ratio(sql, self.best_chooser, self.partial_chooser, self.unreliable_chooser)
  def find_unreliable_matches(self):
    """Run the likely-unreliable matching heuristics (enabled via self.unreliable).

    These queries use very loose structural criteria (bytes hash, node/edge
    counts, loop counts, complexity alone), so most results land in the
    partial or unreliable choosers.
    """
    choose = self.unreliable_chooser

    if self.slow_heuristics:
      sql = """select f.address, f.name, df.address, df.name, 'Strongly connected components' description,
                      f.pseudocode, df.pseudocode,
                      f.assembly, df.assembly,
                      f.pseudocode_primes, df.pseudocode_primes
                 from functions f,
                      diff.functions df
                where f.strongly_connected = df.strongly_connected
                  and df.strongly_connected > 2"""
      log_refresh("Finding with heuristic 'Strongly connected components'")
      self.add_matches_from_query_ratio_max(sql, self.partial_chooser, choose, 0.54)

      sql = """select f.address, f.name, df.address, df.name, 'Loop count' description,
                      f.pseudocode, df.pseudocode,
                      f.assembly, df.assembly,
                      f.pseudocode_primes, df.pseudocode_primes
                 from functions f,
                      diff.functions df
                where f.loops = df.loops
                  and df.loops > 1"""
      log_refresh("Finding with heuristic 'Loop count'")
      self.add_matches_from_query_ratio(sql, self.partial_chooser, choose)

    sql = """ select distinct f.address ea, f.name name1, df.address ea2, df.name name2,
                     'Bytes hash' description,
                     f.pseudocode, df.pseudocode,
                     f.assembly, df.assembly,
                     f.pseudocode_primes, df.pseudocode_primes
                from functions f,
                     diff.functions df
               where f.bytes_hash = df.bytes_hash
                 and f.instructions = df.instructions"""
    log_refresh("Finding with heuristic 'Bytes hash'")
    self.add_matches_from_query_ratio(sql, self.best_chooser, self.partial_chooser, self.unreliable_chooser)

    sql = """ select distinct f.address ea, f.name name1, df.address ea2, df.name name2,
                     'Nodes, edges, complexity and mnemonics' description,
                     f.pseudocode, df.pseudocode,
                     f.assembly, df.assembly,
                     f.pseudocode_primes, df.pseudocode_primes
                from functions f,
                     diff.functions df
               where f.nodes = df.nodes
                 and f.edges = df.edges
                 and f.mnemonics = df.mnemonics
                 and f.cyclomatic_complexity = df.cyclomatic_complexity
                 and f.nodes > 1 and f.edges > 0"""
    log_refresh("Finding with heuristic 'Nodes, edges, complexity and mnemonics'")
    self.add_matches_from_query_ratio(sql, self.best_chooser, self.partial_chooser)

    sql = """ select distinct f.address ea, f.name name1, df.address ea2, df.name name2,
                     'Nodes, edges, complexity and prototype' description,
                     f.pseudocode, df.pseudocode,
                     f.assembly, df.assembly,
                     f.pseudocode_primes, df.pseudocode_primes
                from functions f,
                     diff.functions df
               where f.nodes = df.nodes
                 and f.edges = df.edges
                 and f.prototype2 = df.prototype2
                 and f.cyclomatic_complexity = df.cyclomatic_complexity
                 and f.prototype2 != 'int()'"""
    log_refresh("Finding with heuristic 'Nodes, edges, complexity and prototype'")
    self.add_matches_from_query_ratio(sql, self.partial_chooser, choose)

    sql = """ select distinct f.address ea, f.name name1, df.address ea2, df.name name2,
                     'Nodes, edges, complexity, in-degree and out-degree' description,
                     f.pseudocode, df.pseudocode,
                     f.assembly, df.assembly,
                     f.pseudocode_primes, df.pseudocode_primes
                from functions f,
                     diff.functions df
               where f.nodes = df.nodes
                 and f.edges = df.edges
                 and f.cyclomatic_complexity = df.cyclomatic_complexity
                 and f.nodes > 1 and f.edges > 0
                 and f.indegree = df.indegree
                 and f.outdegree = df.outdegree"""
    log_refresh("Finding with heuristic 'Nodes, edges, complexity, in-degree and out-degree'")
    self.add_matches_from_query_ratio(sql, self.partial_chooser, choose)

    sql = """ select distinct f.address ea, f.name name1, df.address ea2, df.name name2,
                     'Nodes, edges and complexity' description,
                     f.pseudocode, df.pseudocode,
                     f.assembly, df.assembly,
                     f.pseudocode_primes, df.pseudocode_primes
                from functions f,
                     diff.functions df
               where f.nodes = df.nodes
                 and f.edges = df.edges
                 and f.cyclomatic_complexity = df.cyclomatic_complexity
                 and f.nodes > 1 and f.edges > 0"""
    log_refresh("Finding with heuristic 'Nodes, edges and complexity'")
    self.add_matches_from_query_ratio(sql, self.partial_chooser, choose)

    # NOTE(review): description says "Similar small pseudo-code" but the
    # query requires pseudocode_lines > 5 — confirm intended.
    sql = """select f.address, f.name, df.address, df.name, 'Similar small pseudo-code' description,
                    f.pseudocode, df.pseudocode,
                    f.assembly, df.assembly,
                    f.pseudocode_primes, df.pseudocode_primes
               from functions f,
                    diff.functions df
              where df.pseudocode is not null
                and f.pseudocode is not null
                and f.pseudocode_lines = df.pseudocode_lines
                and df.pseudocode_lines > 5"""
    log_refresh("Finding with heuristic 'Similar small pseudo-code'")
    self.add_matches_from_query_ratio_max(sql, self.partial_chooser, self.unreliable_chooser, 0.5)

    sql = """ select f.address, f.name, df.address, df.name, 'Same high complexity' description,
                     f.pseudocode, df.pseudocode,
                     f.assembly, df.assembly,
                     f.pseudocode_primes, df.pseudocode_primes
                from functions f,
                     diff.functions df
               where f.cyclomatic_complexity = df.cyclomatic_complexity
                 and f.cyclomatic_complexity >= 50"""
    log_refresh("Finding with heuristic 'Same high complexity'")
    self.add_matches_from_query_ratio(sql, self.partial_chooser, choose)
def find_unmatched(self):
cur = self.db_cursor()
sql = "select name from functions"
cur.execute(sql)
rows = cur.fetchall()
if len(rows) > 0:
choose = CChooser("Unmatched in secondary", self, False)
for row in rows:
name = row[0]
demangled_name = Demangle(str(name), INF_SHORT_DN)
if demangled_name is not None:
name = demangled_name
if name not in self.matched1:
ea = LocByName(str(name))
choose.add_item(CChooser.Item(ea, name))
self.unmatched_second = choose
sql = "select name, address from diff.functions"
cur.execute(sql)
rows = cur.fetchall()
if len(rows) > 0:
choose = CChooser("Unmatched in primary", self, False)
for row in rows:
name = row[0]
demangled_name = Demangle(str(name), INF_SHORT_DN)
if demangled_name is not None:
name = demangled_name
if name not in self.matched2:
ea = row[1]
choose.add_item(CChooser.Item(ea, name))
self.unmatched_primary = choose
cur.close()
def create_choosers(self):
self.unreliable_chooser = CChooser("Unreliable matches", self)
self.partial_chooser = CChooser("Partial matches", self)
self.best_chooser = CChooser("Best matches", self)
def show_choosers(self, force=False):
if len(self.best_chooser.items) > 0:
self.best_chooser.show(force)
if len(self.partial_chooser.items) > 0:
self.partial_chooser.show(force)
if self.unreliable_chooser is not None and len(self.unreliable_chooser.items) > 0:
self.unreliable_chooser.show(force)
if self.unmatched_primary is not None and len(self.unmatched_primary.items) > 0:
self.unmatched_primary.show(force)
if self.unmatched_second is not None and len(self.unmatched_second.items) > 0:
self.unmatched_second.show(force)
  def register_menu(self):
    """Register the IDA menu entry and F3 hotkey to re-open the results."""
    global g_bindiff
    # Keep a global reference so the module-level show_choosers() callback
    # can reach this instance after the script finishes.
    g_bindiff = self
    idaapi.add_menu_item("Edit/Plugins/", "Diaphora - Show results", "F3", 0, show_choosers, ())
    Warning("""AUTOHIDE REGISTRY\nIf you close one tab you can always re-open it by pressing F3
or selecting Edit -> Plugins -> Diaphora - Show results""")
def diff(self, db):
self.last_diff_db = db
cur = self.db_cursor()
cur.execute('attach "%s" as diff' % db)
try:
cur.execute("select value from diff.version")
except:
log("Error: %s " % sys.exc_info()[1])
Warning("The selected file does not like a valid SQLite exported database!")
cur.close()
return False
row = cur.fetchone()
if not row:
Warning("Invalid database!")
return False
if row[0] != VERSION_VALUE:
Warning("The database is from a different version (current %s, database %s)!" % (VERSION_VALUE, row[0]))
return False
# Create the choosers
self.create_choosers()
try:
log_refresh("Performing diffing...", True)
do_continue = True
if self.equal_db():
log("The databases seems to be 100% equal")
if askyn_c(0, "HIDECANCEL\nThe databases seems to be 100% equal. Do you want to continue anyway?") != 1:
do_continue = False
if do_continue:
# Compare the call graphs
self.check_callgraph()
# Find the unmodified functions
log_refresh("Finding best matches...")
self.find_equal_matches()
# Find the modified functions
log_refresh("Finding partial matches")
self.find_matches()
if self.unreliable:
# Find using likely unreliable methods modified functions
log_refresh("Finding probably unreliable matches")
self.find_unreliable_matches()
if self.experimental:
# Find using experimental methods modified functions
log_refresh("Finding experimental matches")
self.find_experimental_matches()
# Show the list of unmatched functions in both databases
log_refresh("Finding unmatched functions")
self.find_unmatched()
# And, finally, show the list of best and partial matches and
# register the hotkey for re-opening results
self.show_choosers()
self.register_menu()
log("Done")
finally:
cur.close()
hide_wait_box()
return True
#-----------------------------------------------------------------------
def remove_file(filename):
  """Delete the SQLite database file @filename.

  If the OS refuses to delete it, fall back to dropping all of the
  exporter's tables so a subsequent export starts from a clean database.

  Fixes applied: the bare ``except:`` is narrowed to ``OSError`` (the only
  exception ``os.remove`` raises for filesystem failures), the cursor is
  actually used for the DROP statements, and the table list is no longer
  misleadingly named ``funcs``.
  """
  try:
    os.remove(filename)
  except OSError:
    # Fix for Bug #5: https://github.com/joxeankoret/diaphora/issues/5
    #
    # For some reason, in Windows, the handle to the SQLite database is
    # not closed, and I really try to be sure that all the databases are
    # detached, no cursor is leaked, etc... So, in case we cannot remove
    # the database file because it's still being used by IDA in Windows
    # for some unknown reason, just drop the database's tables and after
    # that continue normally.
    with sqlite3.connect(filename) as db:
      cur = db.cursor()
      try:
        tables = ["functions", "program", "program_data", "version",
                  "instructions", "basic_blocks", "bb_relations",
                  "bb_instructions", "function_bblocks"]
        for table in tables:
          cur.execute("drop table if exists %s" % table)
      finally:
        cur.close()
class BinDiffOptions:
  """Container for all export/diff options with their default values.

  Defaults adapt to the size of the currently opened IDA database: relaxed
  ratio calculations kick in above 20k functions and summaries-only export
  above 100k functions.
  """
  def __init__(self, **kwargs):
    # Number of functions in the currently opened IDA database; drives the
    # size-dependent defaults below.
    total_functions = len(list(Functions()))
    # Default output: the current IDB path with a .sqlite extension.
    self.file_out = kwargs.get('file_out', os.path.splitext(GetIdbPath())[0] + ".sqlite")
    self.file_in = kwargs.get('file_in', '')
    self.use_decompiler = kwargs.get('use_decompiler', True)
    self.unreliable = kwargs.get('unreliable', True)
    self.slow = kwargs.get('slow', True)
    # Enable, by default, relaxed calculations on difference ratios for
    # 'big' databases (>20k functions)
    self.relax = kwargs.get('relax', total_functions > 20000)
    if self.relax:
      Warning(MSG_RELAXED_RATIO_ENABLED)
    self.experimental = kwargs.get('experimental', False)
    # Address range to export; defaults to the whole database.
    self.min_ea = kwargs.get('min_ea', MinEA())
    self.max_ea = kwargs.get('max_ea', MaxEA())
    self.ida_subs = kwargs.get('ida_subs', True)
    self.ignore_sub_names = kwargs.get('ignore_sub_names', True)
    self.ignore_all_names = kwargs.get('ignore_all_names', False)
    # Enable, by default, exporting only function summaries for huge dbs.
    self.func_summaries_only = kwargs.get('func_summaries_only', total_functions > 100000)
#-----------------------------------------------------------------------
def _diff_or_export(use_ui, **options):
global g_bindiff
total_functions = len(list(Functions()))
if GetIdbPath() == "" or total_functions == 0:
Warning("No IDA database opened or no function in the database.\nPlease open an IDA database and create some functions before running this script.")
return
opts = BinDiffOptions(**options)
if use_ui:
x = CBinDiffExporterSetup()
x.Compile()
x.set_options(opts)
if not x.Execute():
return
opts = x.get_options()
if opts.file_out == opts.file_in:
Warning("Both databases are the same file!")
return
elif opts.file_out == "" or len(opts.file_out) < 5:
Warning("No output database selected or invalid filename. Please select a database file.")
return
elif opts.file_out[len(opts.file_out)-4:].lower() in [".idb", ".i64"] or opts.file_in[len(opts.file_in)-4:].lower() in [".idb", ".i64"]:
Warning("One of the selected databases is an IDA database (IDB or I64), not a SQLite database!")
return
elif opts.file_out.lower().endswith(".til") or opts.file_in.lower().endswith(".id0") or opts.file_in.lower().endswith(".id1") or opts.file_in.lower().endswith(".nam"):
Warning("One of the selected databases is an IDA temporary file, not a SQLite database!")
return
export = True
if os.path.exists(opts.file_out):
ret = askyn_c(0, "Export database already exists. Do you want to overwrite it?")
if ret == -1:
log("Cancelled")
return
if ret == 0:
export = False
if export:
if g_bindiff is not None:
g_bindiff = None
remove_file(opts.file_out)
log("Database %s removed" % repr(opts.file_out))
try:
bd = CBinDiff(opts.file_out)
bd.use_decompiler_always = opts.use_decompiler
bd.unreliable = opts.unreliable
bd.slow_heuristics = opts.slow
bd.relaxed_ratio = opts.relax
bd.experimental = opts.experimental
bd.min_ea = opts.min_ea
bd.max_ea = opts.max_ea
bd.ida_subs = opts.ida_subs
bd.ignore_sub_names = opts.ignore_sub_names
bd.ignore_all_names = opts.ignore_all_names
bd.function_summaries_only = opts.func_summaries_only
bd.max_processed_rows = MAX_PROCESSED_ROWS * max(total_functions / 20000, 1)
bd.timeout = TIMEOUT_LIMIT * max(total_functions / 20000, 1)
if export:
if os.getenv("DIAPHORA_PROFILE") is not None:
log("*** Profiling export ***")
import cProfile
profiler = cProfile.Profile()
profiler.runcall(bd.export)
profiler.print_stats(sort="time")
else:
bd.export()
log("Database exported")
if opts.file_in != "":
if os.getenv("DIAPHORA_PROFILE") is not None:
log("*** Profiling diff ***")
import cProfile
profiler = cProfile.Profile()
profiler.runcall(bd.diff, opts.file_in)
profiler.print_stats(sort="time")
else:
bd.diff(opts.file_in)
except:
print("Error: %s" % sys.exc_info()[1])
traceback.print_exc()
return bd
def diff_or_export_ui():
  """Interactive entry point: show the setup dialog, then export/diff."""
  return _diff_or_export(use_ui=True)
def diff_or_export(**options):
  """Programmatic entry point: run with the given BinDiffOptions kwargs."""
  return _diff_or_export(False, **options)
if __name__ == "__main__":
  # Batch (non-interactive) mode: when DIAPHORA_AUTO is set in the
  # environment, export the current IDA database straight to the SQLite
  # file named by DIAPHORA_EXPORT_FILE, without showing any dialog.
  if os.getenv("DIAPHORA_AUTO") is not None:
    file_out = os.getenv("DIAPHORA_EXPORT_FILE")
    if file_out is None:
      raise Exception("No export file specified!")
    # NOTE(review): when DIAPHORA_USE_DECOMPILER is set, its *string*
    # value is used as the flag, so any non-empty value (even "0")
    # enables the decompiler — confirm this is intended.
    use_decompiler = os.getenv("DIAPHORA_USE_DECOMPILER")
    if use_decompiler is None:
      use_decompiler = False
    # NOTE(review): CBinDiff() already opens/creates file_out (its
    # constructor calls open_db), yet the file is removed right after —
    # verify the removal actually takes effect before export().
    bd = CBinDiff(file_out)
    bd.use_decompiler_always = use_decompiler
    if os.path.exists(file_out):
      if g_bindiff is not None:
        g_bindiff = None
      remove_file(file_out)
      log("Database %s removed" % repr(file_out))
    bd.export()
  else:
    # Interactive mode: show the exporter setup dialog.
    diff_or_export_ui()
# Changed minor version
#!/usr/bin/python
"""
Diaphora, a diffing plugin for IDA
Copyright (c) 2015, Joxean Koret
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
KNOWN BUGS:
[ ] The choosers aren't updated when importing stuff.
TODO (for future versions):
[ ] Heuristics based on the call graph. This is why BinDiff was/is the
best one.
  [ ] Heuristics based on switches (SPP with get_switch_info_ex(x).ncases?).
[ ] Instruction-level comment porting.
[ ] Import all names (global variables, etc...).
"""
import os
import sys
import time
import json
import decimal
import sqlite3
import traceback
from hashlib import md5
from cStringIO import StringIO
from difflib import SequenceMatcher, HtmlDiff
from pygments import highlight
from pygments.lexers import NasmLexer, CppLexer
from pygments.formatters import HtmlFormatter
from idc import *
from idaapi import *
from idautils import *
from PySide import QtGui
from others.tarjan_sort import strongly_connected_components, robust_topological_sort
from jkutils.kfuzzy import CKoretFuzzyHashing
from jkutils.factor import (FACTORS_CACHE, difference, difference_ratio,
primesbelow as primes)
#-----------------------------------------------------------------------
VERSION_VALUE = "1.0.4"
COPYRIGHT_VALUE="Copyright(c) 2015 Joxean Koret"
COMMENT_VALUE="Diaphora diffing plugin for IDA version %s" % VERSION_VALUE
# Constants unexported in IDA Python
PRTYPE_SEMI=0x0008
# Used to clean-up the pseudo-code and assembly dumps in order to get
# better comparison ratios
CMP_REPS = ["loc_", "sub_", "qword_", "dword_", "byte_", "word_", "off_",
"unk_", "stru_", "dbl_"]
# Messages
MSG_RELAXED_RATIO_ENABLED = """AUTOHIDE DATABASE\n<b>Relaxed ratio calculations</b> will be enabled. It will ignore many small
modifications to functions and will match more functions with higher ratios. Enable this option if you're only interested in the
new functionality. Disable it for patch diffing if you're interested in small modifications (like buffer sizes).
<br><br>
This is automatically done for diffing big databases (more than 20,000 functions in the database).<br><br>
You can disable it by un-checking the 'Relaxed calculations of differences ratios' option."""
MSG_FUNCTION_SUMMARIES_ONLY = """AUTOHIDE DATABASE\n<b>Do not export basic blocks or instructions</b> will be enabled.<br>
It will not export the information relative to basic blocks or<br>
instructions and 'Diff assembly in a graph' will not be available.
<br><br>
This is automatically done for exporting huge databases with<br>
more than 100,000 functions.<br><br>
You can disable it by un-checking the 'Do not export basic blocks<br>
or instructions' option."""
#-----------------------------------------------------------------------
def log(msg):
  """Write a timestamped line to IDA's output window."""
  stamp = time.asctime()
  Message("[%s] %s\n" % (stamp, msg))
#-----------------------------------------------------------------------
def log_refresh(msg, show=False):
  """Update IDA's wait box with `msg` and also write it to the log.

  @param show: True pops up a fresh wait box; False just replaces the
               text of the one already being displayed.
  """
  updater = show_wait_box if show else replace_wait_box
  updater(msg)
  log(msg)
#-----------------------------------------------------------------------
def quick_ratio(buf1, buf2):
  """Return an upper bound of the similarity ratio of two text buffers.

  The buffers are compared line-wise with difflib.SequenceMatcher's
  quick_ratio(). Returns 0 for None/empty buffers and on any error.
  """
  try:
    # BUGFIX: the second emptiness check used to test buf1 twice
    # instead of checking buf2.
    if buf1 is None or buf2 is None or buf1 == "" or buf2 == "":
      return 0
    s = SequenceMatcher(None, buf1.split("\n"), buf2.split("\n"))
    return s.quick_ratio()
  except:
    # Best effort: a comparison failure must never abort a diff run.
    print("quick_ratio: %s" % str(sys.exc_info()[1]))
    return 0
#-----------------------------------------------------------------------
def real_quick_ratio(buf1, buf2):
  """Return a very fast upper bound of the similarity of two buffers.

  Based only on the line counts (difflib real_quick_ratio). Returns 0
  for None/empty buffers and on any error.
  """
  try:
    # BUGFIX: the second emptiness check used to test buf1 twice
    # instead of checking buf2, so e.g. ("x", "") returned 1.0.
    if buf1 is None or buf2 is None or buf1 == "" or buf2 == "":
      return 0
    s = SequenceMatcher(None, buf1.split("\n"), buf2.split("\n"))
    return s.real_quick_ratio()
  except:
    # Best effort: a comparison failure must never abort a diff run.
    print("real_quick_ratio: %s" % str(sys.exc_info()[1]))
    return 0
#-----------------------------------------------------------------------
def ast_ratio(ast1, ast2):
  """Compare two AST primes-products: 1.0 when identical, 0 when one
  side is missing, otherwise a factor-based difference ratio."""
  if ast1 == ast2:
    return 1.0
  if ast1 is None or ast2 is None:
    return 0
  return difference_ratio(decimal.Decimal(ast1), decimal.Decimal(ast2))
#-----------------------------------------------------------------------
class CHtmlViewer(PluginForm):
  """PluginForm that renders an HTML blob (diff output) in a QTextBrowser."""
  def OnCreate(self, form):
    # Called by IDA when the form is created; builds the Qt widgets.
    self.parent = self.FormToPySideWidget(form)
    self.PopulateForm()
    # NOTE(review): these None assignments run *after* PopulateForm()
    # already created the widgets, discarding the references — confirm
    # whether they should come before the PopulateForm() call.
    self.browser = None
    self.layout = None
    return 1
  def PopulateForm(self):
    # Lay out a single read-only text browser showing self.text (HTML).
    self.layout = QtGui.QVBoxLayout()
    self.browser = QtGui.QTextBrowser()
    # Commented for now
    #self.browser.setLineWrapMode(QtGui.QTextEdit.NoWrap)
    self.browser.setHtml(self.text)
    self.browser.setReadOnly(True)
    self.browser.setFontWeight(12)
    self.layout.addWidget(self.browser)
    self.parent.setLayout(self.layout)
  def Show(self, text, title):
    # Store the HTML first: OnCreate/PopulateForm reads self.text.
    self.text = text
    return PluginForm.Show(self, title)
#-----------------------------------------------------------------------
class CChooser(Choose2):
  """Chooser (list view) showing either matched function pairs or the
  functions unmatched in one of the two databases, plus the right-click
  commands to diff/import them."""
  class Item:
    """One chooser row: a match between two functions, or a single
    unmatched function when ea2/name2 are left as None."""
    def __init__(self, ea, name, ea2 = None, name2 = None, desc="100% equal", ratio = 0):
      self.ea = ea
      self.vfname = name
      self.ea2 = ea2
      self.vfname2 = name2
      self.description = desc
      self.ratio = ratio
      self.cmd_import_selected = None
      self.cmd_import_all = None
      self.cmd_import_all_funcs = None
    def __str__(self):
      return '%08x' % self.ea
  def __init__(self, title, bindiff, show_commands=True):
    # Unmatched choosers only need 3 columns; matches need both sides.
    if title.startswith("Unmatched in"):
      Choose2.__init__(self, title, [ ["Line", 8], ["Address", 10], ["Name", 20] ], Choose2.CH_MULTI)
    else:
      Choose2.__init__(self, title, [ ["Line", 8], ["Address", 10], ["Name", 20], ["Address 2", 10], ["Name 2", 20], ["Ratio", 5], ["Description", 30] ], Choose2.CH_MULTI)
    # self.primary: True when the addresses in column 1 belong to the
    # currently opened database (so double-click can jump to them).
    if title == "Unmatched in primary":
      self.primary = False
    else:
      self.primary = True
    self.n = 0
    self.items = []
    self.icon = 41
    self.bindiff = bindiff
    self.show_commands = show_commands
    # Right-click command ids; filled lazily in show().
    self.cmd_diff_asm = None
    self.cmd_diff_graph = None
    self.cmd_diff_c = None
    self.cmd_import_selected = None
    self.cmd_import_all = None
    self.cmd_import_all_funcs = None
    self.cmd_show_asm = None
    self.cmd_show_pseudo = None
    self.cmd_highlight_functions = None
    self.cmd_unhighlight_functions = None
    self.selected_items = []
  def OnClose(self):
    """space holder"""
    return True
  def OnEditLine(self, n):
    """space holder"""
  def OnInsertLine(self):
    pass
  def OnSelectLine(self, n):
    # Double-click: jump to the address in the primary database, or show
    # the assembly of the secondary one.
    item = self.items[int(n)]
    if self.primary:
      try:
        jump_ea = int(item[1], 16)
        # Only jump for valid addresses
        if isEnabled(jump_ea):
          jumpto(jump_ea)
      except:
        print "OnSelectLine", sys.exc_info()[1]
    else:
      self.bindiff.show_asm(self.items[n], self.primary)
  def OnGetLine(self, n):
    try:
      return self.items[n]
    except:
      print "OnGetLine", sys.exc_info()[1]
  def OnGetSize(self):
    return len(self.items)
  def OnDeleteLine(self, n):
    try:
      del self.items[n]
      self.n -= 1
    except:
      pass
    return True
  def OnRefresh(self, n):
    return n
  def add_item(self, item):
    # Convert a CChooser.Item into the row-of-strings format Choose2 uses.
    if self.title.startswith("Unmatched in"):
      self.items.append(["%05lu" % self.n, "%08x" % int(item.ea), item.vfname])
    else:
      self.items.append(["%05lu" % self.n, "%08x" % int(item.ea), item.vfname, "%08x" % int(item.ea2), item.vfname2, "%.3f" % item.ratio, item.description])
    self.n += 1
  def show(self, force=False):
    # Display the chooser and register the right-click commands once
    # (or again when force is True).
    t = self.Show()
    if t < 0:
      return False
    if self.show_commands and (self.cmd_diff_asm is None or force):
      # create additional actions handlers
      self.cmd_diff_asm = self.AddCommand("Diff assembly")
      self.cmd_diff_c = self.AddCommand("Diff pseudo-code")
      self.cmd_diff_graph = self.AddCommand("Diff assembly in a graph")
      self.cmd_import_selected = self.AddCommand("Import selected")
      self.cmd_import_all = self.AddCommand("Import *all* functions")
      self.cmd_import_all_funcs = self.AddCommand("Import *all* data for sub_* functions")
      self.cmd_highlight_functions = self.AddCommand("Highlight matches")
      self.cmd_unhighlight_functions = self.AddCommand("Unhighlight matches")
    elif not self.show_commands and (self.cmd_show_asm is None or force):
      self.cmd_show_asm = self.AddCommand("Show assembly")
      self.cmd_show_pseudo = self.AddCommand("Show pseudo-code")
    return True
  def get_color(self):
    # Background colour used when highlighting matched functions;
    # returns None for titles other than Best/Partial/Unreliable.
    if self.title.startswith("Best"):
      return 0xffff99
    elif self.title.startswith("Partial"):
      return 0x99ff99
    elif self.title.startswith("Unreliable"):
      return 0x9999ff
  def OnCommand(self, n, cmd_id):
    # Additional right-click-menu commands handlers.
    if cmd_id == self.cmd_import_all:
      if askyn_c(1, "HIDECANCEL\nDo you really want to import all matched functions, comments, prototypes and definitions?") == 1:
        self.bindiff.import_all(self.items)
    elif cmd_id == self.cmd_import_all_funcs:
      if askyn_c(1, "HIDECANCEL\nDo you really want to import all IDA named matched functions, comments, prototypes and definitions?") == 1:
        self.bindiff.import_all_auto(self.items)
    elif cmd_id == self.cmd_import_selected:
      if len(self.selected_items) <= 1:
        self.bindiff.import_one(self.items[n])
      else:
        if askyn_c(1, "HIDECANCEL\nDo you really want to import all selected IDA named matched functions, comments, prototypes and definitions?") == 1:
          self.bindiff.import_selected(self.items, self.selected_items)
    elif cmd_id == self.cmd_diff_c:
      self.bindiff.show_pseudo_diff(self.items[n])
    elif cmd_id == self.cmd_diff_asm:
      self.bindiff.show_asm_diff(self.items[n])
    elif cmd_id == self.cmd_show_asm:
      self.bindiff.show_asm(self.items[n], self.primary)
    elif cmd_id == self.cmd_show_pseudo:
      self.bindiff.show_pseudo(self.items[n], self.primary)
    elif cmd_id == self.cmd_highlight_functions:
      if askyn_c(1, "HIDECANCEL\nDo you want to change the background color of each matched function?") == 1:
        color = self.get_color()
        for item in self.items:
          ea = int(item[1], 16)
          if not SetColor(ea, CIC_FUNC, color):
            print "Error setting color for %x" % ea
        Refresh()
    elif cmd_id == self.cmd_unhighlight_functions:
      # Reset the background of every listed function to white.
      for item in self.items:
        ea = int(item[1], 16)
        if not SetColor(ea, CIC_FUNC, 0xFFFFFF):
          print "Error setting color for %x" % ea
      Refresh()
    elif cmd_id == self.cmd_diff_graph:
      item = self.items[n]
      ea1 = int(item[1], 16)
      name1 = item[2]
      ea2 = int(item[3], 16)
      name2 = item[4]
      log("Diff graph for 0x%x - 0x%x" % (ea1, ea2))
      self.bindiff.graph_diff(ea1, name1, ea2, name2)
    return True
  def OnSelectionChange(self, sel_list):
    self.selected_items = sel_list
  def OnGetLineAttr(self, n):
    # Colour matched rows from red (low ratio) to green (high ratio).
    if not self.title.startswith("Unmatched"):
      item = self.items[n]
      ratio = float(item[5])
      red = int(255 * (1 - ratio))
      green = int(128 * ratio)
      color = int("0x00%02x%02x" % (green, red), 16)
      return [color, 0]
    return [0xFFFFFF, 0]
#-----------------------------------------------------------------------
class CBinDiffExporterSetup(Form):
  """IDA Form with all the export/diff options shown before running."""
  def __init__(self):
    # The template string below defines the dialog layout; the {xxx}
    # placeholders are bound to the controls in `args`. It is parsed at
    # runtime by Form — do not reformat it.
    s = r"""Diaphora BinDiff
  Please select the path to the SQLite database to save the current IDA database and the path of the SQLite database to diff against.
  If no SQLite diff database is selected, it will just export the current IDA database to SQLite format. Leave the 2nd field empty if you are
  exporting the first database.
  SQLite databases: Export filter limits:
  <#Select a file to export the current IDA database to SQLite format#Export IDA database to SQLite :{iFileSave}> <#Minimum address to find functions to export#From address:{iMinEA}>
  <#Select the SQLite database to diff against #SQLite database to diff against:{iFileOpen}> <#Maximum address to find functions to export#To address :{iMaxEA}>
  <Use the decompiler if available:{rUseDecompiler}>
  <#Enable if you want neither sub_* functions nor library functions to be exported#Export only non-IDA generated functions:{rNonIdaSubs}>
  <#Export only function summaries, not all instructions. Showing differences in a graph between functions will not be available.#Do not export instructions and basic blocks:{rFuncSummariesOnly}>
  <Use probably unreliable methods:{rUnreliable}>
  <Recommended to disable with databases with more than 5.000 functions#Use slow heuristics:{rSlowHeuristics}>
  <#Enable this option if you aren't interested in small changes#Relaxed calculations of differences ratios:{rRelaxRatio}>
  <Use experimental heuristics:{rExperimental}>
  <#Enable this option to ignore sub_* names for the 'Same name' heuristic.#Ignore automatically generated names:{rIgnoreSubNames}>
  <#Enable this option to ignore all function names for the 'Same name' heuristic.#Ignore all function names:{rIgnoreAllNames}>{cGroup1}>
  NOTE: Don't select IDA database files (.IDB, .I64) as only SQLite databases are considered.
"""
    args = {'iFileSave': Form.FileInput(save=True, swidth=40),
            'iFileOpen': Form.FileInput(open=True, swidth=40),
            'iMinEA': Form.NumericInput(tp=Form.FT_ADDR, swidth=22),
            'iMaxEA': Form.NumericInput(tp=Form.FT_ADDR, swidth=22),
            'cGroup1' : Form.ChkGroupControl(("rUseDecompiler",
                                              "rUnreliable",
                                              "rNonIdaSubs",
                                              "rSlowHeuristics",
                                              "rRelaxRatio",
                                              "rExperimental",
                                              "rFuncSummariesOnly",
                                              "rIgnoreSubNames",
                                              "rIgnoreAllNames"))}
    Form.__init__(self, s, args)
  def set_options(self, opts):
    """Fill the form controls from a BinDiffOptions object."""
    if opts.file_out is not None:
      self.iFileSave.value = opts.file_out
    if opts.file_in is not None:
      self.iFileOpen.value = opts.file_in
    self.rUseDecompiler.checked = opts.use_decompiler
    self.rUnreliable.checked = opts.unreliable
    self.rSlowHeuristics.checked = opts.slow
    self.rRelaxRatio.checked = opts.relax
    self.rExperimental.checked = opts.experimental
    self.iMinEA.value = opts.min_ea
    self.iMaxEA.value = opts.max_ea
    # The checkbox is the *negation* of ida_subs ("export only non-IDA
    # generated functions").
    self.rNonIdaSubs.checked = opts.ida_subs == False
    self.rIgnoreSubNames.checked = opts.ignore_sub_names
    self.rIgnoreAllNames.checked = opts.ignore_all_names
    self.rFuncSummariesOnly.checked = opts.func_summaries_only
  def get_options(self):
    """Build a BinDiffOptions object from the current form values."""
    opts = dict(
      file_out = self.iFileSave.value,
      file_in  = self.iFileOpen.value,
      use_decompiler = self.rUseDecompiler.checked,
      unreliable = self.rUnreliable.checked,
      slow = self.rSlowHeuristics.checked,
      relax = self.rRelaxRatio.checked,
      experimental = self.rExperimental.checked,
      min_ea = self.iMinEA.value,
      max_ea = self.iMaxEA.value,
      # Invert the checkbox back into the ida_subs flag.
      ida_subs = self.rNonIdaSubs.checked == False,
      ignore_sub_names = self.rIgnoreSubNames.checked,
      ignore_all_names = self.rIgnoreAllNames.checked,
      func_summaries_only = self.rFuncSummariesOnly.checked
    )
    return BinDiffOptions(**opts)
#-----------------------------------------------------------------------
try:
  class CAstVisitor(ctree_visitor_t):
    """Hex-Rays ctree visitor that folds every expression/statement
    opcode into a product of primes (an order-insensitive AST hash)."""
    def __init__(self, cfunc):
      self.primes = primes(1024)
      ctree_visitor_t.__init__(self, CV_FAST)
      self.cfunc = cfunc
      self.primes_hash = 1
      return
    def visit_expr(self, expr):
      # Multiply in the prime assigned to this expression opcode.
      try:
        self.primes_hash *= self.primes[expr.op]
      except:
        traceback.print_exc()
      return 0
    def visit_insn(self, ins):
      # Multiply in the prime assigned to this statement opcode.
      try:
        self.primes_hash *= self.primes[ins.op]
      except:
        traceback.print_exc()
      return 0
except:
  # It seems it may cause "problems" with trial versions... may be it
  # causes problems too with versions without the decompiler???
  # (ctree_visitor_t doesn't exist then; keep a dummy placeholder so the
  # rest of the script still loads.)
  class CAstVisitor:
    pass
#-----------------------------------------------------------------------
class timeraction_t(object):
  """One-shot IDA timer: calls `func` (optionally with `args`) once after
  `interval` milliseconds, then unregisters itself."""

  def __init__(self, func, args, interval):
    self.func = func
    self.args = args
    self.interval = interval
    # register_timer() will invoke this object (via __call__) when the
    # interval elapses.
    self.obj = idaapi.register_timer(self.interval, self)
    if self.obj is None:
      raise RuntimeError("Failed to register timer")

  def __call__(self):
    # Fired by IDA's timer machinery.
    if self.args is None:
      self.func()
    else:
      self.func(self.args)
    # Returning -1 tells IDA to unregister the timer: it runs only once.
    return -1
#-----------------------------------------------------------------------
class uitimercallback_t(object):
  """One-shot IDA timer that switches to a graph viewer's form and zooms
  the graph to fit, once the viewer has been created."""

  def __init__(self, g, interval):
    self.interval = interval
    # The timer calls this object back (via __call__) after `interval` ms.
    self.obj = idaapi.register_timer(self.interval, self)
    if self.obj is None:
      raise RuntimeError("Failed to register timer")
    self.g = g

  def __call__(self):
    # IDA 6.6 doesn't support GetTForm; in that case we cannot change
    # the zoom, so just bail out.
    if "GetTForm" not in dir(self.g):
      return -1
    form = self.g.GetTForm()
    switchto_tform(form, 1)
    process_ui_action("GraphZoomFit", 0)
    # Returning -1 unregisters the timer so it only fires once.
    return -1
#-----------------------------------------------------------------------
class CDiffGraphViewer(GraphViewer):
  """GraphViewer that draws one side of a diffed flow graph, colouring
  each basic block with the colour supplied in `colours`."""
  def __init__(self, title, g, colours):
    try:
      GraphViewer.__init__(self, title, False)
      # g is a 2-tuple: g[0] maps block address -> rows of instructions,
      # g[1] maps block address -> list of successor addresses.
      self.graph = g[0]
      self.relations = g[1]
      self.nodes = {}
      self.colours = colours
    except:
      Warning("CDiffGraphViewer: OnInit!!! " + str(sys.exc_info()[1]))
  def OnRefresh(self):
    # Rebuild all nodes and edges from self.graph/self.relations.
    try:
      self.Clear()
      self.nodes = {}
      for key in self.graph:
        self.nodes[key] = self.AddNode([key, self.graph[key]])
      for key in self.relations:
        # Create placeholder nodes for addresses only seen in edges.
        if not key in self.nodes:
          self.nodes[key] = self.AddNode([key, [[0, 0, ""]]])
        parent_node = self.nodes[key]
        for child in self.relations[key]:
          if not child in self.nodes:
            self.nodes[child] = self.AddNode([child, [[0, 0, ""]]])
          child_node = self.nodes[child]
          self.AddEdge(parent_node, child_node)
      return True
    except:
      print "GraphViewer Error:", sys.exc_info()[1]
      return True
  def OnGetText(self, node_id):
    # Return the (label, colour) pair for a node: the label is the
    # disassembly text (3rd element of each row), one line per row.
    try:
      ea, rows = self[node_id]
      if ea in self.colours:
        colour = self.colours[ea]
      else:
        colour = 0xFFFFFF
      ret = []
      for row in rows:
        ret.append(row[2])
      label = "\n".join(ret)
      return (label, colour)
    except:
      print "GraphViewer.OnGetText:", sys.exc_info()[1]
      return ("ERROR", 0x000000)
  def Show(self):
    return GraphViewer.Show(self)
#-----------------------------------------------------------------------
# Global holding the last CBinDiff instance used, so its result choosers
# can be re-opened later from the UI.
g_bindiff = None
def show_choosers():
  """Re-display the result choosers of the last diff, if any."""
  global g_bindiff
  if g_bindiff is not None:
    g_bindiff.show_choosers(True)
#-----------------------------------------------------------------------
MAX_PROCESSED_ROWS = 1000000
TIMEOUT_LIMIT = 60 * 2
#-----------------------------------------------------------------------
# Fix for people using IDASkins with very h4x0r $tYl3z like the
# Consonance color scheme
HtmlDiff._styles = """
table.diff {
font-family:Courier;
border:medium;
background-color:#ffffff;
color:#000000
}
.diff_header {background-color:#e0e0e0}
td.diff_header {text-align:right}
.diff_next {background-color:#c0c0c0}
.diff_add {background-color:#aaffaa}
.diff_chg {background-color:#ffff77}
.diff_sub {background-color:#ffaaaa}"""
#-----------------------------------------------------------------------
class CBinDiff:
  def __init__(self, db_name):
    """Create the exporter/differ working on the SQLite file `db_name`.

    Opens (and creates the schema of) the database immediately and sets
    every tunable option to its default value; _diff_or_export()
    overwrites the options from a BinDiffOptions afterwards.
    """
    # Snapshot of all names in the current IDB (address -> name).
    self.names = dict(Names())
    self.primes = primes(1024*1024)
    self.db_name = db_name
    self.open_db()
    # Addresses already matched in the primary/secondary database.
    self.matched1 = set()
    self.matched2 = set()
    self.total_functions1 = None
    self.total_functions2 = None
    self.equal_callgraph = False
    self.kfh = CKoretFuzzyHashing()
    # With this block size we're sure it will only apply to functions
    # somehow big
    self.kfh.bsize = 32
    # Per-function pseudo-code lines and AST primes hash (filled by the
    # decompiler pass, if available).
    self.pseudo = {}
    self.pseudo_hash = {}
    self.unreliable = False
    self.relaxed_ratio = False
    self.experimental = False
    self.slow_heuristics = False
    self.use_decompiler_always = True
    # Result choosers, created when diffing finishes.
    self.best_chooser = None
    self.partial_chooser = None
    self.unreliable_chooser = None
    self.unmatched_second = None
    self.unmatched_primary = None
    self.last_diff_db = None
    ####################################################################
    # LIMITS
    #
    # Do not run heuristics for more than 2 minutes per each 20.000
    # functions.
    self.timeout = TIMEOUT_LIMIT
    # It's typical in SQL queries to get a cartesian product of the
    # results in the functions tables. Do not process more than this
    # value per each 20k functions.
    self.max_processed_rows = MAX_PROCESSED_ROWS
    # Limits to filter the functions to export
    self.min_ea = MinEA()
    self.max_ea = MaxEA()
    # Export only non IDA automatically generated function names? I.e.,
    # excluding these starting with sub_*
    self.ida_subs = True
    # Export only function summaries instead of also exporting both the
    # basic blocks and all instructions used by functions?
    self.function_summaries_only = False
    # Ignore IDA's automatically generated sub_* names for heuristics
    # like the 'Same name'?
    self.ignore_sub_names = True
    # Ignore any and all function names for the 'Same name' heuristic?
    # NOTE(review): defaults to True here while BinDiffOptions defaults
    # ignore_all_names to False — confirm which default is intended.
    self.ignore_all_names = True
    ####################################################################
def __del__(self):
if self.db is not None:
try:
if self.last_diff_db is not None:
with self.db.cursor():
cur.execute('detach "%s"' % self.last_diff_db)
except:
pass
self.db_close()
def open_db(self):
self.db = sqlite3.connect(self.db_name)
self.db.text_factory = str
self.create_schema()
  def db_cursor(self):
    """Return a new cursor, transparently reopening the database if it
    was closed by db_close()."""
    if self.db is None:
      self.open_db()
    return self.db.cursor()
  def db_close(self):
    """Close the SQLite connection; db_cursor() will reopen it on demand."""
    self.db.close()
    self.db = None
def create_schema(self):
cur = self.db_cursor()
cur.execute("PRAGMA foreign_keys = ON")
sql = """ create table if not exists functions (
id integer primary key,
name varchar(255),
address text unique,
nodes integer,
edges integer,
indegree integer,
outdegree integer,
size integer,
instructions integer,
mnemonics text,
names text,
prototype text,
cyclomatic_complexity integer,
primes_value text,
comment text,
mangled_function text,
bytes_hash text,
pseudocode text,
pseudocode_lines integer,
pseudocode_hash1 text,
pseudocode_primes text,
function_flags integer,
assembly text,
prototype2 text,
pseudocode_hash2 text,
pseudocode_hash3 text,
strongly_connected integer,
loops integer,
rva text unique,
tarjan_topological_sort text,
strongly_connected_spp text,
clean_assembly text,
clean_pseudo text,
mnemonics_spp text) """
cur.execute(sql)
sql = """ create table if not exists program (
id integer primary key,
callgraph_primes text,
callgraph_all_primes text,
md5sum text
) """
cur.execute(sql)
sql = """ create table if not exists program_data (
id integer primary key,
name varchar(255),
type varchar(255),
value text
)"""
cur.execute(sql)
sql = """ create table if not exists version (value text) """
cur.execute(sql)
sql = """ create table if not exists instructions (
id integer primary key,
address text unique,
disasm text,
mnemonic text,
comment1 text,
comment2 text) """
cur.execute(sql)
sql = "create index if not exists idx_instructions_address on instructions (address)"
cur.execute(sql)
sql = """ create table if not exists basic_blocks (
id integer primary key,
num integer,
address text unique)"""
cur.execute(sql)
sql = """ create table if not exists bb_relations (
id integer primary key,
parent_id integer not null references basic_blocks(id) ON DELETE CASCADE,
child_id integer not null references basic_blocks(id) ON DELETE CASCADE)"""
cur.execute(sql)
sql = "create index if not exists idx_bb_relations on bb_relations(parent_id, child_id)"
cur.execute(sql)
sql = """ create table if not exists bb_instructions (
id integer primary key,
basic_block_id integer references basic_blocks(id) on delete cascade,
instruction_id integer references instructions(id) on delete cascade)"""
cur.execute(sql)
sql = "create index if not exists idx_bb_instructions on bb_instructions (basic_block_id, instruction_id)"
cur.execute(sql)
sql = """ create table if not exists function_bblocks (
id integer primary key,
function_id integer not null references functions(id) on delete cascade,
basic_block_id integer not null references basic_blocks(id) on delete cascade)"""
cur.execute(sql)
sql = "create index if not exists id_function_blocks on function_bblocks (function_id, basic_block_id)"
cur.execute(sql)
cur.execute("select 1 from version")
row = cur.fetchone()
if not row:
cur.execute("insert into main.version values ('%s')" % VERSION_VALUE)
sql = "create index if not exists idx_assembly on functions(assembly)"
cur.execute(sql)
sql = "create index if not exists idx_bytes_hash on functions(bytes_hash)"
cur.execute(sql)
sql = "create index if not exists idx_pseudocode on functions(pseudocode)"
cur.execute(sql)
sql = "create index if not exists idx_name on functions(name)"
cur.execute(sql)
sql = "create index if not exists idx_mangled_name on functions(mangled_function)"
cur.execute(sql)
sql = "create index if not exists idx_names on functions(names)"
cur.execute(sql)
sql = "create index if not exists idx_asm_pseudo on functions(assembly, pseudocode)"
cur.execute(sql)
sql = "create index if not exists idx_nodes_edges_instructions on functions(nodes, edges, instructions)"
cur.execute(sql)
sql = "create index if not exists idx_composite1 on functions(nodes, edges, mnemonics, names, cyclomatic_complexity, prototype2, indegree, outdegree)"
cur.execute(sql)
sql = "create index if not exists idx_composite2 on functions(instructions, mnemonics, names)"
cur.execute(sql)
sql = "create index if not exists idx_composite3 on functions(nodes, edges, cyclomatic_complexity)"
cur.execute(sql)
sql = "create index if not exists idx_composite4 on functions(pseudocode_lines, pseudocode)"
cur.execute(sql)
sql = "create index if not exists idx_composite5 on functions(pseudocode_lines, pseudocode_primes)"
cur.execute(sql)
sql = "create index if not exists idx_composite6 on functions(names, mnemonics)"
cur.execute(sql)
sql = "create index if not exists idx_pseudocode_hash1 on functions(pseudocode_hash1)"
cur.execute(sql)
sql = "create index if not exists idx_pseudocode_hash2 on functions(pseudocode_hash2)"
cur.execute(sql)
sql = "create index if not exists idx_pseudocode_hash3 on functions(pseudocode_hash3)"
cur.execute(sql)
sql = "create index if not exists idx_pseudocode_hash on functions(pseudocode_hash1, pseudocode_hash2, pseudocode_hash3)"
cur.execute(sql)
sql = "create index if not exists idx_strongly_connected on functions(strongly_connected)"
cur.execute(sql)
sql = "create index if not exists idx_strongly_connected_spp on functions(strongly_connected_spp)"
cur.execute(sql)
sql = "create index if not exists idx_loops on functions(loops)"
cur.execute(sql)
sql = "create index if not exists idx_rva on functions(rva)"
cur.execute(sql)
sql = "create index if not exists idx_tarjan_topological_sort on functions(tarjan_topological_sort)"
cur.execute(sql)
sql = "create index if not exists idx_mnemonics_spp on functions(mnemonics_spp)"
cur.execute(sql)
cur.close()
def add_program_data(self, type_name, key, value):
cur = self.db_cursor()
sql = "insert into main.program_data (name, type, value) values (?, ?, ?)"
values = (key, type_name, value)
cur.execute(sql, values)
cur.close()
  def read_function(self, f, discard=False):
    """Extract every property of the function at address `f` for export.

    Walks the flow chart collecting per-basic-block disassembly,
    graph metrics (nodes/edges/in/out degree, strongly connected
    components, loops), several hashes (bytes md5, mnemonics SPP,
    pseudo-code fuzzy hashes) and the pseudo-code gathered earlier by
    the decompiler pass.

    @return: a big tuple matching the column order used by
             save_function(), or False if the function is filtered out
             by the ida_subs setting.
    """
    name = GetFunctionName(int(f))
    true_name = name
    demangled_name = Demangle(name, INF_SHORT_DN)
    if demangled_name is not None:
      name = demangled_name
    f = int(f)
    func = get_func(f)
    flow = FlowChart(func)
    size = func.endEA - func.startEA
    if not self.ida_subs:
      # Unnamed function, ignore it...
      if name.startswith("sub_") or name.startswith("j_") or name.startswith("unknown"):
        return False
      # Already recognized runtime's function?
      flags = GetFunctionFlags(f)
      if flags & FUNC_LIB or flags == -1:
        return False
    nodes = 0
    edges = 0
    instructions = 0
    mnems = []
    dones = {}
    names = set()
    bytes_hash = []
    outdegree = 0
    indegree = len(list(CodeRefsTo(f, 1)))
    # All per-basic-block data is keyed by the block's address relative
    # to the image base, so two differently-based binaries compare equal.
    assembly = {}
    basic_blocks_data = {}
    bb_relations = {}
    bb_topo_num = {}
    bb_topological = {}
    mnemonics_spp = 1
    cpu_ins_list = GetInstructionList()
    image_base = self.get_base_address()
    for block in flow:
      nodes += 1
      instructions_data = []
      block_ea = block.startEA - image_base
      idx = len(bb_topological)
      bb_topological[idx] = []
      bb_topo_num[block_ea] = idx
      for x in list(Heads(block.startEA, block.endEA)):
        mnem = GetMnem(x)
        disasm = GetDisasm(x)
        # SPP hash: sum of the primes assigned to each mnemonic.
        if mnem in cpu_ins_list:
          mnemonics_spp += self.primes[cpu_ins_list.index(mnem)]
        try:
          assembly[block_ea].append(disasm)
        except KeyError:
          if nodes == 1:
            assembly[block_ea] = [disasm]
          else:
            # Prefix non-entry blocks with a synthetic label.
            assembly[block_ea] = ["loc_%x:" % x, disasm]
        instructions += 1
        bytes_hash.append(chr(Byte(x)))
        outdegree += len(list(CodeRefsFrom(x, 0)))
        mnems.append(mnem)
        # Collect the names referenced by this instruction's operands.
        op_value = GetOperandValue(x, 1)
        if op_value == BADADDR:
          op_value = GetOperandValue(x, 0)
        if op_value != BADADDR and op_value in self.names:
          tmp_name = self.names[op_value]
          # NOTE(review): this demangles `name` (the function's own
          # name), not `tmp_name` — looks like a copy-paste slip, verify.
          demangled_name = Demangle(name, INF_SHORT_DN)
          if demangled_name is not None:
            tmp_name = demangled_name
          if not tmp_name.startswith("sub_"):
            names.add(tmp_name)
        ins_cmt1 = GetCommentEx(x, 0)
        ins_cmt2 = GetCommentEx(x, 1)
        instructions_data.append([x - image_base, mnem, disasm, ins_cmt1, ins_cmt2])
      basic_blocks_data[block_ea] = instructions_data
      bb_relations[block_ea] = []
      for succ_block in block.succs():
        succ_base = succ_block.startEA - image_base
        bb_relations[block_ea].append(succ_base)
        edges += 1
        indegree += 1
        # NOTE(review): 'dones' is checked with succ_block.id but filled
        # with the block object itself, so the check never hits; and the
        # preds loop below also tests succ_block instead of pred_block.
        # Confirm what 'dones' was meant to deduplicate.
        if not dones.has_key(succ_block.id):
          dones[succ_block] = 1
      for pred_block in block.preds():
        try:
          bb_relations[pred_block.startEA - image_base].append(block.startEA - image_base)
        except KeyError:
          bb_relations[pred_block.startEA - image_base] = [block.startEA - image_base]
        edges += 1
        outdegree += 1
        if not dones.has_key(succ_block.id):
          dones[succ_block] = 1
    # Second pass: build the topological graph (indices, not addresses).
    for block in flow:
      block_ea = block.startEA - image_base
      for succ_block in block.succs():
        succ_base = succ_block.startEA - image_base
        bb_topological[bb_topo_num[block_ea]].append(bb_topo_num[succ_base])
    strongly_connected_spp = 0
    try:
      strongly_connected = strongly_connected_components(bb_relations)
      bb_topological = robust_topological_sort(bb_topological)
      bb_topological = json.dumps(bb_topological)
      strongly_connected_spp = 1
      for item in strongly_connected:
        val = len(item)
        if val > 1:
          strongly_connected_spp *= self.primes[val]
    except:
      # XXX: FIXME: The original implementation that we're using is
      # recursive and can fail. We really need to create our own non
      # recursive version.
      strongly_connected = []
      bb_topological = None
    # Loops: SCCs with more than one node, plus trivial self-loops.
    loops = 0
    for sc in strongly_connected:
      if len(sc) > 1:
        loops += 1
      else:
        if sc[0] in bb_relations and sc[0] in bb_relations[sc[0]]:
          loops += 1
    # Join the per-block disassembly in address order.
    keys = assembly.keys()
    keys.sort()
    asm = []
    for key in keys:
      asm.extend(assembly[key])
    asm = "\n".join(asm)
    cc = edges - nodes + 2
    proto = self.guess_type(f)
    proto2 = GetType(f)
    prime = str(self.primes[cc])
    comment = GetFunctionCmt(f, 1)
    bytes_hash = md5("".join(bytes_hash)).hexdigest()
    function_flags = GetFunctionFlags(f)
    pseudo = None
    pseudo_hash1 = None
    pseudo_hash2 = None
    pseudo_hash3 = None
    pseudo_lines = 0
    pseudocode_primes = None
    # Pseudo-code data is only available if the decompiler pass already
    # filled self.pseudo/self.pseudo_hash for this function.
    if f in self.pseudo:
      pseudo = "\n".join(self.pseudo[f])
      pseudo_lines = len(self.pseudo[f])
      pseudo_hash1, pseudo_hash2, pseudo_hash3 = self.kfh.hash_bytes(pseudo).split(";")
      if pseudo_hash1 == "":
        pseudo_hash1 = None
      if pseudo_hash2 == "":
        pseudo_hash2 = None
      if pseudo_hash3 == "":
        pseudo_hash3 = None
      pseudocode_primes = str(self.pseudo_hash[f])
    clean_assembly = self.get_cmp_asm_lines(asm)
    clean_pseudo = self.get_cmp_pseudo_lines(pseudo)
    rva = f - self.get_base_address()
    # Tuple order must match the INSERT in save_function(); the last two
    # elements (basic_blocks_data, bb_relations) are consumed separately.
    return (name, nodes, edges, indegree, outdegree, size, instructions, mnems, names,
            proto, cc, prime, f, comment, true_name, bytes_hash, pseudo, pseudo_lines,
            pseudo_hash1, pseudocode_primes, function_flags, asm, proto2,
            pseudo_hash2, pseudo_hash3, len(strongly_connected), loops, rva, bb_topological,
            strongly_connected_spp, clean_assembly, clean_pseudo, mnemonics_spp,
            basic_blocks_data, bb_relations)
    def get_base_address(self):
        """Return the image base address of the IDB being analyzed (IDA API)."""
        return idaapi.get_imagebase()
def get_instruction_id(self, addr):
cur = self.db_cursor()
sql = "select id from instructions where address = ?"
cur.execute(sql, (str(addr),))
row = cur.fetchone()
rowid = None
if row is not None:
rowid = row[0]
cur.close()
return rowid
def get_bb_id(self, addr):
cur = self.db_cursor()
sql = "select id from basic_blocks where address = ?"
cur.execute(sql, (str(addr),))
row = cur.fetchone()
rowid = None
if row is not None:
rowid = row[0]
cur.close()
return rowid
    def save_function(self, props):
        """Persist one function row plus its basic blocks and instructions.

        *props* is the tuple built by the function reader: the last two
        elements are (basic_blocks_data, bb_relations); everything before
        them maps positionally onto the main.functions columns.
        """
        cur = self.db_cursor()
        new_props = []
        # All but the trailing two elements go straight into main.functions.
        for prop in props[:len(props)-2]:
            # XXX: Fixme! This is a hack for 64 bit architectures kernels
            if type(prop) is long and prop > 0xFFFFFFFF:
                prop = str(prop)
            # Lists/sets (e.g. names, topological sort) are stored as JSON.
            if type(prop) is list or type(prop) is set:
                new_props.append(json.dumps(list(prop)))
            else:
                new_props.append(prop)
        sql = """insert into main.functions (name, nodes, edges, indegree, outdegree, size,
                          instructions, mnemonics, names, prototype,
                          cyclomatic_complexity, primes_value, address,
                          comment, mangled_function, bytes_hash, pseudocode,
                          pseudocode_lines, pseudocode_hash1, pseudocode_primes,
                          function_flags, assembly, prototype2, pseudocode_hash2,
                          pseudocode_hash3, strongly_connected, loops, rva,
                          tarjan_topological_sort, strongly_connected_spp,
                          clean_assembly, clean_pseudo, mnemonics_spp)
                                values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?,
                                        ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?,
                                        ?, ?, ?)"""
        cur.execute(sql, new_props)
        func_id = cur.lastrowid
        if not self.function_summaries_only:
            bb_data, bb_relations = props[len(props)-2:]
            instructions_ids = {}
            sql = """insert into main.instructions (address, mnemonic, disasm, comment1, comment2)
                                 values (?, ?, ?, ?, ?)"""
            # Method lookups hoisted to locals: these loops are hot.
            self_get_instruction_id = self.get_instruction_id
            cur_execute = cur.execute
            for key in bb_data:
                for insn in bb_data[key]:
                    addr, mnem, disasm, cmt1, cmt2 = insn
                    # Instructions may be shared; insert only when missing.
                    db_id = self_get_instruction_id(str(addr))
                    if db_id is None:
                        cur_execute(sql, (str(addr), mnem, disasm, cmt1, cmt2))
                        db_id = cur.lastrowid
                    instructions_ids[addr] = db_id
            num = 0
            bb_ids = {}
            sql1 = "insert into main.basic_blocks (num, address) values (?, ?)"
            sql2 = "insert into main.bb_instructions (basic_block_id, instruction_id) values (?, ?)"
            self_get_bb_id = self.get_bb_id
            for key in bb_data:
                # Insert each basic block
                num += 1
                ins_ea = str(key)
                last_bb_id = self_get_bb_id(ins_ea)
                if last_bb_id is None:
                    cur_execute(sql1, (num, ins_ea))
                    last_bb_id = cur.lastrowid
                bb_ids[ins_ea] = last_bb_id
                # Insert relations between basic blocks and instructions
                for insn in bb_data[key]:
                    ins_id = instructions_ids[insn[0]]
                    cur_execute(sql2, (last_bb_id, ins_id))
            # Insert relations between basic blocks
            sql = "insert into main.bb_relations (parent_id, child_id) values (?, ?)"
            for key in bb_relations:
                for bb in bb_relations[key]:
                    bb = str(bb)
                    key = str(key)
                    cur_execute(sql, (bb_ids[key], bb_ids[bb]))
            # And finally insert the functions to basic blocks relations
            sql = "insert into main.function_bblocks (function_id, basic_block_id) values (?, ?)"
            for key in bb_ids:
                bb_id = bb_ids[key]
                cur_execute(sql, (func_id, bb_id))
        cur.close()
def save_callgraph(self, primes, all_primes, md5sum):
cur = self.db_cursor()
sql = "insert into main.program (callgraph_primes, callgraph_all_primes, md5sum) values (?, ?, ?)"
cur.execute(sql, (primes, all_primes, md5sum))
cur.close()
    def export_structures(self):
        """Save every local type (struct/enum) defined in the IDB as a
        program_data row."""
        # It seems that GetMaxLocalType, sometimes, can return negative
        # numbers, according to one beta-tester. My guess is that it's a bug
        # in IDA. However, as we cannot reproduce, at least handle this
        # condition.
        local_types = GetMaxLocalType()
        if (local_types & 0x80000000) != 0:
            log("Warning: GetMaxLocalType returned a negative number (0x%x)!" % local_types)
            return
        # Local type ordinals are 1-based in IDA, hence the i+1 below.
        for i in range(local_types):
            try:
                name = GetLocalTypeName(i+1)
                definition = GetLocalType(i+1, PRTYPE_MULTI|PRTYPE_TYPE|PRTYPE_SEMI|PRTYPE_PRAGMA)
                type_name = "struct"
                if definition.startswith("enum"):
                    type_name = "enum"
                self.add_program_data(type_name, name, definition)
            except:
                # Best-effort: skip any type the IDA API fails to retrieve.
                pass
    def get_til_names(self):
        """Return the list of local type-library names referenced by the IDB,
        or None when none can be extracted.

        NOTE(review): this scans the first line of the binary .til file next
        to the IDB for a "Local type definitions" marker; the format is
        assumed from observation, not a documented spec -- confirm before
        relying on it.
        """
        idb_path = GetIdbPath()
        filename, ext = os.path.splitext(idb_path)
        til_path = "%s.til" % filename
        with open(til_path, "rb") as f:
            line = f.readline()
            pos = line.find("Local type definitions")
            if pos > -1:
                tmp = line[pos+len("Local type definitions")+1:]
                # The names appear as a comma-separated, NUL-terminated list.
                pos = tmp.find("\x00")
                if pos > -1:
                    defs = tmp[:pos].split(",")
                    return defs
        return None
def export_til(self):
til_names = self.get_til_names()
if til_names is not None:
for til in til_names:
self.add_program_data("til", til, None)
    def do_export(self):
        """Export every function in [min_ea, max_ea], then structures and
        type libraries, updating the IDA wait box with progress."""
        i = 0
        callgraph_primes = 1
        callgraph_all_primes = {}
        func_list = list(Functions(self.min_ea, self.max_ea))
        total_funcs = len(func_list)
        t = time.time()
        for func in func_list:
            i += 1
            # Refresh the progress dialog on the first and every 100th function.
            if i % 100 == 0 or i == 1:
                line = "Exported %d function(s) out of %d total.\nElapsed %d second(s), remaining ~%d second(s)"
                elapsed = time.time() - t
                remaining = (elapsed / i) * (total_funcs - i)
                replace_wait_box(line % (i, total_funcs, int(elapsed), int(remaining)))
            props = self.read_function(func)
            if props == False:
                continue
            # props[11] is this function's primes value; the product over all
            # functions forms the program's callgraph signature.
            ret = props[11]
            callgraph_primes *= decimal.Decimal(ret)
            try:
                callgraph_all_primes[ret] += 1
            except KeyError:
                callgraph_all_primes[ret] = 1
            self.save_function(props)
        md5sum = GetInputFileMD5()
        self.save_callgraph(str(callgraph_primes), json.dumps(callgraph_all_primes), md5sum)
        self.export_structures()
        self.export_til()
    def export(self):
        """Run the full export under a wait box, then commit, analyze and
        close the database."""
        try:
            show_wait_box("Exporting database")
            self.do_export()
        finally:
            # Always dismiss the dialog, even if the export raised.
            hide_wait_box()
        self.db.commit()
        cur = self.db_cursor()
        # "analyze" collects SQLite statistics so later diff queries plan well.
        cur.execute("analyze")
        cur.close()
        self.db_close()
def import_til(self):
log("Importing type libraries...")
cur = self.db_cursor()
sql = "select name from diff.program_data where type = 'til'"
cur.execute(sql)
for row in cur.fetchall():
LoadTil(row[0])
cur.close()
Wait()
def get_valid_definition(self, defs):
""" Try to get a valid structure definition by removing (yes) the
invalid characters typically found in IDA's generated structs."""
ret = defs.replace("?", "_").replace("@", "_")
ret = ret.replace("$", "_")
return ret
    def import_definitions(self):
        """Import struct/enum definitions from the diff database into IDA.

        First declares forward definitions, then retries the full (sanitized)
        definitions a few times because types may depend on each other.
        """
        cur = self.db_cursor()
        sql = "select type, name, value from diff.program_data where type in ('structure', 'struct', 'enum')"
        cur.execute(sql)
        rows = cur.fetchall()
        new_rows = set()
        for row in rows:
            # Only import types IDA does not already know about.
            if GetStrucIdByName(row[1]) == BADADDR:
                type_name = "struct"
                if row[0] == "enum":
                    type_name = "enum"
                new_rows.add(row)
                # Forward-declare first so cross-references can resolve.
                ret = ParseTypes("%s %s;" % (type_name, row[1]))
                if ret != 0:
                    pass
        # Retry full definitions up to 10 times: each pass may satisfy
        # dependencies that let previously-failing definitions parse.
        for i in xrange(10):
            for row in new_rows:
                if GetStrucIdByName(row[1]) == BADADDR:
                    definition = self.get_valid_definition(row[2])
                    ret = ParseTypes(definition)
                    if ret != 0:
                        pass
        cur.close()
        Wait()
    def import_one(self, item):
        """Import a single selected match into the IDB, then refresh its row
        in the primary database."""
        ret = askyn_c(1, "AUTOHIDE DATABASE\nDo you want to import all the type libraries, structs and enumerations?")
        if ret == 1:
            # Import all the type libraries from the diff database
            self.import_til()
            # Import all the struct and enum definitions
            self.import_definitions()
        elif ret == -1:
            # User cancelled: import nothing at all.
            return
        # Import just the selected item
        ea1 = str(int(item[1], 16))
        ea2 = str(int(item[3], 16))
        self.do_import_one(ea1, ea2, True)
        # Re-read the now renamed/commented function and replace its DB row.
        new_func = self.read_function(str(ea1))
        self.delete_function(ea1)
        self.save_function(new_func)
        self.db.commit()
def prettify_asm(self, asm_source):
asm = []
for line in asm_source.split("\n"):
if not line.startswith("loc_"):
asm.append("\t" + line)
else:
asm.append(line)
return "\n".join(asm)
    def show_asm_diff(self, item):
        """Show an HTML side-by-side diff of the assembly of a matched pair.

        *item* is a chooser row; item[1]/item[3] hold the hex addresses of the
        primary and secondary functions.
        """
        cur = self.db_cursor()
        # Column 4 (1 or 2) tags which database a row came from; the outer
        # "order by 4" guarantees primary comes first.
        sql = """select *
                 from (
               select prototype, assembly, name, 1
                 from functions
                where address = ?
                  and assembly is not null
        union select prototype, assembly, name, 2
                 from diff.functions
                where address = ?
                  and assembly is not null)
                order by 4 asc"""
        ea1 = str(int(item[1], 16))
        ea2 = str(int(item[3], 16))
        cur.execute(sql, (ea1, ea2))
        rows = cur.fetchall()
        # Both sides must have assembly for a diff to make sense.
        if len(rows) != 2:
            Warning("Sorry, there is no assembly available for either the first or the second database.")
        else:
            row1 = rows[0]
            row2 = rows[1]
            html_diff = HtmlDiff()
            asm1 = self.prettify_asm(row1[1])
            asm2 = self.prettify_asm(row2[1])
            buf1 = "%s proc near\n%s\n%s endp" % (row1[2], asm1, row1[2])
            buf2 = "%s proc near\n%s\n%s endp" % (row2[2], asm2, row2[2])
            src = html_diff.make_file(buf1.split("\n"), buf2.split("\n"))
            title = "Diff assembler %s - %s" % (row1[2], row2[2])
            cdiffer = CHtmlViewer()
            cdiffer.Show(src, title)
        cur.close()
    def show_asm(self, item, primary):
        """Show syntax-highlighted assembly for one function.

        *primary* selects the database: True -> "main", False -> "diff".
        """
        cur = self.db_cursor()
        if primary:
            db = "main"
        else:
            db = "diff"
        ea = str(int(item[1], 16))
        sql = "select prototype, assembly, name from %s.functions where address = ?"
        sql = sql % db
        cur.execute(sql, (ea, ))
        row = cur.fetchone()
        if row is None:
            Warning("Sorry, there is no assembly available for the selected function.")
        else:
            # Pygments inline-styled, line-numbered HTML rendering.
            fmt = HtmlFormatter()
            fmt.noclasses = True
            fmt.linenos = True
            asm = self.prettify_asm(row[1])
            final_asm = "; %s\n%s proc near\n%s\n%s endp\n"
            final_asm = final_asm % (row[0], row[2], asm, row[2])
            src = highlight(final_asm, NasmLexer(), fmt)
            title = "Assembly for %s" % row[2]
            cdiffer = CHtmlViewer()
            cdiffer.Show(src, title)
        cur.close()
def get_cmp_asm_lines(self, asm):
sio = StringIO(asm)
lines = []
get_cmp_asm = self.get_cmp_asm
for line in sio.readlines():
line = line.strip("\n")
lines.append(get_cmp_asm(line))
return "\n".join(lines)
def get_cmp_pseudo_lines(self, pseudo):
if pseudo is None:
return pseudo
# Remove all the comments
tmp = re.sub(" // .*", "", pseudo)
# Now, replace sub_, byte_, word_, dword_, loc_, etc...
for rep in CMP_REPS:
tmp = re.sub(rep + "[a-f0-9A-F]+", rep + "XXXX", tmp)
tmp = re.sub("v[0-9]+", "vXXX", tmp)
tmp = re.sub("a[0-9]+", "aXXX", tmp)
return tmp
def get_cmp_asm(self, asm):
if asm is None:
return asm
tmp = asm.split(";")[0]
tmp = asm.split(" # ")[0]
# Now, replace sub_, byte_, word_, dword_, loc_, etc...
for rep in CMP_REPS:
tmp = re.sub(rep + "[a-f0-9A-F]+", "XXXX", tmp)
reps = ["\+[a-f0-9A-F]+h\+"]
for rep in reps:
tmp = re.sub(rep, "+XXXX+", tmp)
tmp = re.sub("\.\.[a-f0-9A-F]{8}", "XXX", tmp)
return tmp
    def compare_graphs_pass(self, bblocks1, bblocks2, colours1, colours2, is_second = False):
        """One matching pass over two functions' basic blocks.

        Colours perfectly matching blocks white (0xffffff) and, on the first
        pass only, partially matching blocks (same mnemonics, different
        operands) light yellow (0xCCffff).  Returns the updated colour maps.
        """
        dones1 = set()
        dones2 = set()
        # Now compare each basic block from the first function to all the
        # basic blocks in the 2nd function
        for key1 in bblocks1:
            if key1 in dones1:
                continue
            for key2 in bblocks2:
                if key2 in dones2:
                    continue
                # Same number of instructions?
                if len(bblocks1[key1]) == len(bblocks2[key2]):
                    mod = False
                    partial = True
                    i = 0
                    for ins1 in bblocks1[key1]:
                        ins2 = bblocks2[key2][i]
                        # Same mnemonic? The change can be only partial
                        if ins1[1] != ins2[1]:
                            partial = False
                        # Try to compare the assembly after doing some cleaning
                        cmp_asm1 = self.get_cmp_asm(ins1[2])
                        cmp_asm2 = self.get_cmp_asm(ins2[2])
                        if cmp_asm1 != cmp_asm2:
                            mod = True
                            if not partial:
                                continue
                        i += 1
                    if not mod:
                        # Perfect match, we discovered a basic block equal in both
                        # functions
                        colours1[key1] = 0xffffff
                        colours2[key2] = 0xffffff
                        dones1.add(key1)
                        dones2.add(key2)
                        break
                    elif not is_second and partial:
                        # Partial match, we discovered a basic block with the same
                        # mnemonics but something changed
                        #
                        # NOTE:
                        # Do not add the partial matches to the dones lists, as we
                        # can have complete matches after a partial match!
                        colours1[key1] = 0xCCffff
                        colours2[key2] = 0xCCffff
                        break
        return colours1, colours2
def compare_graphs(self, g1, ea1, g2, ea2):
colours1 = {}
colours2 = {}
bblocks1 = g1[0]
bblocks2 = g2[0]
# Consider, by default, all blocks added, news
for key1 in bblocks1:
colours1[key1] = 0xCCCCFF
for key2 in bblocks2:
colours2[key2] = 0xCCCCFF
colours1, colours2 = self.compare_graphs_pass(bblocks1, bblocks2, colours1, colours2, False)
colours1, colours2 = self.compare_graphs_pass(bblocks1, bblocks2, colours1, colours2, True)
return colours1, colours2
    def graph_diff(self, ea1, name1, ea2, name2):
        """Open two side-by-side coloured graph viewers diffing the basic
        blocks of the functions at ea1 (primary) and ea2 (secondary)."""
        g1 = self.get_graph(str(ea1), True)
        g2 = self.get_graph(str(ea2))
        if g1 == ({}, {}) or g2 == ({}, {}):
            Warning("Sorry, graph information is not available for one of the databases.")
            return False
        colours = self.compare_graphs(g1, ea1, g2, ea2)
        title1 = "Graph for %s (primary)" % name1
        title2 = "Graph for %s (secondary)" % name2
        graph1 = CDiffGraphViewer(title1, g1, colours[0])
        graph2 = CDiffGraphViewer(title2, g2, colours[1])
        graph1.Show()
        graph2.Show()
        # Dock the two viewers side by side and schedule a refresh.
        set_dock_pos(title1, title2, DP_RIGHT)
        uitimercallback_t(graph1, 100)
        uitimercallback_t(graph2, 100)
    def get_graph(self, ea1, primary=False):
        """Load a function's flow graph from the database.

        Returns (bb_blocks, bb_relations): bb_blocks maps basic-block address
        to its [address, mnemonic, disasm] instruction list; bb_relations
        maps parent block address to the set of child block addresses.
        """
        if primary:
            db = "main"
        else:
            db = "diff"
        cur = self.db_cursor()
        dones = set()
        sql = """ select bb.address, ins.address, ins.mnemonic, ins.disasm
                    from %s.function_bblocks fb,
                         %s.bb_instructions bbins,
                         %s.instructions ins,
                         %s.basic_blocks bb,
                         %s.functions f
                   where ins.id = bbins.instruction_id
                     and bbins.basic_block_id = bb.id
                     and bb.id = fb.basic_block_id
                     and f.id = fb.function_id
                     and f.address = ?
                   order by bb.address asc""" % (db, db, db, db, db)
        cur.execute(sql, (ea1,))
        bb_blocks = {}
        for row in cur.fetchall():
            bb_ea = str(int(row[0]))
            ins_ea = str(int(row[1]))
            mnem = row[2]
            dis = row[3]
            # Instructions shared between blocks are only recorded once.
            if ins_ea in dones:
                continue
            dones.add(ins_ea)
            try:
                bb_blocks[bb_ea].append([ins_ea, mnem, dis])
            except KeyError:
                bb_blocks[bb_ea] = [ [ins_ea, mnem, dis] ]
        sql = """ select (select address
                          from %s.basic_blocks
                 where id = bbr.parent_id),
                (select address
                          from %s.basic_blocks
                 where id = bbr.child_id)
                   from %s.bb_relations bbr,
                        %s.function_bblocks fbs,
                        %s.basic_blocks bbs,
                        %s.functions f
                  where f.id = fbs.function_id
                    and bbs.id = fbs.basic_block_id
                    and fbs.basic_block_id = bbr.child_id
                    and f.address = ?
                  order by 1 asc, 2 asc""" % (db, db, db, db, db, db)
        cur.execute(sql, (ea1, ))
        rows = cur.fetchall()
        bb_relations = {}
        for row in rows:
            bb_ea1 = str(row[0])
            bb_ea2 = str(row[1])
            try:
                bb_relations[bb_ea1].add(bb_ea2)
            except KeyError:
                bb_relations[bb_ea1] = set([bb_ea2])
        cur.close()
        return bb_blocks, bb_relations
    def show_pseudo(self, item, primary):
        """Show syntax-highlighted pseudo-code for one function.

        *primary* selects the database: True -> "main", False -> "diff".
        """
        cur = self.db_cursor()
        if primary:
            db = "main"
        else:
            db = "diff"
        ea = str(int(item[1], 16))
        sql = "select prototype, pseudocode, name from %s.functions where address = ?"
        sql = sql % db
        cur.execute(sql, (str(ea), ))
        row = cur.fetchone()
        if row is None:
            Warning("Sorry, there is no pseudo-code available for the selected function.")
        else:
            # Pygments inline-styled, line-numbered HTML rendering.
            fmt = HtmlFormatter()
            fmt.noclasses = True
            fmt.linenos = True
            func = "%s\n%s" % (row[0], row[1])
            src = highlight(func, CppLexer(), fmt)
            title = "Pseudo-code for %s" % row[2]
            cdiffer = CHtmlViewer()
            cdiffer.Show(src, title)
        cur.close()
    def show_pseudo_diff(self, item):
        """Show an HTML side-by-side diff of the pseudo-code of a matched
        pair of functions (primary vs diff database)."""
        cur = self.db_cursor()
        # Column 4 (1 or 2) tags the source database; the outer "order by 4"
        # guarantees the primary row comes first.
        sql = """select *
                 from (
               select prototype, pseudocode, name, 1
                 from functions
                where address = ?
                  and pseudocode is not null
        union select prototype, pseudocode, name, 2
                 from diff.functions
                where address = ?
                  and pseudocode is not null)
                order by 4 asc"""
        ea1 = str(int(item[1], 16))
        ea2 = str(int(item[3], 16))
        cur.execute(sql, (ea1, ea2))
        rows = cur.fetchall()
        # Both sides must have pseudo-code for a diff to make sense.
        if len(rows) != 2:
            Warning("Sorry, there is no pseudo-code available for either the first or the second database.")
        else:
            row1 = rows[0]
            row2 = rows[1]
            html_diff = HtmlDiff()
            buf1 = row1[0] + "\n" + row1[1]
            buf2 = row2[0] + "\n" + row2[1]
            src = html_diff.make_file(buf1.split("\n"), buf2.split("\n"))
            title = "Diff pseudo-code %s - %s" % (row1[2], row2[2])
            cdiffer = CHtmlViewer()
            cdiffer.Show(src, title)
        cur.close()
def delete_function(self, ea):
cur = self.db_cursor()
cur.execute("delete from functions where address = ?", (ea, ))
cur.close()
    def do_import_one(self, ea1, ea2, force = False):
        """Apply the diff database's name, prototype, comment and flags for
        the function at ea2 onto the primary function at ea1.

        Auto-generated ("sub_") names are only applied when *force* is True.
        """
        cur = self.db_cursor()
        sql = "select prototype, comment, mangled_function, function_flags from diff.functions where address = ?"
        cur.execute(sql, (ea2,))
        row = cur.fetchone()
        if row is not None:
            proto = row[0]
            comment = row[1]
            name = row[2]
            flags = row[3]
            ea1 = int(ea1)
            if not name.startswith("sub_") or force:
                # If the name collides, retry with _0.._9 suffixes.
                if not MakeNameEx(ea1, name, SN_NOWARN|SN_NOCHECK):
                    for i in xrange(10):
                        if MakeNameEx(ea1, "%s_%d" % (name, i), SN_NOWARN|SN_NOCHECK):
                            break
            # "int()" is the uninformative default prototype; skip it.
            if proto is not None and proto != "int()":
                SetType(ea1, proto)
            if comment is not None and comment != "":
                SetFunctionCmt(ea1, comment, 1)
            if flags is not None:
                SetFunctionFlags(ea1, flags)
        cur.close()
def import_selected(self, items, selected):
# Import all the type libraries from the diff database
self.import_til()
# Import all the struct and enum definitions
self.import_definitions()
new_items = []
for item in selected:
new_items.append(items[item-1])
self.import_items(new_items)
    def import_items(self, items):
        """Import names/comments for every match in *items*, then re-read and
        re-save each touched function so the primary DB stays in sync."""
        to_import = set()
        # Import all the function names and comments
        for item in items:
            ea1 = str(int(item[1], 16))
            ea2 = str(int(item[3], 16))
            self.do_import_one(ea1, ea2)
            to_import.add(ea1)
        try:
            show_wait_box("Updating primary database...")
            for ea in to_import:
                ea = str(ea)
                # Refresh the stored row: delete, re-read from IDA, re-insert.
                new_func = self.read_function(ea)
                self.delete_function(ea)
                self.save_function(new_func)
            self.db.commit()
        finally:
            hide_wait_box()
    def do_import_all(self, items):
        """Import type libraries, struct/enum definitions and every match in
        the chooser."""
        # Import all the type libraries from the diff database
        self.import_til()
        # Import all the struct and enum definitions
        self.import_definitions()
        # Import all the items in the chooser
        self.import_items(items)
def do_import_all_auto(self, items):
# Import all the type libraries from the diff database
self.import_til()
# Import all the struct and enum definitions
self.import_definitions()
# Import all the items in the chooser for sub_* functions
new_items = []
for item in items:
name1 = item[2]
if name1.startswith("sub_"):
new_items.append(item)
self.import_items(new_items)
    def re_diff(self):
        """Close all result choosers and re-run the diff, optionally clearing
        previous matches so only new ones are shown."""
        self.best_chooser.Close()
        self.partial_chooser.Close()
        if self.unreliable_chooser is not None:
            self.unreliable_chooser.Close()
        if self.unmatched_primary is not None:
            self.unmatched_primary.Close()
        if self.unmatched_second is not None:
            self.unmatched_second.Close()
        ret = askyn_c(1, "Do you want to show only the new matches?")
        if ret == -1:
            # Cancelled: keep everything closed, do not re-diff.
            return
        elif ret == 0:
            # Show all matches again: forget what was matched so far.
            self.matched1 = set()
            self.matched2 = set()
        self.diff(self.last_diff_db)
    def import_all(self, items):
        """Import every match and offer to re-run the diff; errors are logged
        rather than propagated (this is a UI entry point)."""
        try:
            self.do_import_all(items)
            msg = "AUTOHIDE DATABASE\nHIDECANCEL\nAll functions were imported. Do you want to relaunch the diffing process?"
            if askyn_c(1, msg) == 1:
                self.db.execute("detach diff")
                # We cannot run that code here or otherwise IDA will crash corrupting the stack
                timeraction_t(self.re_diff, None, 1000)
        except:
            log("import_all(): %s" % str(sys.exc_info()[1]))
            traceback.print_exc()
def import_all_auto(self, items):
try:
self.do_import_all_auto(items)
except:
log("import_all(): %s" % str(sys.exc_info()[1]))
traceback.print_exc()
def equal_db(self):
cur = self.db_cursor()
sql = "select count(*) from program p, diff.program dp where p.md5sum = dp.md5sum"
cur.execute(sql)
row = cur.fetchone()
ret = row[0] == 1
if not ret:
sql = "select count(*) from (select * from functions except select * from diff.functions) x"
cur.execute(sql)
row = cur.fetchone()
else:
log("Same MD5 in both databases")
cur.close()
return row[0] == 0
    def check_callgraph(self):
        """Compare the callgraph prime signatures of both databases and log
        (or warn about) their structural similarity."""
        cur = self.db_cursor()
        sql = """select callgraph_primes, callgraph_all_primes from program
                 union all
                 select callgraph_primes, callgraph_all_primes from diff.program"""
        cur.execute(sql)
        rows = cur.fetchall()
        if len(rows) == 2:
            cg1 = decimal.Decimal(rows[0][0])
            cg_factors1 = json.loads(rows[0][1])
            cg2 = decimal.Decimal(rows[1][0])
            cg_factors2 = json.loads(rows[1][1])
            if cg1 == cg2:
                self.equal_callgraph = True
                log("Callgraph signature for both databases is equal, the programs seem to be 100% equal structurally")
                Warning("Callgraph signature for both databases is equal, the programs seem to be 100% equal structurally")
            else:
                # Pre-seed the factorization cache so difference() does not
                # have to re-factor these huge numbers.
                FACTORS_CACHE[cg1] = cg_factors1
                FACTORS_CACHE[cg2] = cg_factors2
                diff = difference(cg1, cg2)
                total = sum(cg_factors1.values())
                percent = diff * 100. / total
                log("Callgraphs from both programs differ in %f%%" % percent)
        cur.close()
    def find_equal_matches(self):
        """Run the cheap 'exact match' heuristics.

        Records the total function counts of both databases, then matches
        functions that are byte-identical rows, share all names, have equal
        pseudo-code/assembly, equal cleaned-up code, or identical
        address/CFG/mnemonics fingerprints.
        """
        cur = self.db_cursor()
        # Start by calculating the total number of functions in both databases
        sql = """select count(*) total1 from functions
                 union all
                 select count(*) total2 from diff.functions"""
        cur.execute(sql)
        rows = cur.fetchall()
        if len(rows) != 2:
            Warning("Malformed database, only %d rows!" % len(rows))
            raise Exception("Malformed database!")
        self.total_functions1 = rows[0][0]
        self.total_functions2 = rows[1][0]
        # Functions whose entire row is identical in both databases.
        sql = "select address, mangled_function from (select * from functions intersect select * from diff.functions) x"
        cur.execute(sql)
        rows = cur.fetchall()
        choose = self.best_chooser
        if len(rows) > 0:
            for row in rows:
                name = row[1]
                ea = LocByName(name)
                ea2 = row[0]
                choose.add_item(CChooser.Item(ea, name, ea2, name, "100% equal", 1))
                self.matched1.add(name)
                self.matched2.add(name)
        if self.equal_callgraph and not self.ignore_all_names:
            self.find_same_name(self.partial_chooser)
        # The pseudo-code branch requires >= 5 lines to avoid trivial bodies
        # matching each other.
        sql = """select f.address, f.name, df.address, df.name, 'Equal pseudo-code' description
                   from functions f,
                        diff.functions df
                  where f.pseudocode = df.pseudocode
                    and df.pseudocode is not null
                    and f.pseudocode_lines >= 5
                  union
                 select f.address, f.name, df.address, df.name, 'Equal assembly' description
                   from functions f,
                        diff.functions df
                  where f.assembly = df.assembly
                    and df.assembly is not null
               """
        log_refresh("Finding with heuristic 'Equal assembly or pseudo-code'")
        self.add_matches_from_query(sql, choose)
        sql = """ select distinct f.address ea, f.name name1, df.address ea2, df.name name2,
                         'Bytes hash and names' description,
                         f.pseudocode, df.pseudocode,
                         f.assembly, df.assembly,
                         f.pseudocode_primes, df.pseudocode_primes
                    from functions f,
                         diff.functions df
                   where f.names = df.names
                     and f.bytes_hash = df.bytes_hash
                     and f.names != '[]'"""
        log_refresh("Finding with heuristic 'Bytes hash and names'")
        self.add_matches_from_query_ratio(sql, self.best_chooser, self.partial_chooser, self.unreliable_chooser)
        sql = """ select distinct f.address ea, f.name name1, df.address ea2, df.name name2,
                         'Same cleaned up assembly or pseudo-code' description,
                         f.pseudocode, df.pseudocode,
                         f.assembly, df.assembly,
                         f.pseudocode_primes, df.pseudocode_primes
                    from functions f,
                         diff.functions df
                   where f.clean_assembly = df.clean_assembly
                      or f.clean_pseudo = df.clean_pseudo"""
        log_refresh("Finding with heuristic 'Same cleaned up assembly or pseudo-code'")
        self.add_matches_from_query_ratio(sql, self.best_chooser, self.partial_chooser, self.unreliable_chooser)
        sql = """select f.address, f.name, df.address, df.name, 'Same address, nodes, edges and mnemonics' description,
                        f.pseudocode, df.pseudocode,
                        f.assembly, df.assembly,
                        f.pseudocode_primes, df.pseudocode_primes
                   from functions f,
                        diff.functions df
                  where f.rva = df.rva
                    and f.instructions = df.instructions
                    and f.nodes = df.nodes
                    and f.edges = df.edges
                    and f.mnemonics = df.mnemonics"""
        log_refresh("Finding with heuristic 'Same address, nodes, edges and mnemonics'")
        self.add_matches_from_query_ratio(sql, self.best_chooser, self.partial_chooser, None)
        cur.close()
    def decompile_and_get(self, ea):
        """Decompile the function at *ea* with Hex-Rays.

        Stores the AST primes hash and the pseudo-code body lines in
        self.pseudo_hash / self.pseudo and returns the first non-comment
        line (the prototype), or False when decompilation is unavailable
        or fails.
        """
        if not init_hexrays_plugin():
            return False
        f = get_func(ea)
        if f is None:
            return False
        cfunc = decompile(f);
        if cfunc is None:
            # Failed to decompile
            return False
        # Hash the AST into a primes product for structural comparison.
        visitor = CAstVisitor(cfunc)
        visitor.apply_to(cfunc.body, None)
        self.pseudo_hash[ea] = visitor.primes_hash
        sv = cfunc.get_pseudocode();
        self.pseudo[ea] = []
        first_line = None
        for sline in sv:
            line = tag_remove(sline.line);
            # Skip pure comment lines entirely.
            if line.startswith("//"):
                continue
            # The first real line is the prototype; the rest is the body.
            if first_line is None:
                first_line = line
            else:
                self.pseudo[ea].append(line)
        return first_line
def guess_type(self, ea):
t = GuessType(ea)
if not self.use_decompiler_always:
return t
else:
try:
ret = self.decompile_and_get(ea)
if ret:
t = ret
except:
log("Cannot decompile 0x%x: %s" % (ea, str(sys.exc_info()[1])))
return t
def ast_ratio(self, ast1, ast2):
if not self.relaxed_ratio:
return 0
return ast_ratio(ast1, ast2)
    def check_ratio(self, ast1, ast2, pseudo1, pseudo2, asm1, asm2):
        """Return the best similarity ratio (0..1) between two functions,
        computed over AST primes (v3), pseudo-code (v1) and assembly (v2).

        In relaxed mode the cheaper real_quick_ratio is used first and any
        1.0 it reports is double-checked with quick_ratio, since
        real_quick_ratio over-reports equality.
        """
        fratio = quick_ratio
        decimal_values = "{0:.2f}"
        if self.relaxed_ratio:
            fratio = real_quick_ratio
            decimal_values = "{0:.1f}"
        v3 = 0
        ast_done = False
        # Small ASTs are cheap to compare exactly; try that first.
        if self.relaxed_ratio and ast1 is not None and ast2 is not None and max(len(ast1), len(ast2)) < 16:
            ast_done = True
            v3 = self.ast_ratio(ast1, ast2)
            if v3 == 1:
                return 1.0
        v1 = 0
        if pseudo1 is not None and pseudo2 is not None and pseudo1 != "" and pseudo2 != "":
            tmp1 = self.get_cmp_pseudo_lines(pseudo1)
            tmp2 = self.get_cmp_pseudo_lines(pseudo2)
            if tmp1 == "" or tmp2 == "":
                log("Error cleaning pseudo-code!")
                print tmp1
                print tmp2
            else:
                v1 = fratio(tmp1, tmp2)
                v1 = float(decimal_values.format(v1))
                if v1 == 1.0:
                    # If real_quick_ratio returns 1 try again with quick_ratio
                    # because it can result in false positives. If real_quick_ratio
                    # says 'different', there is no point in continuing.
                    if fratio == real_quick_ratio:
                        v1 = quick_ratio(tmp1, tmp2)
                        if v1 == 1.0:
                            return 1.0
        tmp_asm1 = self.get_cmp_asm_lines(asm1)
        tmp_asm2 = self.get_cmp_asm_lines(asm2)
        v2 = fratio(tmp_asm1, tmp_asm2)
        v2 = float(decimal_values.format(v2))
        if v2 == 1:
            # Actually, same as the quick_ratio/real_quick_ratio check done
            # with the pseudo-code
            if fratio == real_quick_ratio:
                v2 = quick_ratio(tmp_asm1, tmp_asm2)
                if v2 == 1.0:
                    return 1.0
        # Fall back to a fuzzy AST comparison if not done exactly above.
        if self.relaxed_ratio and not ast_done:
            v3 = fratio(ast1, ast2)
            v3 = float(decimal_values.format(v3))
            if v3 == 1:
                return 1.0
        r = max(v1, v2, v3)
        return r
def all_functions_matched(self):
return len(self.matched1) == self.total_functions1 or \
len(self.matched2) == self.total_functions2
def add_matches_from_query_ratio(self, sql, best, partial, unreliable=None):
if self.all_functions_matched():
return
cur = self.db_cursor()
cur.execute(sql)
i = 0
t = time.time()
while self.max_processed_rows == 0 or (self.max_processed_rows != 0 and i < self.max_processed_rows):
if time.time() - t > self.timeout:
log("Timeout")
break
i += 1
if i % 50000 == 0:
log("Processed %d rows..." % i)
row = cur.fetchone()
if row is None:
break
ea = str(row[0])
name1 = row[1]
ea2 = row[2]
name2 = row[3]
desc = row[4]
pseudo1 = row[5]
pseudo2 = row[6]
asm1 = row[7]
asm2 = row[8]
ast1 = row[9]
ast2 = row[10]
if name1 in self.matched1 or name2 in self.matched2:
continue
r = self.check_ratio(ast1, ast2, pseudo1, pseudo2, asm1, asm2)
if r == 1:
self.best_chooser.add_item(CChooser.Item(ea, name1, ea2, name2, desc, r))
self.matched1.add(name1)
self.matched2.add(name2)
elif r >= 0.5:
partial.add_item(CChooser.Item(ea, name1, ea2, name2, desc, r))
self.matched1.add(name1)
self.matched2.add(name2)
elif r < 5 and unreliable is not None:
unreliable.add_item(CChooser.Item(ea, name1, ea2, name2, desc, r))
self.matched1.add(name1)
self.matched2.add(name2)
else:
partial.add_item(CChooser.Item(ea, name1, ea2, name2, desc, r))
self.matched1.add(name1)
self.matched2.add(name2)
cur.close()
    def add_matches_from_query_ratio_max(self, sql, best, partial, val):
        """Like add_matches_from_query_ratio, but with a caller-supplied
        threshold *val*: ratio 1.0 goes to the global best chooser, ratios
        above *val* to *best*, everything else to *partial* (if given)."""
        if self.all_functions_matched():
            return
        cur = self.db_cursor()
        cur.execute(sql)
        i = 0
        t = time.time()
        # Stop after max_processed_rows rows (0 disables the limit) or when
        # the configured timeout expires.
        while self.max_processed_rows == 0 or (self.max_processed_rows != 0 and i < self.max_processed_rows):
            if time.time() - t > self.timeout:
                log("Timeout")
                break
            i += 1
            if i % 50000 == 0:
                log("Processed %d rows..." % i)
            row = cur.fetchone()
            if row is None:
                break
            ea = str(row[0])
            name1 = row[1]
            ea2 = row[2]
            name2 = row[3]
            desc = row[4]
            pseudo1 = row[5]
            pseudo2 = row[6]
            asm1 = row[7]
            asm2 = row[8]
            ast1 = row[9]
            ast2 = row[10]
            if name1 in self.matched1 or name2 in self.matched2:
                continue
            r = self.check_ratio(ast1, ast2, pseudo1, pseudo2, asm1, asm2)
            if r == 1 and best != self.best_chooser:
                self.best_chooser.add_item(CChooser.Item(ea, name1, ea2, name2, desc, r))
                self.matched1.add(name1)
                self.matched2.add(name2)
            elif r > val:
                best.add_item(CChooser.Item(ea, name1, ea2, name2, desc, r))
                self.matched1.add(name1)
                self.matched2.add(name2)
            elif partial is not None:
                partial.add_item(CChooser.Item(ea, name1, ea2, name2, desc, r))
                self.matched1.add(name1)
                self.matched2.add(name2)
        cur.close()
    def add_matches_from_query(self, sql, choose):
        """ Warning: use this *only* if the ratio is known to be 1.00 """
        if self.all_functions_matched():
            return
        cur = self.db_cursor()
        cur.execute(sql)
        i = 0
        while 1:
            i += 1
            if i % 1000 == 0:
                log("Processed %d rows..." % i)
            row = cur.fetchone()
            if row is None:
                break
            ea = str(row[0])
            name1 = row[1]
            ea2 = row[2]
            name2 = row[3]
            desc = row[4]
            # Skip anything already matched by an earlier heuristic.
            if name1 in self.matched1 or name2 in self.matched2:
                continue
            # Ratio hard-coded to 1: the caller's query guarantees equality.
            choose.add_item(CChooser.Item(ea, name1, ea2, name2, desc, 1))
            self.matched1.add(name1)
            self.matched2.add(name2)
        cur.close()
    def search_small_differences(self, choose):
        """Match functions with identical CFG metrics whose imported-name
        sets overlap by at least 50%; the overlap fraction is the ratio."""
        cur = self.db_cursor()
        # Same basic blocks, edges, mnemonics, etc... but different names
        sql = """ select distinct f.address ea, f.name name1, df.name name2,
                         f.names, df.names, df.address ea2
                    from functions f,
                         diff.functions df
                   where f.nodes = df.nodes
                     and f.edges = df.edges
                     and f.mnemonics = df.mnemonics
                     and f.cyclomatic_complexity = df.cyclomatic_complexity
                     and f.names != '[]'"""
        cur.execute(sql)
        rows = cur.fetchall()
        for row in rows:
            ea = str(row[0])
            name1 = row[1]
            name2 = row[2]
            if name1 in self.matched1 or name2 in self.matched2:
                continue
            # The names columns are JSON-encoded lists of referenced names.
            s1 = set(json.loads(row[3]))
            s2 = set(json.loads(row[4]))
            total = max(len(s1), len(s2))
            commons = len(s1.intersection(s2))
            ratio = (commons * 1.) / total
            if ratio >= 0.5:
                ea2 = row[5]
                choose.add_item(CChooser.Item(ea, name1, ea2, name2, "Nodes, edges, complexity and mnemonics with small differences", ratio))
                self.matched1.add(name1)
                self.matched2.add(name2)
        cur.close()
        return
    def find_same_name(self, choose):
        """Match functions sharing a (mangled) name across both databases.

        Perfect-ratio pairs go to the best chooser; the rest go to *choose*.
        Auto-generated sub_* names are skipped when ignore_sub_names is set.
        """
        cur = self.db_cursor()
        sql = """select f.address, f.mangled_function, d.address, f.name, d.name, d.mangled_function,
                        f.pseudocode, d.pseudocode,
                        f.assembly, d.assembly,
                        f.pseudocode_primes, d.pseudocode_primes
                   from functions f,
                        diff.functions d
                  where (d.mangled_function = f.mangled_function
                     or d.name = f.name)"""
        log_refresh("Finding with heuristic 'Same name'")
        cur.execute(sql)
        rows = cur.fetchall()
        cur.close()
        if len(rows) > 0 and not self.all_functions_matched():
            for row in rows:
                ea = row[0]
                name = row[1]
                ea2 = row[2]
                name1 = row[3]
                name2 = row[4]
                name2_1 = row[5]
                # Skip pairs where either side (plain or mangled name) was
                # matched by an earlier heuristic.
                if name in self.matched1 or name1 in self.matched1 or \
                   name2 in self.matched2 or name2_1 in self.matched2:
                    continue
                if self.ignore_sub_names and name.startswith("sub_"):
                    continue
                ast1 = row[10]
                ast2 = row[11]
                pseudo1 = row[6]
                pseudo2 = row[7]
                asm1 = row[8]
                asm2 = row[9]
                ratio = self.check_ratio(ast1, ast2, pseudo1, pseudo2, asm1, asm2)
                if float(ratio) == 1.0:
                    self.best_chooser.add_item(CChooser.Item(ea, name, ea2, name, "Perfect match, same name", 1))
                else:
                    choose.add_item(CChooser.Item(ea, name, ea2, name, "Perfect match, same name", ratio))
                # Record both plain and mangled names as consumed.
                self.matched1.add(name)
                self.matched1.add(name1)
                self.matched2.add(name2)
                self.matched2.add(name2_1)
def find_matches(self):
    """Run the main set of diffing heuristics.

    Each heuristic is a SQL query joining the primary database
    ("functions") with the attached secondary one ("diff.functions") on a
    different combination of exported attributes.  Candidate pairs are
    scored with a similarity ratio and routed into the best, partial or
    unreliable choosers depending on the heuristic and the ratio.
    """
    choose = self.partial_chooser

    # Name-based matching is redundant when the call graphs are already
    # known to be equal, and pointless when names are ignored.
    if not self.equal_callgraph and not self.ignore_all_names:
        self.find_same_name(choose)

    # Heuristic: functions equal in all (or most) of the recorded attributes.
    sql = """select f.address, f.name, df.address, df.name,
                    'All attributes' description,
                    f.pseudocode, df.pseudocode,
                    f.assembly, df.assembly,
                    f.pseudocode_primes, df.pseudocode_primes
               from functions f,
                    diff.functions df
              where f.nodes = df.nodes
                and f.edges = df.edges
                and f.indegree = df.indegree
                and f.outdegree = df.outdegree
                and f.size = df.size
                and f.instructions = df.instructions
                and f.mnemonics = df.mnemonics
                and f.names = df.names
                and f.prototype2 = df.prototype2
                and f.cyclomatic_complexity = df.cyclomatic_complexity
                and f.primes_value = df.primes_value
                and f.bytes_hash = df.bytes_hash
                and f.pseudocode_hash1 = df.pseudocode_hash1
                and f.pseudocode_primes = df.pseudocode_primes
                and f.pseudocode_hash2 = df.pseudocode_hash2
                and f.pseudocode_hash3 = df.pseudocode_hash3
                and f.strongly_connected = df.strongly_connected
                and f.loops = df.loops
                and f.tarjan_topological_sort = df.tarjan_topological_sort
                and f.strongly_connected_spp = df.strongly_connected_spp
              union
             select f.address, f.name, df.address, df.name,
                    'Most attributes' description,
                    f.pseudocode, df.pseudocode,
                    f.assembly, df.assembly,
                    f.pseudocode_primes, df.pseudocode_primes
               from functions f,
                    diff.functions df
              where f.nodes = df.nodes
                and f.edges = df.edges
                and f.indegree = df.indegree
                and f.outdegree = df.outdegree
                and f.size = df.size
                and f.instructions = df.instructions
                and f.mnemonics = df.mnemonics
                and f.names = df.names
                and f.prototype2 = df.prototype2
                and f.cyclomatic_complexity = df.cyclomatic_complexity
                and f.primes_value = df.primes_value
                and f.bytes_hash = df.bytes_hash
                and f.strongly_connected = df.strongly_connected
                and f.loops = df.loops
                and f.tarjan_topological_sort = df.tarjan_topological_sort
                and f.strongly_connected_spp = df.strongly_connected_spp"""
    log_refresh("Finding with heuristic 'All or most attributes'")
    self.add_matches_from_query_ratio(sql, self.best_chooser, self.partial_chooser)

    # Heuristic: same RVA plus identical instruction/graph counts, i.e. the
    # same function with possibly re-ordered instructions.
    sql = """select f.address, f.name, df.address, df.name,
                    'Same address, nodes, edges and primes (re-ordered instructions)' description,
                    f.pseudocode, df.pseudocode,
                    f.assembly, df.assembly,
                    f.pseudocode_primes, df.pseudocode_primes
               from functions f,
                    diff.functions df
              where f.rva = df.rva
                and f.instructions = df.instructions
                and f.nodes = df.nodes
                and f.edges = df.edges
                and f.primes_value = df.primes_value
                and f.nodes > 3"""
    log_refresh("Finding with heuristic 'Same address, nodes, edges and primes (re-ordered instructions)'")
    self.add_matches_from_query_ratio_max(sql, self.partial_chooser, self.unreliable_chooser, 0.5)

    # Heuristic: identical sets of callee/imported names plus equal graph size.
    sql = """select distinct f.address ea, f.name name1, df.address ea2, df.name name2,
                    'Import names hash',
                    f.pseudocode, df.pseudocode,
                    f.assembly, df.assembly,
                    f.pseudocode_primes, df.pseudocode_primes
               from functions f,
                    diff.functions df
              where f.names = df.names
                and f.names != '[]'
                and f.nodes = df.nodes
                and f.edges = df.edges
                and f.instructions = df.instructions"""
    log_refresh("Finding with heuristic 'Import names hash'")
    self.add_matches_from_query_ratio(sql, self.best_chooser, self.partial_chooser)

    # Heuristic: graph shape + mnemonics + names + prototype; the first half
    # of the union additionally requires matching in/out degrees and a
    # non-trivial graph.
    sql = """select f.address ea, f.name name1, df.address ea2, df.name name2,
                    'Nodes, edges, complexity, mnemonics, names, prototype2, in-degree and out-degree',
                    f.pseudocode, df.pseudocode,
                    f.assembly, df.assembly,
                    f.pseudocode_primes, df.pseudocode_primes
               from functions f,
                    diff.functions df
              where f.nodes = df.nodes
                and f.edges = df.edges
                and f.mnemonics = df.mnemonics
                and f.names = df.names
                and f.cyclomatic_complexity = df.cyclomatic_complexity
                and f.prototype2 = df.prototype2
                and f.indegree = df.indegree
                and f.outdegree = df.outdegree
                and f.nodes > 3
                and f.edges > 3
                and f.names != '[]'
              union
             select f.address ea, f.name name1, df.address ea2, df.name name2,
                    'Nodes, edges, complexity, mnemonics, names and prototype2' description,
                    f.pseudocode, df.pseudocode,
                    f.assembly, df.assembly,
                    f.pseudocode_primes, df.pseudocode_primes
               from functions f,
                    diff.functions df
              where f.nodes = df.nodes
                and f.edges = df.edges
                and f.mnemonics = df.mnemonics
                and f.names = df.names
                and f.names != '[]'
                and f.cyclomatic_complexity = df.cyclomatic_complexity
                and f.prototype2 = df.prototype2"""
    log_refresh("Finding with heuristic 'Nodes, edges, complexity, mnemonics, names, prototype, in-degree and out-degree'")
    self.add_matches_from_query_ratio(sql, self.partial_chooser, self.partial_chooser)

    # Heuristic: same mnemonics list, instruction count and callee names.
    sql = """select f.address ea, f.name name1, df.address ea2, df.name name2,
                    'Mnemonics and names' description,
                    f.pseudocode, df.pseudocode,
                    f.assembly, df.assembly,
                    f.pseudocode_primes, df.pseudocode_primes
               from functions f,
                    diff.functions df
              where f.mnemonics = df.mnemonics
                and f.instructions = df.instructions
                and f.names = df.names
                and f.names != '[]'"""
    log_refresh("Finding with heuristic 'Mnemonics and names'")
    self.add_matches_from_query_ratio(sql, choose, choose)

    # Heuristic: same mnemonics small-primes-product (order independent) for
    # functions of identical, non-trivial, size.
    sql = """select f.address ea, f.name name1, df.address ea2, df.name name2,
                    'Mnemonics small-primes-product' description,
                    f.pseudocode, df.pseudocode,
                    f.assembly, df.assembly,
                    f.pseudocode_primes, df.pseudocode_primes
               from functions f,
                    diff.functions df
              where f.mnemonics_spp = df.mnemonics_spp
                and f.instructions = df.instructions
                and df.instructions > 5"""
    log_refresh("Finding with heuristic 'Mnemonics small-primes-product'")
    self.add_matches_from_query_ratio(sql, choose, choose)

    # Search using some of the previous criterias but calculating the
    # edit distance
    log_refresh("Finding with heuristic 'Small names difference'")
    self.search_small_differences(choose)

    # Heuristic: matching pseudo-code fuzzy hashes.  In slow mode all three
    # hash variants are tried; otherwise only the first one.
    if self.slow_heuristics:
        sql = """select distinct f.address, f.name, df.address, df.name, 'Pseudo-code fuzzy hash' description,
                        f.pseudocode, df.pseudocode,
                        f.assembly, df.assembly,
                        f.pseudocode_primes, df.pseudocode_primes
                   from functions f,
                        diff.functions df
                  where df.pseudocode_hash1 = f.pseudocode_hash1
                     or df.pseudocode_hash2 = f.pseudocode_hash2
                     or df.pseudocode_hash3 = f.pseudocode_hash3"""
        log_refresh("Finding with heuristic 'Pseudo-code fuzzy hashes'")
        self.add_matches_from_query_ratio(sql, self.best_chooser, choose)
    else:
        sql = """select distinct f.address, f.name, df.address, df.name, 'Pseudo-code fuzzy hash' description,
                        f.pseudocode, df.pseudocode,
                        f.assembly, df.assembly,
                        f.pseudocode_primes, df.pseudocode_primes
                   from functions f,
                        diff.functions df
                  where df.pseudocode_hash1 = f.pseudocode_hash1"""
        log_refresh("Finding with heuristic 'Pseudo-code fuzzy hash'")
        self.add_matches_from_query_ratio(sql, self.best_chooser, choose)

    # Heuristic: same pseudo-code line count and same callee names.
    # NOTE(review): this query selects f.pseudocode/df.pseudocode in the
    # columns normally carrying assembly -- presumably deliberate so the
    # ratio is computed from pseudo-code only; confirm before changing.
    sql = """select distinct f.address, f.name, df.address, df.name, 'Similar pseudo-code and names' description,
                    f.pseudocode, df.pseudocode,
                    f.pseudocode, df.pseudocode,
                    f.pseudocode_primes, df.pseudocode_primes
               from functions f,
                    diff.functions df
              where f.pseudocode_lines = df.pseudocode_lines
                and f.names = df.names
                and df.names != '[]'
                and df.pseudocode_lines > 5
                and df.pseudocode is not null
                and f.pseudocode is not null"""
    log_refresh("Finding with heuristic 'Similar pseudo-code and names'")
    self.add_matches_from_query_ratio(sql, self.best_chooser, self.partial_chooser, self.unreliable_chooser)

    if self.slow_heuristics:
        # Heuristic: same pseudo-code line count alone (no name constraint).
        sql = """select distinct f.address, f.name, df.address, df.name, 'Similar pseudo-code' description,
                        f.pseudocode, df.pseudocode,
                        f.pseudocode, df.pseudocode,
                        f.pseudocode_primes, df.pseudocode_primes
                   from functions f,
                        diff.functions df
                  where f.pseudocode_lines = df.pseudocode_lines
                    and df.pseudocode_lines > 5
                    and df.pseudocode is not null
                    and f.pseudocode is not null"""
        log_refresh("Finding with heuristic 'Similar pseudo-code'")
        self.add_matches_from_query_ratio_max(sql, choose, self.unreliable_chooser, 0.6)

    # Heuristic: identical pseudo-code AST primes for big enough functions.
    sql = """select distinct f.address, f.name, df.address, df.name, 'Pseudo-code fuzzy AST hash' description,
                    f.pseudocode, df.pseudocode,
                    f.assembly, df.assembly,
                    f.pseudocode_primes, df.pseudocode_primes
               from functions f,
                    diff.functions df
              where df.pseudocode_primes = f.pseudocode_primes
                and f.pseudocode_lines > 5
                and length(f.pseudocode_primes) >= 35"""
    log_refresh("Finding with heuristic 'Pseudo-code fuzzy AST hash'")
    self.add_matches_from_query_ratio(sql, self.best_chooser, choose)

    if self.slow_heuristics:
        # Heuristic: only the first 16 characters of any fuzzy hash match.
        sql = """select distinct f.address, f.name, df.address, df.name, 'Partial pseudo-code fuzzy hash' description,
                        f.pseudocode, df.pseudocode,
                        f.assembly, df.assembly,
                        f.pseudocode_primes, df.pseudocode_primes
                   from functions f,
                        diff.functions df
                  where substr(df.pseudocode_hash1, 1, 16) = substr(f.pseudocode_hash1, 1, 16)
                     or substr(df.pseudocode_hash2, 1, 16) = substr(f.pseudocode_hash2, 1, 16)
                     or substr(df.pseudocode_hash3, 1, 16) = substr(f.pseudocode_hash3, 1, 16)"""
        log_refresh("Finding with heuristic 'Partial pseudo-code fuzzy hash'")
        self.add_matches_from_query_ratio_max(sql, choose, self.unreliable_chooser, 0.5)

    # Heuristic: same Tarjan topological sort hash for functions with a
    # non-trivial number of strongly connected components.
    sql = """select f.address, f.name, df.address, df.name,
                    'Topological sort hash' description,
                    f.pseudocode, df.pseudocode,
                    f.assembly, df.assembly,
                    f.pseudocode_primes, df.pseudocode_primes
               from functions f,
                    diff.functions df
              where f.strongly_connected = df.strongly_connected
                and f.tarjan_topological_sort = df.tarjan_topological_sort
                and f.strongly_connected > 3"""
    log_refresh("Finding with heuristic 'Topological sort hash'")
    self.add_matches_from_query_ratio(sql, self.best_chooser, self.partial_chooser, self.unreliable_chooser)

    # Heuristic: same names, prototype and a high cyclomatic complexity.
    sql = """select f.address, f.name, df.address, df.name, 'Same high complexity, prototype and names' description,
                    f.pseudocode, df.pseudocode,
                    f.assembly, df.assembly,
                    f.pseudocode_primes, df.pseudocode_primes
               from functions f,
                    diff.functions df
              where f.names = df.names
                and f.cyclomatic_complexity = df.cyclomatic_complexity
                and f.cyclomatic_complexity >= 20
                and f.prototype2 = df.prototype2
                and df.names != '[]'"""
    log_refresh("Finding with heuristic 'Same high complexity, prototype and names'")
    self.add_matches_from_query_ratio(sql, choose, choose)

    # Heuristic: same names and a (slightly lower) high complexity, without
    # requiring the prototype to match.
    sql = """select f.address, f.name, df.address, df.name, 'Same high complexity and names' description,
                    f.pseudocode, df.pseudocode,
                    f.assembly, df.assembly,
                    f.pseudocode_primes, df.pseudocode_primes
               from functions f,
                    diff.functions df
              where f.names = df.names
                and f.cyclomatic_complexity = df.cyclomatic_complexity
                and f.cyclomatic_complexity >= 15
                and df.names != '[]'"""
    log_refresh("Finding with heuristic 'Same high complexity and names'")
    self.add_matches_from_query_ratio_max(sql, choose, self.unreliable_chooser, 0.5)

    # Heuristic: same number of strongly connected components.  The slow
    # variant relaxes the threshold (> 1 instead of > 3).
    if self.slow_heuristics:
        sql = """select f.address, f.name, df.address, df.name, 'Strongly connected components' description,
                        f.pseudocode, df.pseudocode,
                        f.assembly, df.assembly,
                        f.pseudocode_primes, df.pseudocode_primes
                   from functions f,
                        diff.functions df
                  where f.strongly_connected = df.strongly_connected
                    and df.strongly_connected > 1
                    and f.nodes > 5 and df.nodes > 5
                    and f.strongly_connected_spp > 1
                    and df.strongly_connected_spp > 1"""
        log_refresh("Finding with heuristic 'Strongly connected components'")
        self.add_matches_from_query_ratio_max(sql, self.partial_chooser, None, 0.80)
    else:
        sql = """select f.address, f.name, df.address, df.name, 'Strongly connected components' description,
                        f.pseudocode, df.pseudocode,
                        f.assembly, df.assembly,
                        f.pseudocode_primes, df.pseudocode_primes
                   from functions f,
                        diff.functions df
                  where f.strongly_connected = df.strongly_connected
                    and df.strongly_connected > 3
                    and f.nodes > 5 and df.nodes > 5
                    and f.strongly_connected_spp > 1
                    and df.strongly_connected_spp > 1"""
        log_refresh("Finding with heuristic 'Strongly connected components'")
        self.add_matches_from_query_ratio_max(sql, self.partial_chooser, None, 0.80)

    if self.slow_heuristics:
        # Heuristic: same loop count for non-trivial graphs (slow mode only).
        sql = """select f.address, f.name, df.address, df.name, 'Loop count' description,
                        f.pseudocode, df.pseudocode,
                        f.assembly, df.assembly,
                        f.pseudocode_primes, df.pseudocode_primes
                   from functions f,
                        diff.functions df
                  where f.loops = df.loops
                    and df.loops > 1
                    and f.nodes > 3 and df.nodes > 3"""
        log_refresh("Finding with heuristic 'Loop count'")
        self.add_matches_from_query_ratio_max(sql, self.partial_chooser, None, 0.49)

    # Heuristic: identical strongly-connected-components small-primes-product.
    sql = """select f.address, f.name, df.address, df.name, 'Strongly connected components small-primes-product' description,
                    f.pseudocode, df.pseudocode,
                    f.assembly, df.assembly,
                    f.pseudocode_primes, df.pseudocode_primes
               from functions f,
                    diff.functions df
              where f.strongly_connected_spp = df.strongly_connected_spp
                and df.strongly_connected_spp > 1"""
    log_refresh("Finding with heuristic 'Strongly connected components small-primes-product'")
    self.add_matches_from_query_ratio(sql, self.best_chooser, self.partial_chooser, self.unreliable_chooser)

    # Heuristic: exactly the same list of callee names, in the same order.
    sql = """select f.address, f.name, df.address, df.name, 'Same names and order' description,
                    f.pseudocode, df.pseudocode,
                    f.assembly, df.assembly,
                    f.pseudocode_primes, df.pseudocode_primes
               from functions f,
                    diff.functions df
              where f.names = df.names
                and df.names != '[]'"""
    log_refresh("Finding with heuristic 'Same names and order'")
    self.add_matches_from_query_ratio(sql, choose, choose)

    # Heuristic: same flow-graph shape (nodes/edges/SCCs) for graphs with
    # more than 4 nodes.
    sql = """select f.address, f.name, df.address, df.name,
                    'Same nodes, edges and strongly connected components' description,
                    f.pseudocode, df.pseudocode,
                    f.assembly, df.assembly,
                    f.pseudocode_primes, df.pseudocode_primes
               from functions f,
                    diff.functions df
              where f.nodes = df.nodes
                and f.edges = df.edges
                and f.strongly_connected = df.strongly_connected
                and df.nodes > 4"""
    log_refresh("Finding with heuristic 'Same nodes, edges and strongly connected components'")
    self.add_matches_from_query_ratio(sql, self.best_chooser, choose, self.unreliable_chooser)
def find_experimental_matches(self):
    """Run the experimental (least trusted) diffing heuristics.

    These heuristics either target very small functions, whose similarity
    ratios are intrinsically noisy, or are expensive queries only enabled
    in slow mode.  Results go mostly to the partial and unreliable
    choosers.
    """
    choose = self.unreliable_chooser

    if self.slow_heuristics:
        # Heuristic: same (small) number of pseudo-code lines.  The
        # pseudo-code is deliberately selected in the assembly columns too
        # so the ratio is computed from pseudo-code only.
        sql = """select distinct f.address, f.name, df.address, df.name, 'Similar small pseudo-code' description,
                        f.pseudocode, df.pseudocode,
                        f.pseudocode, df.pseudocode,
                        f.pseudocode_primes, df.pseudocode_primes
                   from functions f,
                        diff.functions df
                  where f.pseudocode_lines = df.pseudocode_lines
                    and df.pseudocode_lines <= 5
                    and df.pseudocode is not null
                    and f.pseudocode is not null"""
        log_refresh("Finding with heuristic 'Similar small pseudo-code'")
        self.add_matches_from_query_ratio_max(sql, self.partial_chooser, choose, 0.49)

    # Heuristic: identical pseudo-code AST primes for tiny functions.
    sql = """select distinct f.address, f.name, df.address, df.name, 'Small pseudo-code fuzzy AST hash' description,
                    f.pseudocode, df.pseudocode,
                    f.assembly, df.assembly,
                    f.pseudocode_primes, df.pseudocode_primes
               from functions f,
                    diff.functions df
              where df.pseudocode_primes = f.pseudocode_primes
                and f.pseudocode_lines <= 5"""
    log_refresh("Finding with heuristic 'Small pseudo-code fuzzy AST hash'")
    self.add_matches_from_query_ratio(sql, self.partial_chooser, choose)

    # Heuristic: byte-for-byte identical (small) pseudo-code.
    sql = """select f.address, f.name, df.address, df.name, 'Equal small pseudo-code' description,
                    f.pseudocode, df.pseudocode,
                    f.assembly, df.assembly,
                    f.pseudocode_primes, df.pseudocode_primes
               from functions f,
                    diff.functions df
              where f.pseudocode = df.pseudocode
                and df.pseudocode is not null
                and f.pseudocode_lines < 5"""
    log_refresh("Finding with heuristic 'Equal small pseudo-code'")
    self.add_matches_from_query_ratio(sql, self.best_chooser, self.partial_chooser)

    # Heuristic: same names, prototype and a *low* cyclomatic complexity
    # (< 20).  BUG FIX: the description literal used to read 'Same high
    # complexity, prototype and names' -- copy-pasted from find_matches()
    # -- even though the filter is the low-complexity complement and the
    # log message below already said "low".
    sql = """select f.address, f.name, df.address, df.name, 'Same low complexity, prototype and names' description,
                    f.pseudocode, df.pseudocode,
                    f.assembly, df.assembly,
                    f.pseudocode_primes, df.pseudocode_primes
               from functions f,
                    diff.functions df
              where f.names = df.names
                and f.cyclomatic_complexity = df.cyclomatic_complexity
                and f.cyclomatic_complexity < 20
                and f.prototype2 = df.prototype2
                and df.names != '[]'"""
    log_refresh("Finding with heuristic 'Same low complexity, prototype and names'")
    self.add_matches_from_query_ratio_max(sql, self.partial_chooser, choose, 0.5)

    # Heuristic: same names and a low complexity, prototype ignored.
    sql = """select f.address, f.name, df.address, df.name, 'Same low complexity and names' description,
                    f.pseudocode, df.pseudocode,
                    f.assembly, df.assembly,
                    f.pseudocode_primes, df.pseudocode_primes
               from functions f,
                    diff.functions df
              where f.names = df.names
                and f.cyclomatic_complexity = df.cyclomatic_complexity
                and f.cyclomatic_complexity < 15
                and df.names != '[]'"""
    log_refresh("Finding with heuristic 'Same low complexity and names'")
    self.add_matches_from_query_ratio_max(sql, self.partial_chooser, choose, 0.5)

    if self.slow_heuristics:
        # For large databases (>25k functions) it may cause, for a reason,
        # the following error: OperationalError: database or disk is full
        sql = """select f.address, f.name, df.address, df.name,
                        'Same graph' description,
                        f.pseudocode, df.pseudocode,
                        f.assembly, df.assembly,
                        f.pseudocode_primes, df.pseudocode_primes
                   from functions f,
                        diff.functions df
                  where f.nodes = df.nodes
                    and f.edges = df.edges
                    and f.indegree = df.indegree
                    and f.outdegree = df.outdegree
                    and f.cyclomatic_complexity = df.cyclomatic_complexity
                    and f.strongly_connected = df.strongly_connected
                    and f.loops = df.loops
                    and f.tarjan_topological_sort = df.tarjan_topological_sort
                    and f.strongly_connected_spp = df.strongly_connected_spp
               order by
                    case when f.size = df.size then 1 else 0 end +
                    case when f.instructions = df.instructions then 1 else 0 end +
                    case when f.mnemonics = df.mnemonics then 1 else 0 end +
                    case when f.names = df.names then 1 else 0 end +
                    case when f.prototype2 = df.prototype2 then 1 else 0 end +
                    case when f.primes_value = df.primes_value then 1 else 0 end +
                    case when f.bytes_hash = df.bytes_hash then 1 else 0 end +
                    case when f.pseudocode_hash1 = df.pseudocode_hash1 then 1 else 0 end +
                    case when f.pseudocode_primes = df.pseudocode_primes then 1 else 0 end +
                    case when f.pseudocode_hash2 = df.pseudocode_hash2 then 1 else 0 end +
                    case when f.pseudocode_hash3 = df.pseudocode_hash3 then 1 else 0 end DESC"""
        log_refresh("Finding with heuristic 'Same graph'")
        self.add_matches_from_query_ratio(sql, self.best_chooser, self.partial_chooser, self.unreliable_chooser)
def find_unreliable_matches(self):
    """Run the heuristics known to produce many false positives.

    These match on very loose attributes (loop counts, complexity alone,
    graph shape without names), so the results are routed mostly to the
    partial and unreliable choosers.
    """
    choose = self.unreliable_chooser

    if self.slow_heuristics:
        # Heuristic: same number of strongly connected components only.
        sql = """select f.address, f.name, df.address, df.name, 'Strongly connected components' description,
                        f.pseudocode, df.pseudocode,
                        f.assembly, df.assembly,
                        f.pseudocode_primes, df.pseudocode_primes
                   from functions f,
                        diff.functions df
                  where f.strongly_connected = df.strongly_connected
                    and df.strongly_connected > 2"""
        log_refresh("Finding with heuristic 'Strongly connected components'")
        self.add_matches_from_query_ratio_max(sql, self.partial_chooser, choose, 0.54)

        # Heuristic: same loop count only.
        sql = """select f.address, f.name, df.address, df.name, 'Loop count' description,
                        f.pseudocode, df.pseudocode,
                        f.assembly, df.assembly,
                        f.pseudocode_primes, df.pseudocode_primes
                   from functions f,
                        diff.functions df
                  where f.loops = df.loops
                    and df.loops > 1"""
        log_refresh("Finding with heuristic 'Loop count'")
        self.add_matches_from_query_ratio(sql, self.partial_chooser, choose)

    # Heuristic: identical raw bytes hash and instruction count.
    sql = """select distinct f.address ea, f.name name1, df.address ea2, df.name name2,
                    'Bytes hash' description,
                    f.pseudocode, df.pseudocode,
                    f.assembly, df.assembly,
                    f.pseudocode_primes, df.pseudocode_primes
               from functions f,
                    diff.functions df
              where f.bytes_hash = df.bytes_hash
                and f.instructions = df.instructions"""
    log_refresh("Finding with heuristic 'Bytes hash'")
    self.add_matches_from_query_ratio(sql, self.best_chooser, self.partial_chooser, self.unreliable_chooser)

    # Heuristic: graph shape + complexity + mnemonics.
    sql = """select distinct f.address ea, f.name name1, df.address ea2, df.name name2,
                    'Nodes, edges, complexity and mnemonics' description,
                    f.pseudocode, df.pseudocode,
                    f.assembly, df.assembly,
                    f.pseudocode_primes, df.pseudocode_primes
               from functions f,
                    diff.functions df
              where f.nodes = df.nodes
                and f.edges = df.edges
                and f.mnemonics = df.mnemonics
                and f.cyclomatic_complexity = df.cyclomatic_complexity
                and f.nodes > 1 and f.edges > 0"""
    log_refresh("Finding with heuristic 'Nodes, edges, complexity and mnemonics'")
    self.add_matches_from_query_ratio(sql, self.best_chooser, self.partial_chooser)

    # Heuristic: graph shape + complexity + a non-default prototype.
    sql = """select distinct f.address ea, f.name name1, df.address ea2, df.name name2,
                    'Nodes, edges, complexity and prototype' description,
                    f.pseudocode, df.pseudocode,
                    f.assembly, df.assembly,
                    f.pseudocode_primes, df.pseudocode_primes
               from functions f,
                    diff.functions df
              where f.nodes = df.nodes
                and f.edges = df.edges
                and f.prototype2 = df.prototype2
                and f.cyclomatic_complexity = df.cyclomatic_complexity
                and f.prototype2 != 'int()'"""
    log_refresh("Finding with heuristic 'Nodes, edges, complexity and prototype'")
    self.add_matches_from_query_ratio(sql, self.partial_chooser, choose)

    # Heuristic: graph shape + complexity + in/out degrees.
    sql = """select distinct f.address ea, f.name name1, df.address ea2, df.name name2,
                    'Nodes, edges, complexity, in-degree and out-degree' description,
                    f.pseudocode, df.pseudocode,
                    f.assembly, df.assembly,
                    f.pseudocode_primes, df.pseudocode_primes
               from functions f,
                    diff.functions df
              where f.nodes = df.nodes
                and f.edges = df.edges
                and f.cyclomatic_complexity = df.cyclomatic_complexity
                and f.nodes > 1 and f.edges > 0
                and f.indegree = df.indegree
                and f.outdegree = df.outdegree"""
    log_refresh("Finding with heuristic 'Nodes, edges, complexity, in-degree and out-degree'")
    self.add_matches_from_query_ratio(sql, self.partial_chooser, choose)

    # Heuristic: graph shape + complexity only (loosest graph match).
    sql = """select distinct f.address ea, f.name name1, df.address ea2, df.name name2,
                    'Nodes, edges and complexity' description,
                    f.pseudocode, df.pseudocode,
                    f.assembly, df.assembly,
                    f.pseudocode_primes, df.pseudocode_primes
               from functions f,
                    diff.functions df
              where f.nodes = df.nodes
                and f.edges = df.edges
                and f.cyclomatic_complexity = df.cyclomatic_complexity
                and f.nodes > 1 and f.edges > 0"""
    log_refresh("Finding with heuristic 'Nodes, edges and complexity'")
    self.add_matches_from_query_ratio(sql, self.partial_chooser, choose)

    # Heuristic: same pseudo-code line count.
    # NOTE(review): the description says 'Similar small pseudo-code' but the
    # filter requires pseudocode_lines > 5 (i.e. not small) -- looks like a
    # leftover label from the experimental variant; confirm intent.
    sql = """select f.address, f.name, df.address, df.name, 'Similar small pseudo-code' description,
                    f.pseudocode, df.pseudocode,
                    f.assembly, df.assembly,
                    f.pseudocode_primes, df.pseudocode_primes
               from functions f,
                    diff.functions df
              where df.pseudocode is not null
                and f.pseudocode is not null
                and f.pseudocode_lines = df.pseudocode_lines
                and df.pseudocode_lines > 5"""
    log_refresh("Finding with heuristic 'Similar small pseudo-code'")
    self.add_matches_from_query_ratio_max(sql, self.partial_chooser, self.unreliable_chooser, 0.5)

    # Heuristic: same, very high, cyclomatic complexity alone.
    sql = """select f.address, f.name, df.address, df.name, 'Same high complexity' description,
                    f.pseudocode, df.pseudocode,
                    f.assembly, df.assembly,
                    f.pseudocode_primes, df.pseudocode_primes
               from functions f,
                    diff.functions df
              where f.cyclomatic_complexity = df.cyclomatic_complexity
                and f.cyclomatic_complexity >= 50"""
    log_refresh("Finding with heuristic 'Same high complexity'")
    self.add_matches_from_query_ratio(sql, self.partial_chooser, choose)
def find_unmatched(self):
    """Populate the "Unmatched in primary/secondary" choosers.

    A function whose (possibly demangled) name was never recorded in
    self.matched1/self.matched2 by the matching heuristics is considered
    unmatched.
    """
    cur = self.db_cursor()

    # Functions of the primary database without a match in the secondary.
    cur.execute("select name from functions")
    rows = cur.fetchall()
    if len(rows) > 0:
        chooser = CChooser("Unmatched in secondary", self, False)
        for row in rows:
            func_name = row[0]
            demangled = Demangle(str(func_name), INF_SHORT_DN)
            if demangled is not None:
                func_name = demangled
            if func_name not in self.matched1:
                chooser.add_item(CChooser.Item(LocByName(str(func_name)), func_name))
        self.unmatched_second = chooser

    # Functions of the secondary (diff) database without a match in the
    # primary one; their addresses come straight from the exported rows.
    cur.execute("select name, address from diff.functions")
    rows = cur.fetchall()
    if len(rows) > 0:
        chooser = CChooser("Unmatched in primary", self, False)
        for row in rows:
            func_name = row[0]
            demangled = Demangle(str(func_name), INF_SHORT_DN)
            if demangled is not None:
                func_name = demangled
            if func_name not in self.matched2:
                chooser.add_item(CChooser.Item(row[1], func_name))
        self.unmatched_primary = chooser

    cur.close()
def create_choosers(self):
    """Instantiate the three result choosers used to report matches."""
    self.best_chooser = CChooser("Best matches", self)
    self.partial_chooser = CChooser("Partial matches", self)
    self.unreliable_chooser = CChooser("Unreliable matches", self)
def show_choosers(self, force=False):
    """Display every chooser that holds at least one item.

    The best and partial choosers are always created; the remaining ones
    may be None depending on which heuristics ran.
    """
    if self.best_chooser.items:
        self.best_chooser.show(force)
    if self.partial_chooser.items:
        self.partial_chooser.show(force)

    for optional in (self.unreliable_chooser,
                     self.unmatched_primary,
                     self.unmatched_second):
        if optional is not None and optional.items:
            optional.show(force)
def register_menu(self):
    """Register the IDA menu entry and the F3 hotkey to re-open results."""
    global g_bindiff
    # Keep a global reference so the menu callback can reach this instance.
    g_bindiff = self
    idaapi.add_menu_item("Edit/Plugins/", "Diaphora - Show results", "F3", 0, show_choosers, ())
    Warning("""AUTOHIDE REGISTRY\nIf you close one tab you can always re-open it by pressing F3
or selecting Edit -> Plugins -> Diaphora - Show results""")
def diff(self, db):
    """Diff the current export against the exported database @db.

    Attaches @db as schema "diff", validates that it is a Diaphora export
    of a compatible version, then runs the configured heuristic passes and
    shows the result choosers.

    Returns True on success, False when @db is not a valid or compatible
    export.
    """
    self.last_diff_db = db
    cur = self.db_cursor()
    cur.execute('attach "%s" as diff' % db)

    try:
        cur.execute("select value from diff.version")
    except:
        # Deliberately broad: any failure here means @db is not an export.
        log("Error: %s " % sys.exc_info()[1])
        # BUG FIX: error message read "does not like a valid".
        Warning("The selected file does not look like a valid SQLite exported database!")
        cur.close()
        return False

    row = cur.fetchone()
    if not row:
        Warning("Invalid database!")
        cur.close()  # BUG FIX: the cursor used to leak on this early return.
        return False

    if row[0] != VERSION_VALUE:
        Warning("The database is from a different version (current %s, database %s)!" % (VERSION_VALUE, row[0]))
        cur.close()  # BUG FIX: the cursor used to leak on this early return.
        return False

    # Create the choosers
    self.create_choosers()

    try:
        log_refresh("Performing diffing...", True)

        do_continue = True
        if self.equal_db():
            log("The databases seems to be 100% equal")
            if askyn_c(0, "HIDECANCEL\nThe databases seems to be 100% equal. Do you want to continue anyway?") != 1:
                do_continue = False

        if do_continue:
            # Compare the call graphs
            self.check_callgraph()

            # Find the unmodified functions
            log_refresh("Finding best matches...")
            self.find_equal_matches()

            # Find the modified functions
            log_refresh("Finding partial matches")
            self.find_matches()

            if self.unreliable:
                # Find using likely unreliable methods modified functions
                log_refresh("Finding probably unreliable matches")
                self.find_unreliable_matches()

            if self.experimental:
                # Find using experimental methods modified functions
                log_refresh("Finding experimental matches")
                self.find_experimental_matches()

            # Show the list of unmatched functions in both databases
            log_refresh("Finding unmatched functions")
            self.find_unmatched()

            # And, finally, show the list of best and partial matches and
            # register the hotkey for re-opening results
            self.show_choosers()
            self.register_menu()
            log("Done")
    finally:
        cur.close()
        hide_wait_box()
    return True
#-----------------------------------------------------------------------
def remove_file(filename):
    """Remove @filename, falling back to emptying the SQLite database.

    Normally just deletes the file.  If deletion fails (e.g. the handle is
    still held by IDA on Windows), the function instead drops every
    Diaphora table so a fresh export can reuse the file.
    """
    try:
        os.remove(filename)
    except OSError:
        # Fix for Bug #5: https://github.com/joxeankoret/diaphora/issues/5
        #
        # For some reason, in Windows, the handle to the SQLite database is
        # not closed, and I really try to be sure that all the databases are
        # detached, no cursor is leaked, etc... So, in case we cannot remove
        # the database file because it's still being used by IDA in Windows
        # for some unknown reason, just drop the database's tables and after
        # that continue normally.
        tables = ["functions", "program", "program_data", "version",
                  "instructions", "basic_blocks", "bb_relations",
                  "bb_instructions", "function_bblocks"]
        # NOTE: the original code created a cursor it never used; the
        # drops run directly on the connection instead.
        with sqlite3.connect(filename) as db:
            for table in tables:
                db.execute("drop table if exists %s" % table)
class BinDiffOptions:
    """Bag of options controlling the export and diffing process.

    Every option can be overridden via keyword argument; defaults are
    computed from the currently open IDA database.  Note that the defaults
    (including the IDA API calls) are evaluated eagerly, even when the
    corresponding keyword is supplied.
    """
    def __init__(self, **kwargs):
        opt = kwargs.get
        func_count = len(list(Functions()))

        self.file_out = opt('file_out', os.path.splitext(GetIdbPath())[0] + ".sqlite")
        self.file_in = opt('file_in', '')
        self.use_decompiler = opt('use_decompiler', True)
        self.unreliable = opt('unreliable', True)
        self.slow = opt('slow', True)
        # Enable, by default, relaxed calculations on difference ratios for
        # 'big' databases (>20k functions)
        self.relax = opt('relax', func_count > 20000)
        if self.relax:
            Warning(MSG_RELAXED_RATIO_ENABLED)
        self.experimental = opt('experimental', False)
        self.min_ea = opt('min_ea', MinEA())
        self.max_ea = opt('max_ea', MaxEA())
        self.ida_subs = opt('ida_subs', True)
        self.ignore_sub_names = opt('ignore_sub_names', True)
        self.ignore_all_names = opt('ignore_all_names', False)
        # Enable, by default, exporting only function summaries for huge dbs.
        self.func_summaries_only = opt('func_summaries_only', func_count > 100000)
#-----------------------------------------------------------------------
def _diff_or_export(use_ui, **options):
    """Export the current IDA database and, optionally, diff it.

    When @use_ui is True the options dialog is shown first; otherwise the
    supplied keyword @options override the BinDiffOptions defaults.

    Returns the CBinDiff instance used, or None when the operation was
    aborted before the exporter was created.
    """
    global g_bindiff

    total_functions = len(list(Functions()))
    if GetIdbPath() == "" or total_functions == 0:
        Warning("No IDA database opened or no function in the database.\nPlease open an IDA database and create some functions before running this script.")
        return

    opts = BinDiffOptions(**options)

    if use_ui:
        x = CBinDiffExporterSetup()
        x.Compile()
        x.set_options(opts)
        if not x.Execute():
            # Cancelled by the user.
            return
        opts = x.get_options()

    # Sanity-check the selected file names before touching anything.
    if opts.file_out == opts.file_in:
        Warning("Both databases are the same file!")
        return
    elif opts.file_out == "" or len(opts.file_out) < 5:
        Warning("No output database selected or invalid filename. Please select a database file.")
        return
    elif opts.file_out.lower().endswith((".idb", ".i64")) or opts.file_in.lower().endswith((".idb", ".i64")):
        Warning("One of the selected databases is an IDA database (IDB or I64), not a SQLite database!")
        return
    elif opts.file_out.lower().endswith(".til") or opts.file_in.lower().endswith(".id0") or opts.file_in.lower().endswith(".id1") or opts.file_in.lower().endswith(".nam"):
        Warning("One of the selected databases is an IDA temporary file, not a SQLite database!")
        return

    export = True
    if os.path.exists(opts.file_out):
        ret = askyn_c(0, "Export database already exists. Do you want to overwrite it?")
        if ret == -1:
            log("Cancelled")
            return

        if ret == 0:
            export = False

        if export:
            if g_bindiff is not None:
                g_bindiff = None

            remove_file(opts.file_out)
            log("Database %s removed" % repr(opts.file_out))

    # BUG FIX: bd must exist before the try block; previously, if
    # CBinDiff(...) itself raised, the final "return bd" raised NameError.
    bd = None
    try:
        bd = CBinDiff(opts.file_out)
        bd.use_decompiler_always = opts.use_decompiler
        bd.unreliable = opts.unreliable
        bd.slow_heuristics = opts.slow
        bd.relaxed_ratio = opts.relax
        bd.experimental = opts.experimental
        bd.min_ea = opts.min_ea
        bd.max_ea = opts.max_ea
        bd.ida_subs = opts.ida_subs
        bd.ignore_sub_names = opts.ignore_sub_names
        bd.ignore_all_names = opts.ignore_all_names
        bd.function_summaries_only = opts.func_summaries_only
        # Scale processing limits with the database size (per 20k functions).
        bd.max_processed_rows = MAX_PROCESSED_ROWS * max(total_functions / 20000, 1)
        bd.timeout = TIMEOUT_LIMIT * max(total_functions / 20000, 1)

        if export:
            if os.getenv("DIAPHORA_PROFILE") is not None:
                log("*** Profiling export ***")
                import cProfile
                profiler = cProfile.Profile()
                profiler.runcall(bd.export)
                profiler.print_stats(sort="time")
            else:
                bd.export()
            log("Database exported")

        if opts.file_in != "":
            if os.getenv("DIAPHORA_PROFILE") is not None:
                log("*** Profiling diff ***")
                import cProfile
                profiler = cProfile.Profile()
                profiler.runcall(bd.diff, opts.file_in)
                profiler.print_stats(sort="time")
            else:
                bd.diff(opts.file_in)
    except:
        # Broad on purpose: report any failure but still return bd (which
        # may be None) to the caller.
        print("Error: %s" % sys.exc_info()[1])
        traceback.print_exc()

    return bd
def diff_or_export_ui():
    """Run export/diff interactively, showing the options dialog first."""
    return _diff_or_export(use_ui=True)
def diff_or_export(**options):
    """Run export/diff non-interactively with the given option overrides."""
    return _diff_or_export(False, **options)
if __name__ == "__main__":
if os.getenv("DIAPHORA_AUTO") is not None:
file_out = os.getenv("DIAPHORA_EXPORT_FILE")
if file_out is None:
raise Exception("No export file specified!")
use_decompiler = os.getenv("DIAPHORA_USE_DECOMPILER")
if use_decompiler is None:
use_decompiler = False
bd = CBinDiff(file_out)
bd.use_decompiler_always = use_decompiler
if os.path.exists(file_out):
if g_bindiff is not None:
g_bindiff = None
remove_file(file_out)
log("Database %s removed" % repr(file_out))
bd.export()
else:
diff_or_export_ui()
# ---------------------------------------------------------------------------
# Standard library imports
import os
from datetime import date
# Bokeh imports
from bokeh import __version__
from bokeh.settings import settings
# -- Project configuration -----------------------------------------------------

author = "Bokeh Contributors"

# Compute the year at build time so the copyright notice never goes stale.
year = date.today().year

copyright = f"©{year} {author}."

project = 'Bokeh'

# Prefer an explicitly configured docs version; fall back to the version of
# the installed bokeh package.
version = settings.docs_version() or __version__
# -- Sphinx configuration -----------------------------------------------------

# Show bare object names rather than fully qualified paths in the output.
add_module_names = False

# Release notes are handled separately (see bokeh_releases below).
exclude_patterns = ['docs/releases/*']

# Third-party extensions first, stdlib Sphinx extensions next, then the
# Bokeh-specific ones.  The order is kept as-is.
extensions = [
    'sphinxext.opengraph',
    'sphinx_panels',
    'sphinx_reredirects',
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    'sphinx.ext.ifconfig',
    'sphinx.ext.napoleon',
    'sphinx.ext.intersphinx',
    'sphinx.ext.viewcode',
    'bokeh.sphinxext.bokeh_autodoc',
    'bokeh.sphinxext.bokeh_color',
    'bokeh.sphinxext.bokeh_enum',
    'bokeh.sphinxext.bokeh_gallery',
    'bokeh.sphinxext.bokeh_github',
    'bokeh.sphinxext.bokeh_jinja',
    'bokeh.sphinxext.bokeh_model',
    'bokeh.sphinxext.bokeh_options',
    'bokeh.sphinxext.bokeh_palette',
    'bokeh.sphinxext.bokeh_palette_group',
    'bokeh.sphinxext.bokeh_plot',
    'bokeh.sphinxext.bokeh_prop',
    'bokeh.sphinxext.bokeh_releases',
    'bokeh.sphinxext.bokeh_settings',
    'bokeh.sphinxext.bokeh_sitemap',
    'bokeh.sphinxext.bokehjs_content',
    'bokeh.sphinxext.collapsible_code_block',
    'bokeh.sphinxext.theme',
]
# Minimum Sphinx version required to build these docs.
needs_sphinx = '1.8'

# Substitutions appended to every processed source file.
rst_epilog = """
.. |Color| replace:: :py:class:`~bokeh.core.properties.Color`
.. |DataSpec| replace:: :py:class:`~bokeh.core.properties.DataSpec`
.. |Document| replace:: :py:class:`~bokeh.document.Document`
.. |HasProps| replace:: :py:class:`~bokeh.core.has_props.HasProps`
.. |Model| replace:: :py:class:`~bokeh.model.Model`
.. |Property| replace:: :py:class:`~bokeh.core.property.bases.Property`
.. |PropertyDescriptor| replace:: :py:class:`~bokeh.core.property.descriptor.PropertyDescriptor`
.. |PropertyContainer| replace:: :py:class:`~bokeh.core.property.wrappers.PropertyContainer`
.. |UnitsSpec| replace:: :py:class:`~bokeh.core.properties.UnitsSpec`
.. |field| replace:: :py:func:`~bokeh.core.properties.field`
.. |value| replace:: :py:func:`~bokeh.core.properties.value`
"""
# -- Extensions configuration --------------------------------------------------

autodoc_member_order = 'groupwise'

# Building without a Google API key produces broken GMaps examples, so fail
# fast unless explicitly overridden below.
bokeh_missing_google_api_key_ok = False

if not bokeh_missing_google_api_key_ok:
    if "GOOGLE_API_KEY" not in os.environ:
        raise RuntimeError("\n\nThe GOOGLE_API_KEY environment variable is not set. Set GOOGLE_API_KEY to a valid API key, "
                           "or set bokeh_missing_google_api_key_ok=True in conf.py to build anyway (with broken GMaps)")

bokeh_plot_pyfile_include_dirs = ['docs']
# Cross-project reference targets for sphinx.ext.intersphinx.
intersphinx_mapping = {
    'python' : ('https://docs.python.org/3/', None),
    'pandas' : ('https://pandas.pydata.org/pandas-docs/stable/', None),
    # BUG FIX: the old docs.scipy.org location for the NumPy inventory is
    # outdated; NumPy's docs (and objects.inv) now live at numpy.org.
    'numpy'  : ('https://numpy.org/doc/stable/', None),
}
# Include __init__ docstrings in class documentation.
napoleon_include_init_with_doc = True

pygments_style = 'sphinx'

# Old URLs that moved during the docs re-organization (sphinx_reredirects).
redirects = {
    "docs/installation": "first_steps/installation.html",
    "docs/user_guide/quickstart": "../first_steps.html",
}
# configuration for sphinxext.opengraph
ogp_site_url = 'https://docs.bokeh.org/en/latest/'
# NOTE(review): served over plain http -- confirm whether an https URL is
# available for the OpenGraph image.
ogp_image = 'http://static.bokeh.org/og/logotype-on-hex.png'
# Extra tags emitted verbatim into every page's <head>.
ogp_custom_meta_tags = [
    '<meta name="twitter:card" content="summary_large_image" />',
    '<meta property="twitter:site" content="@bokeh" />',
    '<meta name="image" property="og:image" content="http://static.bokeh.org/og/logotype-on-hex.png">',
]
# -- Options for HTML output ---------------------------------------------------

# Values injected into the templates of the custom "bokeh" theme.
html_context = {
    'AUTHOR': author,
    'DESCRIPTION': 'Bokeh visualization library, documentation site.',
    'SITEMAP_BASE_URL': 'https://docs.bokeh.org/en/', # Trailing slash is needed
    'VERSION': version,
}

html_theme = 'bokeh'

# The theme ships in this directory rather than as an installed package.
html_theme_path = ['.']

html_title = f"{project} {version} Documentation"
# Update URL for numpy intersphinx inventory (#10873)
# Standard library imports
import os
from datetime import date
# Bokeh imports
from bokeh import __version__
from bokeh.settings import settings
# -- Project configuration -----------------------------------------------------
author = "Bokeh Contributors"
year = date.today().year
copyright = f"©{year} {author}."
project = 'Bokeh'
# prefer an explicitly configured docs version; fall back to the package version
version = settings.docs_version() or __version__
# -- Sphinx configuration -----------------------------------------------------
add_module_names = False
exclude_patterns = ['docs/releases/*']
extensions = [
    'sphinxext.opengraph',
    'sphinx_panels',
    'sphinx_reredirects',
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    'sphinx.ext.ifconfig',
    'sphinx.ext.napoleon',
    'sphinx.ext.intersphinx',
    'sphinx.ext.viewcode',
    'bokeh.sphinxext.bokeh_autodoc',
    'bokeh.sphinxext.bokeh_color',
    'bokeh.sphinxext.bokeh_enum',
    'bokeh.sphinxext.bokeh_gallery',
    'bokeh.sphinxext.bokeh_github',
    'bokeh.sphinxext.bokeh_jinja',
    'bokeh.sphinxext.bokeh_model',
    'bokeh.sphinxext.bokeh_options',
    'bokeh.sphinxext.bokeh_palette',
    'bokeh.sphinxext.bokeh_palette_group',
    'bokeh.sphinxext.bokeh_plot',
    'bokeh.sphinxext.bokeh_prop',
    'bokeh.sphinxext.bokeh_releases',
    'bokeh.sphinxext.bokeh_settings',
    'bokeh.sphinxext.bokeh_sitemap',
    'bokeh.sphinxext.bokehjs_content',
    'bokeh.sphinxext.collapsible_code_block',
    'bokeh.sphinxext.theme',
]
needs_sphinx = '1.8'
# substitutions appended to every documentation page
rst_epilog = """
.. |Color| replace:: :py:class:`~bokeh.core.properties.Color`
.. |DataSpec| replace:: :py:class:`~bokeh.core.properties.DataSpec`
.. |Document| replace:: :py:class:`~bokeh.document.Document`
.. |HasProps| replace:: :py:class:`~bokeh.core.has_props.HasProps`
.. |Model| replace:: :py:class:`~bokeh.model.Model`
.. |Property| replace:: :py:class:`~bokeh.core.property.bases.Property`
.. |PropertyDescriptor| replace:: :py:class:`~bokeh.core.property.descriptor.PropertyDescriptor`
.. |PropertyContainer| replace:: :py:class:`~bokeh.core.property.wrappers.PropertyContainer`
.. |UnitsSpec| replace:: :py:class:`~bokeh.core.properties.UnitsSpec`
.. |field| replace:: :py:func:`~bokeh.core.properties.field`
.. |value| replace:: :py:func:`~bokeh.core.properties.value`
"""
# -- Extensions configuration --------------------------------------------------
autodoc_member_order = 'groupwise'
# fail the build early when GMaps examples would silently break
bokeh_missing_google_api_key_ok = False
if not bokeh_missing_google_api_key_ok:
    if "GOOGLE_API_KEY" not in os.environ:
        raise RuntimeError("\n\nThe GOOGLE_API_KEY environment variable is not set. Set GOOGLE_API_KEY to a valid API key, "
                           "or set bokeh_missing_google_api_key_ok=True in conf.py to build anyway (with broken GMaps)")
bokeh_plot_pyfile_include_dirs = ['docs']
# intersphinx inventories for cross-project references
intersphinx_mapping = {
    'python' : ('https://docs.python.org/3/', None),
    'pandas' : ('https://pandas.pydata.org/pandas-docs/stable/', None),
    'numpy' : ('https://numpy.org/doc/stable/', None)
}
napoleon_include_init_with_doc = True
pygments_style = 'sphinx'
# old page locations redirected by sphinx_reredirects
redirects = {
    "docs/installation": "first_steps/installation.html",
    "docs/user_guide/quickstart": "../first_steps.html",
}
# configuration for sphinxext.opengraph
ogp_site_url = 'https://docs.bokeh.org/en/latest/'
ogp_image = 'http://static.bokeh.org/og/logotype-on-hex.png'
ogp_custom_meta_tags = [
    '<meta name="twitter:card" content="summary_large_image" />',
    '<meta property="twitter:site" content="@bokeh" />',
    '<meta name="image" property="og:image" content="http://static.bokeh.org/og/logotype-on-hex.png">',
]
# -- Options for HTML output ---------------------------------------------------
html_context = {
    'AUTHOR': author,
    'DESCRIPTION': 'Bokeh visualization library, documentation site.',
    'SITEMAP_BASE_URL': 'https://docs.bokeh.org/en/', # Trailing slash is needed
    'VERSION': version,
}
html_theme = 'bokeh'
html_theme_path = ['.']
html_title = f"{project} {version} Documentation"
|
from configparser import ConfigParser
from urllib.parse import quote
from bs4 import BeautifulSoup
import urllib
import urllib.request
import sys
import io
import re
import time
# re-wrap stdout so non-ascii (Chinese) output prints correctly on consoles
# whose default encoding is not utf-8 (e.g. Windows cmd using gbk)
sys.stdout = io.TextIOWrapper(sys.stdout.buffer,encoding='utf8')
# CNKI shows 15 results per page; used to compute the page offset parameter
page_num=15
#index_url='http://search.cnki.com.cn/Search.aspx?q=%E6%A0%91&rank=relevant&cluster=Type&val=D049&p=' #+str(page_num)
def get_paper_url(page_url):
    """Scrape one CNKI search-result page and append each article's
    url, title, source and citation count (tab-separated) to data-detail.txt.

    :param page_url: url of one search-result page
    """
    html = urllib.request.urlopen(page_url).read()
    soup = BeautifulSoup(html, 'html.parser')
    # context manager so the file is closed even if parsing raises
    with open('data-detail.txt', 'a+', encoding='utf-8') as f:
        # one 'wz_content' div per search result
        for entry in soup.find_all('div', class_='wz_content'):
            link = entry.find('a', target='_blank')  # article title + link
            href = link.get('href')    # article url
            title = link.get_text()    # article title
            # source and citation count live in the same 'year-count' span
            year_count = entry.find('span', class_='year-count')
            publish = ''
            reference = ''
            for child in year_count:
                text = child.string
                # nested tags may have no single string child -> None
                if text is None:
                    continue
                text = text.replace('\n', '').replace('\r', '')
                if '被引次数' in text:
                    reference = text  # citation count
                elif '年' in text:  # publication source/year
                    publish = text
            print(publish)
            print(reference)
            f.write(href + '\t' + title + '\t' + publish + '\t' + reference + '\n')
if __name__ == '__main__':
    # time.clock() was removed in Python 3.8; perf_counter measures elapsed time
    start = time.perf_counter()
    cf = ConfigParser()
    cf.read("Config.conf", encoding='utf-8')
    keyword = cf.get('base', 'keyword')
    # quote() percent-encodes the (Chinese) keyword for use in the url
    index_url='http://search.cnki.com.cn/Search.aspx?q='+quote(keyword)+'&rank=&cluster=&val=&p='
    print(index_url)
    for i in range(0,68):
        page_num=15
        page_str_num=i*page_num
        page_url=index_url+str(page_str_num)
        print(page_url)
        get_paper_url(page_url)
        # persist progress after every page so an interrupted run can be resumed
        cf.set('base', 'currentpage', str(i))
        with open("Config.conf", "w", encoding='utf-8') as conf_file:
            cf.write(conf_file)
    end = time.perf_counter()
    print ('Running time: %s Seconds'%(end-start))
添加爬取参考文献,读取配置文件
from configparser import ConfigParser
from urllib.parse import quote
from bs4 import BeautifulSoup
import urllib
import urllib.request
import sys
import io
import time
import spider_paper
# re-wrap stdout so non-ascii (Chinese) output prints correctly on consoles
# whose default encoding is not utf-8 (e.g. Windows cmd using gbk)
sys.stdout = io.TextIOWrapper(sys.stdout.buffer,encoding='utf8')
# CNKI shows 15 results per page; used to compute the page offset parameter
page_num=15
#index_url='http://search.cnki.com.cn/Search.aspx?q=%E6%A0%91&rank=relevant&cluster=Type&val=D049&p=' #+str(page_num)
def get_paper_url(page_url):
    """Scrape one CNKI search-result page and append each article's
    url, title, source and citation count (tab-separated) to data-detail.txt.

    :param page_url: url of one search-result page
    """
    html = urllib.request.urlopen(page_url).read()
    soup = BeautifulSoup(html, 'html.parser')
    # context manager so the file is closed even if parsing raises
    with open('data-detail.txt', 'a+', encoding='utf-8') as f:
        # one 'wz_content' div per search result
        for entry in soup.find_all('div', class_='wz_content'):
            link = entry.find('a', target='_blank')  # article title + link
            href = link.get('href')    # article url
            title = link.get_text()    # article title
            # source and citation count live in the same 'year-count' span
            year_count = entry.find('span', class_='year-count')
            publish = ''
            reference = ''
            for child in year_count:
                text = child.string
                # nested tags may have no single string child -> None
                if text is None:
                    continue
                text = text.replace('\n', '').replace('\r', '')
                if '被引次数' in text:
                    reference = text  # citation count
                elif '年' in text:  # publication source/year
                    publish = text
            print(publish)
            print(reference)
            f.write(href + '\t' + title + '\t' + publish + '\t' + reference + '\n')
if __name__ == '__main__':
    # time.clock() was removed in Python 3.8; perf_counter measures elapsed time
    start = time.perf_counter()
    cf = ConfigParser()
    cf.read("Config.conf", encoding='utf-8')
    keyword = cf.get('base', 'keyword')      # search keyword
    maxpage = cf.getint('base', 'max_page')  # number of result pages to crawl
    # quote() percent-encodes the (Chinese) keyword for use in the url
    index_url='http://search.cnki.com.cn/Search.aspx?q='+quote(keyword)+'&rank=&cluster=&val=&p='
    print(index_url)
    for i in range(0, maxpage):
        page_num=15
        page_str_num=i*page_num
        page_url=index_url+str(page_str_num)
        print(page_url)
        get_paper_url(page_url)
        # persist progress after every page so an interrupted run can be resumed
        cf.set('base', 'currentpage', str(i))
        with open("Config.conf", "w", encoding='utf-8') as conf_file:
            cf.write(conf_file)
    spider_paper.spider_paper()  # fill in detailed info for each crawled article
    end = time.perf_counter()
    print ('Running time: %s Seconds'%(end-start))
|
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Union
from activitystreams.models.activity import Activity
from activitystreams.models.defobject import DefObject
from flask import request
from dino.config import ApiTargets
from dino.config import ErrorCodes as ECodes
from dino.hooks import *
from dino.config import ApiActions
from dino.utils.decorators import timeit
from dino import validation
import logging
__author__ = 'Oscar Eriksson <oscar@thenetcircle.com>'
logger = logging.getLogger(__name__)
def connect() -> (int, None):
    """
    Accept a new client connection; nothing to do beyond acknowledging it.

    :return: (ECodes.OK, None), i.e. {'status_code': 200}
    """
    return (ECodes.OK, None)
@timeit(logger, 'on_login')
def on_login(data: dict, activity: Activity) -> (int, Union[str, None]):
    """
    event sent directly after a connection has successfully been made, to get the user_id for this connection
    :param data: activity streams format, needs actor.id (user id) and actor.summary (user name)
    :param activity: the parsed activity, supplied by @pre_process decorator, NOT by calling endpoint
    :return: if ok: {'status_code': 200, 'data': <login AS with the user's roles as actor.attachments>},
        else: {'status_code': 400, 'data': '<some error message>'}
    """
    # user id/name were stored in the session during the connection handshake
    user_id = environ.env.session.get(SessionKeys.user_id.value)
    user_name = environ.env.session.get(SessionKeys.user_name.value)
    if not environ.env.config.get(ConfigKeys.TESTING):
        # one active socket per user: disconnect any previous connection first
        if str(user_id) in environ.env.connected_user_ids:
            logger.info('a new connection for user ID %s, will disconnect previous one' % user_id)
            environ.env.disconnect_by_sid(environ.env.connected_user_ids[str(user_id)])
        environ.env.connected_user_ids[str(user_id)] = environ.env.request.sid
    user_roles = utils.get_user_roles(user_id)
    response = utils.activity_for_login(user_id, user_name)
    # attach the user's global/room/channel roles so the client knows its permissions
    response['actor']['attachments'] = list()
    if len(user_roles['global']) > 0:
        response['actor']['attachments'].append({
            'objectType': 'global_role',
            'content': ','.join(user_roles['global'])
        })
    for room_uuid, roles in user_roles['room'].items():
        response['actor']['attachments'].append({
            'objectType': 'room_role',
            'id': room_uuid,
            'content': ','.join(roles)
        })
    for channel_uuid, roles in user_roles['channel'].items():
        response['actor']['attachments'].append({
            'objectType': 'channel_role',
            'id': channel_uuid,
            'content': ','.join(roles)
        })
    environ.env.observer.emit('on_login', (data, activity))
    return ECodes.OK, response
@timeit(logger, 'on_delete')
def on_delete(data: dict, activity: Activity):
    """
    forward a delete request to the registered 'on_delete' observers, which
    perform the actual handling
    :param data: activity streams format
    :param activity: the parsed activity, supplied by @pre_process decorator, NOT by calling endpoint
    :return: if ok: {'status_code': 200}
    """
    environ.env.observer.emit('on_delete', (data, activity))
    return ECodes.OK, None
@timeit(logger, 'on_message')
def on_message(data, activity: Activity):
    """
    send any kind of message/event to a target user/room
    object.url: target channel_id
    target.id: target room_id
    actor.id: sender user_id
    actor.url: sender room_id
    :param data: activity streams format, must include target.id (room/user id) and object.url (channel id)
    :param activity: the parsed activity, supplied by @pre_process decorator, NOT by calling endpoint
    :return: {'status_code': ECodes.OK, 'data': '<same AS as client sent, plus timestamp>'}
    """
    room_id = activity.target.id
    from_room_id = activity.actor.url
    # only if cross-room should we broadcast the origin room id with the activity; less confusion for clients
    if from_room_id is not None and from_room_id == room_id:
        del data['actor']['url']
    # resolve the channel: prefer the one the client supplied (non-room targets),
    # otherwise look it up from the target room
    channel_id = None
    if activity.target.object_type != 'room':
        if hasattr(activity, 'object') and hasattr(activity.object, 'url'):
            channel_id = activity.object.url
    if channel_id is None or len(channel_id.strip()) == 0:
        channel_id = utils.get_channel_for_room(room_id)
    channel_name = utils.get_channel_name(channel_id)
    # make sure both the parsed activity and the raw dict carry the channel info
    if not hasattr(activity, 'object'):
        activity.object = DefObject(dict())
    activity.object.url = channel_id
    activity.object.display_name = channel_name
    if 'object' not in data or len(data['object']) == 0:
        data['object'] = {
            'url': activity.object.url,
            'displayName': activity.object.display_name
        }
    else:
        data['object']['url'] = activity.object.url
        data['object']['displayName'] = activity.object.display_name
    # for cross-room messages, provider carries the origin room's channel
    if from_room_id is not None and len(from_room_id.strip()) > 0:
        activity.provider.url = utils.get_channel_for_room(from_room_id)
        activity.provider.display_name = utils.get_channel_name(activity.provider.url)
        if 'provider' not in data or len(data['provider']) == 0:
            data['provider'] = {
                'url': activity.provider.url,
                'displayName': activity.provider.display_name
            }
        else:
            data['provider']['url'] = activity.provider.url
            data['provider']['displayName'] = activity.provider.display_name
    # room targets get the room name; user targets (private messages) get the
    # user name and no channel info on the object
    if activity.target.object_type == 'room':
        activity.target.display_name = utils.get_room_name(activity.target.id)
    else:
        activity.target.display_name = utils.get_user_name_for(activity.target.id)
        activity.object.display_name = ''
        activity.object.url = ''
    # display names are transported base64-encoded
    activity.actor.display_name = utils.b64e(environ.env.session.get(SessionKeys.user_name.value))
    data['actor']['displayName'] = activity.actor.display_name
    data['target']['displayName'] = utils.b64e(activity.target.display_name)
    data['object']['displayName'] = utils.b64e(activity.object.display_name)
    environ.env.observer.emit('on_message', (data, activity))
    return ECodes.OK, data
@timeit(logger, 'on_update_user_info')
def on_update_user_info(data: dict, activity: Activity) -> (int, Union[str, None]):
    """
    Broadcast a user-info update to a room, or to every room the user is in
    when no target.id is given.

    :param data: activity streams format, must include object.attachments (user info)
    :param activity: the parsed activity, supplied by @pre_process decorator, NOT by calling endpoint
    :return: {'status_code': ECodes.OK, 'data': '<same AS as client sent, plus timestamp>'}
    """
    # display names are transported base64-encoded
    encoded_name = utils.b64e(environ.env.session.get(SessionKeys.user_name.value))
    activity.actor.display_name = encoded_name
    data['actor']['displayName'] = encoded_name
    environ.env.observer.emit('on_update_user_info', (data, activity))
    return ECodes.OK, data
@timeit(logger, 'on_ban')
def on_ban(data: dict, activity: Activity) -> (int, Union[str, None]):
    """
    ban a user from a room (if user is an owner/admin/moderator)
    target.id: the uuid of the room that the user is in
    target.displayName: the room name
    object.id: the id of the user to kick
    object.content: the name of the user to kick
    object.summary: the ban time, i.e. for how long the ban should last
    actor.id: the id of the kicker
    actor.content: the name of the kicker
    :param data: activity streams format
    :param activity: the parsed activity, supplied by @pre_process decorator, NOT by calling endpoint
    :return: if ok: {'status_code': 200}, else: {'status_code': 400, 'data': '<error message>'}
    """
    # a ban implies an immediate kick as well, so both observer chains fire
    environ.env.observer.emit('on_ban', (data, activity))
    environ.env.observer.emit('on_kick', (data, activity))
    return ECodes.OK, None
@timeit(logger, 'on_kick')
def on_kick(data: dict, activity: Activity) -> (int, None):
    """
    Kick a user out of a room (allowed for room owners).

    Expected activity fields:
        target.id / target.displayName: room uuid and room name,
        object.id / object.content: id and name of the user to kick,
        actor.id / actor.content: id and name of the kicker.

    :param data: activity streams format
    :param activity: the parsed activity, supplied by @pre_process decorator, NOT by calling endpoint
    :return: if ok: {'status_code': 200}, else: {'status_code': 400, 'data': '<error message>'}
    """
    payload = (data, activity)
    environ.env.observer.emit('on_kick', payload)
    return ECodes.OK, None
@timeit(logger, 'on_whisper')
def on_whisper(data: dict, activity: Activity) -> (int, None):
    """
    Whisper to another user in the same room; only that user receives the
    event, so it works like a private message.

    :param data: activity streams format
    :param activity: the parsed activity, supplied by @pre_process decorator, NOT by calling endpoint
    :return: if ok: {'status_code': 200}, else: {'status_code': 400, 'data': '<error message>'}
    """
    payload = (data, activity)
    environ.env.observer.emit('on_whisper', payload)
    return ECodes.OK, None
@timeit(logger, 'on_invite')
def on_invite(data: dict, activity: Activity) -> (int, None):
    """
    Invite another user to a room the inviting user is in.

    :param data: activity streams format
    :param activity: the parsed activity, supplied by @pre_process decorator, NOT by calling endpoint
    :return: if ok: {'status_code': 200}, else: {'status_code': 400, 'data': '<error message>'}
    """
    payload = (data, activity)
    environ.env.observer.emit('on_invite', payload)
    return ECodes.OK, None
@timeit(logger, 'on_request_admin')
def on_request_admin(data: dict, activity: Activity) -> (int, None):
    """
    Ask for an admin to join the current room.

    :param data: activity streams format
    :param activity: the parsed activity, supplied by @pre_process decorator, NOT by calling endpoint
    :return: if ok: {'status_code': 200}, else: {'status_code': 400, 'data': '<error message>'}
    """
    # nothing to forward unless at least one admin is currently connected
    if not environ.env.db.get_online_admins():
        return ECodes.NO_ADMIN_ONLINE, 'no admin is online'
    environ.env.observer.emit('on_request_admin', (data, activity))
    return ECodes.OK, None
@timeit(logger, 'on_create')
def on_create(data: dict, activity: Activity) -> (int, dict):
    """
    Create a new room.

    :param data: activity streams format, must include target.display_name (room name) and object.id (channel id)
    :param activity: the parsed activity, supplied by @pre_process decorator, NOT by calling endpoint
    :return: if ok: {'status_code': ECodes.OK, 'data': '<same AS as in the request, with addition of target.id
        (generated UUID for the new room)>'}, else: {'status_code': 400, 'data': '<error message>'}
    """
    # mint a uuid for the new room and mirror it on both representations
    new_room_id = str(uuid())
    activity.target.id = new_room_id
    activity.target.object_type = 'room'
    data['target']['id'] = new_room_id
    data['target']['objectType'] = activity.target.object_type
    environ.env.observer.emit('on_create', (data, activity))
    # initial ACLs may ride along as attachments on the object
    initial_acls = getattr(getattr(activity, 'object', None), 'attachments', None)
    if initial_acls:
        environ.env.observer.emit('on_set_acl', (data, activity))
    return ECodes.OK, data
@timeit(logger, 'on_set_acl')
def on_set_acl(data: dict, activity: Activity) -> (int, str):
    """
    Change the ACLs of a room; only allowed for the room's owner.

    ACLs arrive as attachments on the activity's object, with objectType as
    the acl name and content as the acl value.

    :param data: activity streams format
    :param activity: the parsed activity, supplied by @pre_process decorator, NOT by calling endpoint
    :return: if ok: {'status_code': 200}, else: {'status_code': 400, 'data': '<some error message>'}
    """
    payload = (data, activity)
    environ.env.observer.emit('on_set_acl', payload)
    return ECodes.OK, None
@timeit(logger, 'on_report')
def on_report(data: dict, activity: Activity) -> (int, str):
    """
    Report a user based on one of their messages.

    :param data: activity streams format dict
    :param activity: the parsed activity, supplied by @pre_process decorator, NOT by calling endpoint
    :return: if ok: {'status_code': 200}, else: {'status_code': 400, 'data': '<some error message>'}
    """
    payload = (data, activity)
    environ.env.observer.emit('on_report', payload)
    return ECodes.OK, None
@timeit(logger, 'on_get_acl')
def on_get_acl(data: dict, activity: Activity) -> (int, Union[str, dict]):
    """
    get the ACLs of a channel or a room (read-only; changing ACLs is on_set_acl)
    :param data: activity streams format
    :param activity: the parsed activity, supplied by @pre_process decorator, NOT by calling endpoint
    :return: if ok: {'status_code': 200}, else: {'status_code': 400, 'data': '<AS with acl as object.attachments>'}
    """
    # target.objectType decides whether channel or room ACLs are fetched
    if activity.target.object_type == ApiTargets.CHANNEL:
        acls = utils.get_acls_for_channel(activity.target.id)
    else:
        acls = utils.get_acls_for_room(activity.target.id)
    environ.env.observer.emit('on_get_acl', (data, activity))
    return ECodes.OK, utils.activity_for_get_acl(activity, acls)
@timeit(logger, 'on_status')
def on_status(data: dict, activity: Activity) -> (int, Union[str, None]):
    """
    Change the user's online status; the verb carries one of
    online/invisible/offline.

    :param data: activity streams format, needs actor.id (user id) and actor.summary (user name)
    :param activity: the parsed activity, supplied by @pre_process decorator, NOT by calling endpoint
    :return: if ok: {'status_code': 200}, else: {'status_code': 400, 'data': '<some error message>'}
    """
    payload = (data, activity)
    environ.env.observer.emit('on_status', payload)
    return ECodes.OK, None
@timeit(logger, 'on_history')
def on_history(data: dict, activity: Activity) -> (int, Union[str, None]):
    """
    Fetch message history for a room.

    The 'updated' field is optional; when set, history since that point is
    returned (only if dino has been configured with the history type 'unread'
    instead of 'top').

    :param data: activity streams format
    :param activity: the parsed activity, supplied by @pre_process decorator, NOT by calling endpoint
    :return: if ok: {'status_code': 200}, else: {'status_code': 400, 'data': '<some error message>'}
    """
    target_room = activity.target.id
    requester = activity.actor.id
    last_read_at = activity.updated
    messages = utils.get_history_for_room(target_room, requester, last_read_at)
    environ.env.observer.emit('on_history', (data, activity))
    return ECodes.OK, utils.activity_for_history(activity, messages)
@timeit(logger, 'on_remove_room')
def on_remove_room(data: dict, activity: Activity) -> (int, Union[str, None]):
    """
    remove a room
    :param data: json dict in activity streams format
    :param activity: the parsed activity, supplied by @pre_process decorator, NOT by calling endpoint
    :return: if ok: {'status_code': 200}, else: {'status_code': 400, 'data': '<some error message>'}
    """
    room_id = activity.target.id
    room_name = utils.get_room_name(room_id)
    channel_id = utils.get_channel_for_room(room_id)
    # an optional removal reason may be supplied in object.content
    reason = None
    if hasattr(activity.object, 'content'):
        reason = activity.object.content
    remove_activity = utils.activity_for_remove_room(
        activity.actor.id, activity.actor.display_name, room_id, room_name, reason)
    # delete from the database first, then broadcast to every connected client
    environ.env.db.remove_room(channel_id, room_id)
    environ.env.emit('gn_room_removed', remove_activity, broadcast=True, include_self=True)
    environ.env.observer.emit('on_remove_room', (data, activity))
    return ECodes.OK, utils.activity_for_room_removed(activity, room_name)
@timeit(logger, 'on_join')
def on_join(data: dict, activity: Activity) -> (int, Union[str, None]):
    """
    Join a room.

    :param data: activity streams format, need actor.id (user id), target.id (room id), actor.summary (user name)
    :param activity: the parsed activity, supplied by @pre_process decorator, NOT by calling endpoint
    :return: if ok: {'status_code': 200}, else: {'status_code': 400, 'data': '<some error message>'}
    """
    joined_room = activity.target.id
    joining_user = activity.actor.id
    since = activity.updated
    # gather history, ownership, acls and the fresh user list for the response
    messages = utils.get_history_for_room(joined_room, joining_user, since)
    owners = utils.get_owners_for_room(joined_room)
    acls = utils.get_acls_for_room(joined_room)
    users = utils.get_users_in_room(joined_room, joining_user, skip_cache=True)
    environ.env.observer.emit('on_join', (data, activity))
    return ECodes.OK, utils.activity_for_join(activity, acls, messages, owners, users)
@timeit(logger, 'on_users_in_room')
def on_users_in_room(data: dict, activity: Activity) -> (int, Union[dict, str]):
    """
    List the users currently in a room.

    :param data: activity streams format, need target.id (room id)
    :param activity: the parsed activity, supplied by @pre_process decorator, NOT by calling endpoint
    :return: if ok, {'status_code': ECodes.OK, 'data': <AS with users as object.attachments>}
    """
    # TODO: should people not in the room be able to list users in the room?
    requested_room = activity.target.id
    requester = activity.actor.id
    users = utils.get_users_in_room(requested_room, requester)
    environ.env.observer.emit('on_users_in_room', (data, activity))
    return ECodes.OK, utils.activity_for_users_in_room(activity, users)
@timeit(logger, 'on_list_rooms')
def on_list_rooms(data: dict, activity: Activity) -> (int, Union[dict, str]):
    """
    get a list of rooms in a channel that this user is allowed to see
    :param data: activity streams format, needs actor.id (user id) and object.url (channel id)
    :param activity: the parsed activity, supplied by @pre_process decorator, NOT by calling endpoint
    :return: if ok, {'status_code': ECodes.OK, 'data': <AS with rooms as object.attachments>}
    """
    channel_id = activity.object.url
    rooms = environ.env.db.rooms_for_channel(channel_id)
    roles = utils.get_user_roles(environ.env.session.get(SessionKeys.user_id.value))
    room_roles = roles['room']
    filtered_rooms = dict()
    for room_id, room_details in rooms.items():
        acls = utils.get_acls_in_room_for_action(room_id, ApiActions.LIST)
        is_valid, err_msg = validation.acl.validate_acl_for_action(
            activity, ApiTargets.ROOM, ApiActions.LIST, acls, target_id=room_id, object_type='room')
        # if not allowed to join, don't show in list
        if not is_valid:
            continue
        room_details['roles'] = ''
        if room_id in room_roles.keys():
            room_details['roles'] = ','.join(room_roles[room_id])
        filtered_rooms[room_id] = room_details
    environ.env.observer.emit('on_list_rooms', (data, activity))
    activity_json = utils.activity_for_list_rooms(activity, filtered_rooms)
    rooms_with_acls = activity_json['object']['attachments']
    # attach each room's ACLs; fetching them can fail for a room in an
    # inconsistent state, and one broken room must not fail the whole listing
    for room_info in rooms_with_acls:
        try:
            acls = utils.get_acls_for_room(room_info['id'])
            acl_activity = utils.activity_for_get_acl(activity, acls)
            room_info['attachments'] = acl_activity['object']['attachments']
        except Exception as e:
            logger.error('could not get acls for room %s: %s' % (room_info.get('id'), str(e)))
            room_info['attachments'] = list()
    activity_json['object']['attachments'] = rooms_with_acls
    return ECodes.OK, activity_json
@timeit(logger, 'on_list_channels')
def on_list_channels(data: dict, activity: Activity) -> (int, Union[dict, str]):
    """
    get a list of channels this user is allowed to list
    :param data: activity streams format, needs actor.id (user id)
    :param activity: the parsed activity, supplied by @pre_process decorator, NOT by calling endpoint
    :return: if ok, {'status_code': ECodes.OK, 'data': <AS with channels as object.attachments>}
    """
    channels = environ.env.db.get_channels()
    environ.env.observer.emit('on_list_channels', (data, activity))
    activity_json = utils.activity_for_list_channels(activity, channels)
    channels_with_acls = activity_json['object']['attachments']
    filtered_channels = list()
    for channel_info in channels_with_acls:
        channel_id = channel_info['id']
        list_acls = utils.get_acls_in_channel_for_action(channel_id, ApiActions.LIST)
        # the validator reads these fields off the activity, so point them at
        # the channel currently being checked
        activity.object.url = channel_id
        activity.target.object_type = 'channel'
        is_valid, err_msg = validation.acl.validate_acl_for_action(
            activity, ApiTargets.CHANNEL, ApiActions.LIST, list_acls, target_id=channel_id, object_type='channel')
        # not allowed to list this channel
        if not is_valid:
            continue
        # attach the channel's full ACLs to the entry shown to the client
        acls = utils.get_acls_for_channel(channel_id)
        acl_activity = utils.activity_for_get_acl(activity, acls)
        channel_info['attachments'] = acl_activity['object']['attachments']
        filtered_channels.append(channel_info)
    activity_json['object']['attachments'] = filtered_channels
    return ECodes.OK, activity_json
@timeit(logger, 'on_leave')
def on_leave(data: dict, activity: Activity) -> (int, Union[str, None]):
    """
    Leave a room.

    :param data: activity streams format, needs actor.id (user id), actor.summary (user name), target.id (room id)
    :param activity: the parsed activity, supplied by @pre_process decorator, NOT by calling endpoint
    :return: if ok: {'status_code': 200}, else: {'status_code': 400, 'data': '<some error message>'}
    """
    payload = (data, activity)
    environ.env.observer.emit('on_leave', payload)
    return ECodes.OK, None
@timeit(logger, 'on_disconnect')
def on_disconnect() -> (int, None):
    """
    when a client disconnects or the server no longer gets a ping response from the client
    :return json if ok, {'status_code': 200}
    """
    user_id = str(environ.env.session.get(SessionKeys.user_id.value))
    # synthesize a disconnect activity, since the client sends nothing on disconnect
    data = {
        'verb': 'disconnect',
        'actor': {
            'id': user_id
        }
    }
    if not environ.env.config.get(ConfigKeys.TESTING):
        # only clear the bookkeeping if this socket is still the user's active one
        if environ.env.connected_user_ids.get(user_id) == request.sid:
            del environ.env.connected_user_ids[user_id]
    activity = as_parser(data)
    environ.env.observer.emit('on_disconnect', (data, activity))
    return ECodes.OK, None
catch exception in on_list_rooms
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Union
from activitystreams.models.activity import Activity
from activitystreams.models.defobject import DefObject
from flask import request
from dino.config import ApiTargets
from dino.config import ErrorCodes as ECodes
from dino.hooks import *
from dino.config import ApiActions
from dino.utils.decorators import timeit
from dino import validation
import logging
__author__ = 'Oscar Eriksson <oscar@thenetcircle.com>'
logger = logging.getLogger(__name__)
def connect() -> (int, None):
    """
    Accept a new client connection; nothing to do beyond acknowledging it.

    :return: (ECodes.OK, None), i.e. {'status_code': 200}
    """
    return (ECodes.OK, None)
@timeit(logger, 'on_login')
def on_login(data: dict, activity: Activity) -> (int, Union[str, None]):
    """
    event sent directly after a connection has successfully been made, to get the user_id for this connection
    :param data: activity streams format, needs actor.id (user id) and actor.summary (user name)
    :param activity: the parsed activity, supplied by @pre_process decorator, NOT by calling endpoint
    :return: if ok: {'status_code': 200, 'data': <login AS with the user's roles as actor.attachments>},
        else: {'status_code': 400, 'data': '<some error message>'}
    """
    # user id/name were stored in the session during the connection handshake
    user_id = environ.env.session.get(SessionKeys.user_id.value)
    user_name = environ.env.session.get(SessionKeys.user_name.value)
    if not environ.env.config.get(ConfigKeys.TESTING):
        # one active socket per user: disconnect any previous connection first
        if str(user_id) in environ.env.connected_user_ids:
            logger.info('a new connection for user ID %s, will disconnect previous one' % user_id)
            environ.env.disconnect_by_sid(environ.env.connected_user_ids[str(user_id)])
        environ.env.connected_user_ids[str(user_id)] = environ.env.request.sid
    user_roles = utils.get_user_roles(user_id)
    response = utils.activity_for_login(user_id, user_name)
    # attach the user's global/room/channel roles so the client knows its permissions
    response['actor']['attachments'] = list()
    if len(user_roles['global']) > 0:
        response['actor']['attachments'].append({
            'objectType': 'global_role',
            'content': ','.join(user_roles['global'])
        })
    for room_uuid, roles in user_roles['room'].items():
        response['actor']['attachments'].append({
            'objectType': 'room_role',
            'id': room_uuid,
            'content': ','.join(roles)
        })
    for channel_uuid, roles in user_roles['channel'].items():
        response['actor']['attachments'].append({
            'objectType': 'channel_role',
            'id': channel_uuid,
            'content': ','.join(roles)
        })
    environ.env.observer.emit('on_login', (data, activity))
    return ECodes.OK, response
@timeit(logger, 'on_delete')
def on_delete(data: dict, activity: Activity):
    """
    forward a delete request to the registered 'on_delete' observers, which
    perform the actual handling
    :param data: activity streams format
    :param activity: the parsed activity, supplied by @pre_process decorator, NOT by calling endpoint
    :return: if ok: {'status_code': 200}
    """
    environ.env.observer.emit('on_delete', (data, activity))
    return ECodes.OK, None
@timeit(logger, 'on_message')
def on_message(data, activity: Activity):
    """
    send any kind of message/event to a target user/room

    object.url: target channel_id
    target.id: target room_id
    actor.id: sender user_id
    actor.url: sender room_id

    :param data: activity streams format, must include target.id (room/user id) and object.url (channel id)
    :param activity: the parsed activity, supplied by @pre_process decorator, NOT by calling endpoint
    :return: {'status_code': ECodes.OK, 'data': '<same AS as client sent, plus timestamp>'}
    """
    room_id = activity.target.id
    from_room_id = activity.actor.url
    # only if cross-room should we broadcast the origin room id with the activity; less confusion for clients
    if from_room_id is not None and from_room_id == room_id:
        del data['actor']['url']
    channel_id = None
    # when targeting a user rather than a room, the client may carry the channel in object.url
    if activity.target.object_type != 'room':
        if hasattr(activity, 'object') and hasattr(activity.object, 'url'):
            channel_id = activity.object.url
        # fall back to resolving the channel from the target room
        if channel_id is None or len(channel_id.strip()) == 0:
            channel_id = utils.get_channel_for_room(room_id)
        channel_name = utils.get_channel_name(channel_id)
        # make sure both the parsed activity and the outgoing dict carry the channel info
        if not hasattr(activity, 'object'):
            activity.object = DefObject(dict())
        activity.object.url = channel_id
        activity.object.display_name = channel_name
        if 'object' not in data or len(data['object']) == 0:
            data['object'] = {
                'url': activity.object.url,
                'displayName': activity.object.display_name
            }
        else:
            data['object']['url'] = activity.object.url
            data['object']['displayName'] = activity.object.display_name
    # for cross-room messages, 'provider' carries the origin room's channel
    if from_room_id is not None and len(from_room_id.strip()) > 0:
        activity.provider.url = utils.get_channel_for_room(from_room_id)
        activity.provider.display_name = utils.get_channel_name(activity.provider.url)
        if 'provider' not in data or len(data['provider']) == 0:
            data['provider'] = {
                'url': activity.provider.url,
                'displayName': activity.provider.display_name
            }
        else:
            data['provider']['url'] = activity.provider.url
            data['provider']['displayName'] = activity.provider.display_name
    # resolve the human-readable target name: room name for rooms, user name otherwise
    if activity.target.object_type == 'room':
        activity.target.display_name = utils.get_room_name(activity.target.id)
    else:
        activity.target.display_name = utils.get_user_name_for(activity.target.id)
        # user-targeted messages carry no channel on the object
        activity.object.display_name = ''
        activity.object.url = ''
    # display names are broadcast base64-encoded
    activity.actor.display_name = utils.b64e(environ.env.session.get(SessionKeys.user_name.value))
    data['actor']['displayName'] = activity.actor.display_name
    data['target']['displayName'] = utils.b64e(activity.target.display_name)
    data['object']['displayName'] = utils.b64e(activity.object.display_name)
    environ.env.observer.emit('on_message', (data, activity))
    return ECodes.OK, data
@timeit(logger, 'on_update_user_info')
def on_update_user_info(data: dict, activity: Activity) -> (int, Union[str, None]):
    """
    Broadcast a user-info update to a room, or to every room the user is
    in when no target.id is supplied.

    :param data: activity streams format, must include object.attachments (user info)
    :param activity: parsed activity, injected by the @pre_process decorator
    :return: (ECodes.OK, <same AS as client sent, plus timestamp>)
    """
    encoded_name = utils.b64e(environ.env.session.get(SessionKeys.user_name.value))
    activity.actor.display_name = encoded_name
    data['actor']['displayName'] = encoded_name
    environ.env.observer.emit('on_update_user_info', (data, activity))
    return ECodes.OK, data
@timeit(logger, 'on_ban')
def on_ban(data: dict, activity: Activity) -> (int, Union[str, None]):
    """
    Ban a user from a room (owner/admin/moderator only).

    target.id: uuid of the room the user is in
    target.displayName: the room name
    object.id: id of the user to ban
    object.content: name of the user to ban
    object.summary: the ban time
    actor.id: id of the banner
    actor.content: name of the banner

    :param data: activity streams format
    :param activity: parsed activity, injected by the @pre_process decorator
    :return: (200, None) if ok, else (400, '<error message>')
    """
    # a ban also kicks the user out immediately
    for event_name in ('on_ban', 'on_kick'):
        environ.env.observer.emit(event_name, (data, activity))
    return ECodes.OK, None
@timeit(logger, 'on_kick')
def on_kick(data: dict, activity: Activity) -> (int, None):
    """
    Kick a user out of a room (owner only).

    target.id: uuid of the room the user is in
    target.displayName: the room name
    object.id: id of the user to kick
    object.content: name of the user to kick
    actor.id: id of the kicker
    actor.content: name of the kicker

    :param data: activity streams format
    :param activity: parsed activity, injected by the @pre_process decorator
    :return: (200, None) if ok, else (400, '<error message>')
    """
    environ.env.observer.emit('on_kick', (data, activity))
    return ECodes.OK, None
@timeit(logger, 'on_whisper')
def on_whisper(data: dict, activity: Activity) -> (int, None):
    """
    Send a private whisper to another user in the same room; only that
    user receives the event.

    :param data: activity streams format
    :param activity: parsed activity, injected by the @pre_process decorator
    :return: (200, None) if ok, else (400, '<error message>')
    """
    environ.env.observer.emit('on_whisper', (data, activity))
    return ECodes.OK, None
@timeit(logger, 'on_invite')
def on_invite(data: dict, activity: Activity) -> (int, None):
    """
    Invite another user to a room this user is in.

    :param data: activity streams format
    :param activity: parsed activity, injected by the @pre_process decorator
    :return: (200, None) if ok, else (400, '<error message>')
    """
    environ.env.observer.emit('on_invite', (data, activity))
    return ECodes.OK, None
@timeit(logger, 'on_request_admin')
def on_request_admin(data: dict, activity: Activity) -> (int, None):
    """
    Ask for an admin to come to the current room; fails when no admin is
    currently online.

    :param data: activity streams format
    :param activity: parsed activity, injected by the @pre_process decorator
    :return: (200, None) if ok, else (400, '<error message>')
    """
    if not environ.env.db.get_online_admins():
        return ECodes.NO_ADMIN_ONLINE, 'no admin is online'
    environ.env.observer.emit('on_request_admin', (data, activity))
    return ECodes.OK, None
@timeit(logger, 'on_create')
def on_create(data: dict, activity: Activity) -> (int, dict):
    """
    Create a new room.

    :param data: activity streams format, must include target.display_name (room name) and object.id (channel id)
    :param activity: parsed activity, injected by the @pre_process decorator
    :return: (ECodes.OK, <same AS as in the request, with target.id set to the generated room uuid>)
             if ok, else (400, '<error message>')
    """
    # a fresh uuid identifies the new room, on both the activity and the dict
    new_room_id = str(uuid())
    activity.target.id = new_room_id
    activity.target.object_type = 'room'
    data['target']['id'] = new_room_id
    data['target']['objectType'] = activity.target.object_type
    environ.env.observer.emit('on_create', (data, activity))
    # initial acls may be passed along as attachments on the object
    attachments = getattr(getattr(activity, 'object', None), 'attachments', None)
    if attachments is not None and len(attachments) > 0:
        environ.env.observer.emit('on_set_acl', (data, activity))
    return ECodes.OK, data
@timeit(logger, 'on_set_acl')
def on_set_acl(data: dict, activity: Activity) -> (int, str):
    """
    Change the ACLs of a room; only allowed for the room's owner.

    :param data: activity streams, acls as attachments to object with object_type as acl name and content as acl value
    :param activity: parsed activity, injected by the @pre_process decorator
    :return: (200, None) if ok, else (400, '<some error message>')
    """
    environ.env.observer.emit('on_set_acl', (data, activity))
    return ECodes.OK, None
@timeit(logger, 'on_report')
def on_report(data: dict, activity: Activity) -> (int, str):
    """
    Report a user based on one of their messages.

    :param data: activity streams format dict
    :param activity: parsed activity, injected by the @pre_process decorator
    :return: (200, None) if ok, else (400, '<some error message>')
    """
    environ.env.observer.emit('on_report', (data, activity))
    return ECodes.OK, None
@timeit(logger, 'on_get_acl')
def on_get_acl(data: dict, activity: Activity) -> (int, Union[str, dict]):
    """
    Fetch the ACLs of a channel or a room.

    :param data: activity streams format
    :param activity: parsed activity, injected by the @pre_process decorator
    :return: (200, <AS with acl as object.attachments>) if ok, else (400, '<error message>')
    """
    # channels and rooms store their acls separately
    if activity.target.object_type == ApiTargets.CHANNEL:
        acl_getter = utils.get_acls_for_channel
    else:
        acl_getter = utils.get_acls_for_room
    acls = acl_getter(activity.target.id)
    environ.env.observer.emit('on_get_acl', (data, activity))
    return ECodes.OK, utils.activity_for_get_acl(activity, acls)
@timeit(logger, 'on_status')
def on_status(data: dict, activity: Activity) -> (int, Union[str, None]):
    """
    Change the user's online status.

    :param data: activity streams format, needs actor.id (user id), actor.summary (user name)
                 and verb (online/invisible/offline)
    :param activity: parsed activity, injected by the @pre_process decorator
    :return: (200, None) if ok, else (400, '<some error message>')
    """
    environ.env.observer.emit('on_status', (data, activity))
    return ECodes.OK, None
@timeit(logger, 'on_history')
def on_history(data: dict, activity: Activity) -> (int, Union[str, None]):
    """
    Fetch the message history of a room.

    The 'updated' field is optional; when set, only history since that point
    is returned (and only if dino is configured with history type 'unread'
    instead of 'top').

    :param data: activity streams format
    :param activity: parsed activity, injected by the @pre_process decorator
    :return: (200, <history AS>) if ok, else (400, '<some error message>')
    """
    history = utils.get_history_for_room(
        activity.target.id, activity.actor.id, activity.updated)
    environ.env.observer.emit('on_history', (data, activity))
    return ECodes.OK, utils.activity_for_history(activity, history)
@timeit(logger, 'on_remove_room')
def on_remove_room(data: dict, activity: Activity) -> (int, Union[str, None]):
    """
    Remove a room and broadcast the removal to everyone.

    :param data: json dict in activity streams format
    :param activity: parsed activity, injected by the @pre_process decorator
    :return: (200, <room-removed AS>) if ok, else (400, '<some error message>')
    """
    room_id = activity.target.id
    room_name = utils.get_room_name(room_id)
    channel_id = utils.get_channel_for_room(room_id)
    # an optional human-readable reason may be supplied in object.content
    reason = getattr(activity.object, 'content', None)
    removal_activity = utils.activity_for_remove_room(
        activity.actor.id, activity.actor.display_name, room_id, room_name, reason)
    environ.env.db.remove_room(channel_id, room_id)
    environ.env.emit('gn_room_removed', removal_activity, broadcast=True, include_self=True)
    environ.env.observer.emit('on_remove_room', (data, activity))
    return ECodes.OK, utils.activity_for_room_removed(activity, room_name)
@timeit(logger, 'on_join')
def on_join(data: dict, activity: Activity) -> (int, Union[str, None]):
    """
    Join a room; the response bundles the room's history, owners, acls and
    current users.

    :param data: activity streams format, needs actor.id (user id), target.id (room id), actor.summary (user name)
    :param activity: parsed activity, injected by the @pre_process decorator
    :return: (200, <join AS>) if ok, else (400, '<some error message>')
    """
    room_id = activity.target.id
    user_id = activity.actor.id
    history = utils.get_history_for_room(room_id, user_id, activity.updated)
    room_owners = utils.get_owners_for_room(room_id)
    room_acls = utils.get_acls_for_room(room_id)
    # skip the cache so the joiner sees an up-to-date user list
    current_users = utils.get_users_in_room(room_id, user_id, skip_cache=True)
    environ.env.observer.emit('on_join', (data, activity))
    return ECodes.OK, utils.activity_for_join(
        activity, room_acls, history, room_owners, current_users)
@timeit(logger, 'on_users_in_room')
def on_users_in_room(data: dict, activity: Activity) -> (int, Union[dict, str]):
    """
    List the users currently in a room.

    :param data: activity streams format, needs target.id (room id)
    :param activity: parsed activity, injected by the @pre_process decorator
    :return: (ECodes.OK, <AS with users as object.attachments>) if ok
    """
    # TODO: should people not in the room be able to list users in the room?
    users = utils.get_users_in_room(activity.target.id, activity.actor.id)
    environ.env.observer.emit('on_users_in_room', (data, activity))
    return ECodes.OK, utils.activity_for_users_in_room(activity, users)
@timeit(logger, 'on_list_rooms')
def on_list_rooms(data: dict, activity: Activity) -> (int, Union[dict, str]):
    """
    get a list of rooms

    :param data: activity streams format, needs actor.id (user id) and object.id (channel id)
    :param activity: the parsed activity, supplied by @pre_process decorator, NOT by calling endpoint
    :return: if ok, {'status_code': ECodes.OK, 'data': <AS with rooms as object.attachments>}
    """
    channel_id = activity.object.url
    rooms = environ.env.db.rooms_for_channel(channel_id)
    roles = utils.get_user_roles(environ.env.session.get(SessionKeys.user_id.value))
    room_roles = roles['room']
    # keep only the rooms this user's acls allow listing
    filtered_rooms = dict()
    for room_id, room_details in rooms.items():
        try:
            acls = utils.get_acls_in_room_for_action(room_id, ApiActions.LIST)
            is_valid, err_msg = validation.acl.validate_acl_for_action(
                activity, ApiTargets.ROOM, ApiActions.LIST, acls, target_id=room_id, object_type='room')
        except Exception as e:
            # best-effort: a room whose acls cannot be evaluated is simply omitted
            logger.warn('could not check acls for room %s in on_list_rooms: %s' % (room_id, str(e)))
            continue
        # if not allowed to join, don't show in list
        if not is_valid:
            continue
        # annotate each room with this user's roles in it, comma-separated
        room_details['roles'] = ''
        if room_id in room_roles.keys():
            room_details['roles'] = ','.join(room_roles[room_id])
        filtered_rooms[room_id] = room_details
    environ.env.observer.emit('on_list_rooms', (data, activity))
    activity_json = utils.activity_for_list_rooms(activity, filtered_rooms)
    # attach each room's full acl set to its entry in the response
    rooms_with_acls = activity_json['object']['attachments']
    for room_info in rooms_with_acls:
        acls = utils.get_acls_for_room(room_info['id'])
        acl_activity = utils.activity_for_get_acl(activity, acls)
        room_info['attachments'] = acl_activity['object']['attachments']
    activity_json['object']['attachments'] = rooms_with_acls
    return ECodes.OK, activity_json
@timeit(logger, 'on_list_channels')
def on_list_channels(data: dict, activity: Activity) -> (int, Union[dict, str]):
    """
    get a list of channels

    :param data: activity streams format, needs actor.id (user id)
    :param activity: the parsed activity, supplied by @pre_process decorator, NOT by calling endpoint
    :return: if ok, {'status_code': ECodes.OK, 'data': <AS with channels as object.attachments>}
    """
    channels = environ.env.db.get_channels()
    environ.env.observer.emit('on_list_channels', (data, activity))
    activity_json = utils.activity_for_list_channels(activity, channels)
    channels_with_acls = activity_json['object']['attachments']
    # keep only channels whose LIST acls permit this user to see them
    filtered_channels = list()
    for channel_info in channels_with_acls:
        channel_id = channel_info['id']
        list_acls = utils.get_acls_in_channel_for_action(channel_id, ApiActions.LIST)
        # point the activity at this channel before validating against it
        activity.object.url = channel_id
        activity.target.object_type = 'channel'
        is_valid, err_msg = validation.acl.validate_acl_for_action(
            activity, ApiTargets.CHANNEL, ApiActions.LIST, list_acls, target_id=channel_id, object_type='channel')
        # not allowed to list this channel
        if not is_valid:
            continue
        # attach the channel's full acl set to its entry in the response
        acls = utils.get_acls_for_channel(channel_id)
        acl_activity = utils.activity_for_get_acl(activity, acls)
        channel_info['attachments'] = acl_activity['object']['attachments']
        filtered_channels.append(channel_info)
    activity_json['object']['attachments'] = filtered_channels
    return ECodes.OK, activity_json
@timeit(logger, 'on_leave')
def on_leave(data: dict, activity: Activity) -> (int, Union[str, None]):
    """
    Leave a room.

    :param data: activity streams format, needs actor.id (user id), actor.summary (user name), target.id (room id)
    :param activity: parsed activity, injected by the @pre_process decorator
    :return: (200, None) if ok, else (400, '<some error message>')
    """
    environ.env.observer.emit('on_leave', (data, activity))
    return ECodes.OK, None
@timeit(logger, 'on_disconnect')
def on_disconnect() -> (int, None):
    """
    Handle a client disconnecting, or the server no longer receiving ping
    responses from it.

    :return: (200, None)
    """
    user_id = str(environ.env.session.get(SessionKeys.user_id.value))
    data = {'verb': 'disconnect', 'actor': {'id': user_id}}
    if not environ.env.config.get(ConfigKeys.TESTING):
        # only clear the mapping if this sid is still the active connection
        if environ.env.connected_user_ids.get(user_id) == request.sid:
            del environ.env.connected_user_ids[user_id]
    activity = as_parser(data)
    environ.env.observer.emit('on_disconnect', (data, activity))
    return ECodes.OK, None
|
from __future__ import unicode_literals
import re
import os
import spotipy.util as util
import youtube_dl
from spotify_dl.scaffold import *
def authenticate():
    """Authenticates you to Spotify

    Prompts for a token with read access to the user's library.
    """
    return util.prompt_for_user_token('', 'user-library-read')
def fetch_tracks(sp, playlist, user_id):
    """Fetches tracks from Spotify user's saved tracks, or from playlist
    (if the playlist parameter is passed), and returns a dict mapping
    song name -> artist name.

    :param sp: authenticated spotipy client
    :param playlist: playlist id/URI, or None for the user's saved tracks
    :param user_id: owner of the playlist, or None to use the current user
    """
    log.debug('Fetching saved tracks')
    offset = 0
    songs_dict = {}
    if user_id is None:
        current_user_id = sp.current_user()['id']
    else:
        current_user_id = user_id
    while True:
        if playlist is None:
            results = sp.current_user_saved_tracks(limit=50, offset=offset)
        else:
            results = sp.user_playlist_tracks(current_user_id, playlist, None,
                                              limit=50, offset=offset)
        log.debug('Got result json {}'.format(results))
        for item in results['items']:
            track = item['track']
            # the track itself can be None (e.g. removed from Spotify),
            # and so can the name/artist fields — skip those entries
            if track is None or track['name'] is None or track['artists'][0]['name'] is None:
                # bug fix: the original called .format() on the return value of
                # log.warning() (None), raising AttributeError at runtime
                log.warning("Track/artist name for {} not found, skipping".format(track))
                continue
            track_name = str(track['name'])
            track_artist = str(track['artists'][0]['name'])
            log.debug('Appending {} to'
                      'songs list'.format(track['name'] + ' - ' +
                                          track['artists'][0]['name']))
            songs_dict.update({track_name: track_artist})
        # advance by the page size actually fetched; the original advanced by 1,
        # which re-fetched overlapping 50-item pages on every iteration
        offset += len(results['items'])
        if results.get('next') is None:
            log.info('All pages fetched, time to leave.'
                     ' Added {} songs in total'.format(offset))
            break
    return songs_dict
def save_songs_to_file(songs, directory):
    """
    Saves the songs fetched by fetch_tracks to <directory>/songs.txt,
    to be downloaded later by youtube-dl.

    :param songs: the songs dict (song name -> artist name)
    :param directory: directory in which songs.txt is created
    """
    with open(os.path.join(directory, 'songs.txt'), 'w') as f:
        # NOTE(review): this space-joins the *characters* of str(songs), not
        # the songs themselves — behavior preserved as-is; confirm intended
        f.write(' '.join(str(songs)))
    # bug fix: removed the redundant f.close() — the with-statement already
    # closes the file on exit
def download_songs(info, download_directory):
    """
    Downloads each song in *info* via youtube-dl, writing into
    download_directory (or the current directory when it is empty).

    Each item of *info* is a (url, track, artist) triple; failed downloads
    are reported and skipped.
    """
    for item in info:
        log.debug('Songs to download: {}'.format(item))
        url_, track_, artist_ = item
        ydl_opts = {
            'format': 'bestaudio/best',
            # remember already-downloaded songs between runs
            'download_archive': download_directory + 'downloaded_songs.txt',
            'outtmpl': download_directory + '%(title)s.%(ext)s',
            'noplaylist': True,
            'postprocessors': [
                {
                    'key': 'FFmpegExtractAudio',
                    'preferredcodec': 'mp3',
                    'preferredquality': '192',
                },
                {'key': 'FFmpegMetadata'},
            ],
            'postprocessor_args': ['-metadata', 'title=' + str(track_)],
        }
        with youtube_dl.YoutubeDL(ydl_opts) as ydl:
            try:
                log.debug(ydl.download([url_]))
            except Exception as e:
                log.debug(e)
                print('Failed to download: {}'.format(url_))
                continue
def extract_user_and_playlist_from_uri(uri):
    """Parse a spotify:user:<user>:playlist:<id> URI.

    :param uri: the spotify playlist URI string
    :return: (user_id, playlist_id) for the first match, or None when *uri*
             does not contain a playlist URI (callers unpack the tuple, so a
             non-matching uri surfaces as a TypeError — preserved behavior)
    """
    # bug fix: use a raw string so \w is not an invalid escape sequence
    playlist_re = re.compile(r"spotify:user:[\w,.]+:playlist:[\w]+")
    for playlist_uri in playlist_re.findall(uri):
        segments = playlist_uri.split(":")
        user_id = segments[2]
        log.info('List owner: ' + str(user_id))
        playlist_id = segments[4]
        log.info('List ID: ' + str(playlist_id))
        return user_id, playlist_id
def playlist_name(uri, sp):
    """Return the display name of the playlist referenced by *uri*."""
    owner_id, plist_id = extract_user_and_playlist_from_uri(uri)
    return get_playlist_name_from_id(plist_id, owner_id, sp)
def get_playlist_name_from_id(playlist_id, user_id, sp):
    """Look up a playlist's display name through the Spotify client *sp*."""
    playlist = sp.user_playlist(user_id, playlist_id,
                                fields="tracks, next, name")
    return playlist['name']
Check if the track itself is None before reading its name/artist. Fixes #31
from __future__ import unicode_literals
import re
import os
import spotipy.util as util
import youtube_dl
from spotify_dl.scaffold import *
def authenticate():
    """Authenticates you to Spotify

    Prompts for a token with read access to the user's library.
    """
    return util.prompt_for_user_token('', 'user-library-read')
def fetch_tracks(sp, playlist, user_id):
    """Fetches tracks from Spotify user's saved tracks, or from playlist
    (if the playlist parameter is passed), and returns a dict mapping
    song name -> artist name.

    :param sp: authenticated spotipy client
    :param playlist: playlist id/URI, or None for the user's saved tracks
    :param user_id: owner of the playlist, or None to use the current user
    """
    log.debug('Fetching saved tracks')
    offset = 0
    songs_dict = {}
    if user_id is None:
        current_user_id = sp.current_user()['id']
    else:
        current_user_id = user_id
    while True:
        if playlist is None:
            results = sp.current_user_saved_tracks(limit=50, offset=offset)
        else:
            results = sp.user_playlist_tracks(current_user_id, playlist, None,
                                              limit=50, offset=offset)
        log.debug('Got result json %s', results)
        for item in results['items']:
            track = item['track']
            # the track itself can be None, e.g. when removed from Spotify
            if track is not None:
                track_name = str(track['name'])
                track_artist = str(track['artists'][0]['name'])
                log.debug('Appending %s to'
                          'songs list', (track['name'] + ' - ' + track['artists'][0]['name']))
                songs_dict.update({track_name: track_artist})
            else:
                log.warning("Track/artist name for %s not found, skipping", track)
        # bug fix: advance by the page size actually fetched; the original
        # advanced by 1, which re-fetched overlapping 50-item pages
        offset += len(results['items'])
        if results.get('next') is None:
            log.info('All pages fetched, time to leave.'
                     ' Added %s songs in total', offset)
            break
    return songs_dict
def save_songs_to_file(songs, directory):
    """
    Saves the songs fetched by fetch_tracks to <directory>/songs.txt,
    to be downloaded later by youtube-dl.

    :param songs: the songs dict (song name -> artist name)
    :param directory: directory in which songs.txt is created
    """
    with open(os.path.join(directory, 'songs.txt'), 'w') as f:
        # NOTE(review): this space-joins the *characters* of str(songs), not
        # the songs themselves — behavior preserved as-is; confirm intended
        f.write(' '.join(str(songs)))
    # bug fix: removed the redundant f.close() — the with-statement already
    # closes the file on exit
def download_songs(info, download_directory):
    """
    Downloads each song in *info* via youtube-dl, writing into
    download_directory (or the current directory when it is empty).

    Each item of *info* is a (url, track, artist) triple; failed downloads
    are reported and skipped.
    """
    for item in info:
        log.debug('Songs to download: %s', item)
        url_, track_, artist_ = item
        ydl_opts = {
            'format': 'bestaudio/best',
            # remember already-downloaded songs between runs
            'download_archive': download_directory + 'downloaded_songs.txt',
            'outtmpl': download_directory + '%(title)s.%(ext)s',
            'noplaylist': True,
            'postprocessors': [
                {
                    'key': 'FFmpegExtractAudio',
                    'preferredcodec': 'mp3',
                    'preferredquality': '192',
                },
                {'key': 'FFmpegMetadata'},
            ],
            'postprocessor_args': ['-metadata', 'title=' + str(track_)],
        }
        with youtube_dl.YoutubeDL(ydl_opts) as ydl:
            try:
                log.debug(ydl.download([url_]))
            except Exception as e:
                log.debug(e)
                print('Failed to download: {}'.format(url_))
                continue
def extract_user_and_playlist_from_uri(uri):
    """Parse a spotify:user:<user>:playlist:<id> URI.

    :param uri: the spotify playlist URI string
    :return: (user_id, playlist_id) for the first match, or None when *uri*
             does not contain a playlist URI (callers unpack the tuple, so a
             non-matching uri surfaces as a TypeError — preserved behavior)
    """
    # bug fix: use a raw string so \w is not an invalid escape sequence
    playlist_re = re.compile(r"spotify:user:[\w,.]+:playlist:[\w]+")
    for playlist_uri in playlist_re.findall(uri):
        segments = playlist_uri.split(":")
        user_id = segments[2]
        log.info('List owner: ' + str(user_id))
        playlist_id = segments[4]
        log.info('List ID: ' + str(playlist_id))
        return user_id, playlist_id
def playlist_name(uri, sp):
    """Return the display name of the playlist referenced by *uri*."""
    owner_id, plist_id = extract_user_and_playlist_from_uri(uri)
    return get_playlist_name_from_id(plist_id, owner_id, sp)
def get_playlist_name_from_id(playlist_id, user_id, sp):
    """Look up a playlist's display name through the Spotify client *sp*."""
    playlist = sp.user_playlist(user_id, playlist_id,
                                fields="tracks, next, name")
    return playlist['name']
|
import sys
import numpy as np
from utils import *
from PIL import Image
from os import listdir
from os.path import isfile, join
# directory scanned for input images
image_folder = "images/"
# sprites are 8x8 pixel tiles
sprite_size = 8
# pixel tuple -> 2-bit color, as (high_bit, low_bit); keys cover
# 4-channel (RGBA), 3-channel (RGB) and 2-channel pixel formats
colors = {
    (  0,   0,   0, 255): (0,0),
    (255,   0,   0, 255): (0,1),
    (  0, 255,   0, 255): (1,0),
    (  0,   0, 255, 255): (1,1),
    (  0,   0,   0): (0,0),
    (255,   0,   0): (0,1),
    (  0, 255,   0): (1,0),
    (  0,   0, 255): (1,1),
    (  0,   0): (0,0),
    (  0, 255): (0,1)
}
# every regular file in the image folder is treated as an input image
image_files = [ f for f in listdir(image_folder) if isfile(join(image_folder,f)) ]
sprites = []
for image_file in image_files:
    im = Image.open(image_folder + image_file)
    pixel_width, pixel_height = im.size
    # both dimensions must be a whole number of sprite tiles
    if (pixel_width % sprite_size != 0):
        print "width not divisible by 8, yo"
        sys.exit(1)
    if (pixel_height % sprite_size != 0):
        print "height not divisible by 8, yo"
        sys.exit(1)
    # read pixels into array
    # NOTE(review): bare excepts fall back by channel count
    # (4-tuple, then 3-tuple, then 2-tuple pixels)
    try:
        pixels = np.array(im.getdata(), dtype=('int, int, int, int'))
    except:
        try:
            pixels = np.array(im.getdata(), dtype=('int, int, int'))
        except:
            pixels = np.array(im.getdata(), dtype=('int, int'))
    # unflatted array into image size
    pixels.shape = (pixel_height, pixel_width)
    # block pixels into sprites
    sprites = sprites + blockshaped(pixels, sprite_size, sprite_size).tolist()
# decompose every sprite row into two bit-planes and concatenate them;
# each entry of sprite_memory is one row's worth of binary digits
sprite_memory = []
for sprite in sprites:
    for line in sprite:
        decomposed_bytes = ["",""]
        for pixel in [tuple(p) for p in line]:
            # only pixels present in the colors table are representable
            if pixel not in colors:
                print "Invalid color found"
                sys.exit(1)
            # plane 0 collects the low bits, plane 1 the high bits
            decomposed_bytes[0] += str(colors[pixel][1])
            decomposed_bytes[1] += str(colors[pixel][0])
        sprite_memory.append("".join(decomposed_bytes))
# write the sprite memory out as a binary-radix .coe ROM initialization file
output = open("sprite_memory.coe", 'w')
output.truncate()
output.write("memory_initialization_radix=2;\nmemory_initialization_vector=\n")
output.write(",\n".join(sprite_memory))
output.write(";")
# bug fix: the original said `output.close` without parentheses, which
# references the method but never actually closes the file
output.close()
Added a check that the number of generated sprites fits within the memory size
import sys
import numpy as np
from utils import *
from PIL import Image
from os import listdir
from os.path import isfile, join
# total number of ROM lines available for sprite data
memory_size = 2048
# directory scanned for input images
image_folder = "images/"
# sprites are 8x8 pixel tiles
sprite_size = 8
# pixel tuple -> 2-bit color, as (high_bit, low_bit); keys cover
# 4-channel (RGBA), 3-channel (RGB) and 2-channel pixel formats
colors = {
    (  0,   0,   0, 255): (0,0),
    (255,   0,   0, 255): (0,1),
    (  0, 255,   0, 255): (1,0),
    (  0,   0, 255, 255): (1,1),
    (  0,   0,   0): (0,0),
    (255,   0,   0): (0,1),
    (  0, 255,   0): (1,0),
    (  0,   0, 255): (1,1),
    (  0,   0): (0,0),
    (  0, 255): (0,1)
}
# every regular file in the image folder is treated as an input image
image_files = [ f for f in listdir(image_folder) if isfile(join(image_folder,f)) ]
sprites = []
for image_file in image_files:
    im = Image.open(image_folder + image_file)
    pixel_width, pixel_height = im.size
    # both dimensions must be a whole number of sprite tiles
    if (pixel_width % sprite_size != 0):
        print "width not divisible by 8, yo"
        sys.exit(1)
    if (pixel_height % sprite_size != 0):
        print "height not divisible by 8, yo"
        sys.exit(1)
    # read pixels into array
    # NOTE(review): bare excepts fall back by channel count
    # (4-tuple, then 3-tuple, then 2-tuple pixels)
    try:
        pixels = np.array(im.getdata(), dtype=('int, int, int, int'))
    except:
        try:
            pixels = np.array(im.getdata(), dtype=('int, int, int'))
        except:
            pixels = np.array(im.getdata(), dtype=('int, int'))
    # unflatted array into image size
    pixels.shape = (pixel_height, pixel_width)
    # block pixels into sprites
    sprites = sprites + blockshaped(pixels, sprite_size, sprite_size).tolist()
# decompose every sprite row into two bit-planes and concatenate them;
# each entry of sprite_memory is one row's worth of binary digits
sprite_memory = []
for sprite in sprites:
    for line in sprite:
        decomposed_bytes = ["",""]
        for pixel in [tuple(p) for p in line]:
            # only pixels present in the colors table are representable
            if pixel not in colors:
                print "Invalid color found"
                sys.exit(1)
            # plane 0 collects the low bits, plane 1 the high bits
            decomposed_bytes[0] += str(colors[pixel][1])
            decomposed_bytes[1] += str(colors[pixel][0])
        sprite_memory.append("".join(decomposed_bytes))
if len(sprite_memory) > memory_size:
print "Too many sprites!"
print "You have {0} sprites, but the max is {1}".format(len(sprite_memory) / sprite_size, memory_size / sprite_size)
print "Aborting..."
else:
print "Generating {0} sprites of a maximum {1} sprites".format(len(sprite_memory) / sprite_size, memory_size / sprite_size)
output = open("sprite_memory.coe", 'w')
output.truncate()
output.write("memory_initialization_radix=2;\nmemory_initialization_vector=\n")
output.write(",\n".join(sprite_memory))
output.write(";")
output.close |
# (c) Stefan Countryman 2017
# Set up an interactive environment for handling GWHEN work.
import sys
import os
# used to save data in matlab format
import IPython.core.magic
import scipy.io.matlab
import subprocess
# look in both places where GWHEN software tends to hide
# look in both places where GWHEN software tends to hide
GWHEN_DIRS = ['~/multimessenger-pipeline', '~/dev/multimessenger-pipeline']
for gwhendir in [os.path.expanduser(d) for d in GWHEN_DIRS]:
    if os.path.exists(gwhendir):
        print('GWHEN dir found: {}'.format(gwhendir))
        # make the package and its script/handler subdirs importable
        sys.path.append(gwhendir)
        sys.path.append(os.path.join(gwhendir, 'gwhen', 'bin'))
        sys.path.append(os.path.join(gwhendir, 'gwhen', 'file_handlers'))
import gwhen
# optional extras: keep the session usable when these are unavailable
try:
    from gwhen.file_handlers.icecube_utils import realtime_tools
except ImportError:
    print("Failed to load IceCube realtime_tools.")
try:
    from gwhen.utils import zen_az2ra_dec, ra_dec2zen_az
except ImportError:
    print("Failed to load gwhen.utils coordinate conversions.")
# coordinate conversions for IceCube zenith/azimuth <-> RA/Dec
from gwhen.utils import zen_az2ra_dec
from gwhen.utils import ra_dec2zen_az
# initialize an event here with variable name "e" for quick work.
print('Setting `e` to an event in the current working directory...')
try:
    e = gwhen.Event.fromdir()
# NOTE(review): `except ... as e` rebinds the same name `e` on failure,
# so `e` holds the exception rather than an event in that case
except AttributeError as e:
    print('Failed, looks like an old version of GWHEN sans fromdir.')
@IPython.core.magic.register_line_magic
def gopen(line):
    """Open a gwhen file handler. If a valid filehandler object name is
    provided, then that file handler is opened."""
    # NOTE(review): eval of user-typed input — standard IPython magic pattern
    handler = eval(line)
    subprocess.Popen(['open', handler.fullpath])
del gopen
@IPython.core.magic.register_line_magic
def gql(line):
    """Open a gwhen file handler file in quicklook. If a valid filehandler
    object name is provided, then that file handler is opened."""
    # NOTE(review): eval of user-typed input — standard IPython magic pattern
    handler = eval(line)
    subprocess.Popen(['ql', handler.fullpath])
del gql
@IPython.core.magic.register_line_magic
def gcat(line):
    """Imgcat this file to the command line. Good for a very quick preview."""
    # NOTE(review): eval of user-typed input — standard IPython magic pattern
    handler = eval(line)
    subprocess.Popen(['imgcat', handler.fullpath])
del gcat
Renamed gwhen -> llama throughout the igwhen IPython profile
# (c) Stefan Countryman 2017
# Set up an interactive environment for handling GWHEN work.
import sys
import os
# used to save data in matlab format
import IPython.core.magic
import scipy.io.matlab
import subprocess
import llama
# optional extras: keep the session usable when these are unavailable
try:
    from llama.file_handlers.icecube_utils import realtime_tools
except ImportError:
    print("Failed to load IceCube realtime_tools.")
try:
    from llama.utils import zen_az2ra_dec, ra_dec2zen_az
except ImportError:
    print("Failed to load llama.utils coordinate conversions.")
# coordinate conversions for IceCube zenith/azimuth <-> RA/Dec
from llama.utils import zen_az2ra_dec
from llama.utils import ra_dec2zen_az
# initialize an event here with variable name "e" for quick work.
print('Setting `e` to an event in the current working directory...')
try:
    e = llama.Event.fromdir()
# NOTE(review): `except ... as e` rebinds the same name `e` on failure,
# so `e` holds the exception rather than an event in that case
except AttributeError as e:
    print('Failed, looks like an old version of GWHEN sans fromdir.')
@IPython.core.magic.register_line_magic
def gopen(line):
    """Open a llama file handler. If a valid filehandler object name is
    provided, then that file handler is opened."""
    # NOTE(review): eval of user-typed input — standard IPython magic pattern
    handler = eval(line)
    subprocess.Popen(['open', handler.fullpath])
del gopen
@IPython.core.magic.register_line_magic
def gql(line):
    """Open a llama file handler file in quicklook. If a valid filehandler
    object name is provided, then that file handler is opened."""
    # NOTE(review): eval of user-typed input — standard IPython magic pattern
    handler = eval(line)
    subprocess.Popen(['ql', handler.fullpath])
del gql
@IPython.core.magic.register_line_magic
def gcat(line):
    """Imgcat this file to the command line. Good for a very quick preview."""
    # NOTE(review): eval of user-typed input — standard IPython magic pattern
    handler = eval(line)
    subprocess.Popen(['imgcat', handler.fullpath])
del gcat
#
# spyne - Copyright (C) Spyne contributors.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
#
"""The ``spyne.protocol.xml`` module contains an xml-based protocol that
serializes python objects to xml using Xml Schema conventions.
Logs valid documents to ``'spyne.protocol.xml'`` and invalid documents to
``spyne.protocol.xml.invalid``. Use the usual ``logging.getLogger()`` and
friends to configure how these get logged.
Warning! You can get a lot of crap in the 'invalid' logger. You're not advised
to turn it on for a production system.
"""
import logging
logger = logging.getLogger('spyne.protocol.xml')
logger_invalid = logging.getLogger('spyne.protocol.xml.invalid')
from inspect import isgenerator
from collections import defaultdict
from lxml import etree
from lxml import html
from lxml.builder import E
from lxml.etree import XMLSyntaxError
from lxml.etree import XMLParser
from spyne import BODY_STYLE_WRAPPED
from spyne.util import Break, coroutine
from spyne.util.six import text_type, string_types
from spyne.util.cdict import cdict
from spyne.util.etreeconv import etree_to_dict, dict_to_etree,\
root_dict_to_etree
from spyne.const.xml import XSI, NS_SOAP11_ENC
from spyne.error import Fault
from spyne.error import ValidationError
from spyne.const.ansi_color import LIGHT_GREEN
from spyne.const.ansi_color import LIGHT_RED
from spyne.const.ansi_color import END_COLOR
from spyne.const.xml import NS_SOAP11_ENV
from spyne.const.xml import PREFMAP, DEFAULT_NS
from spyne.model import Any, ModelBase, Array, Iterable, ComplexModelBase, \
AnyHtml, AnyXml, AnyDict, Unicode, PushBase, File, ByteArray, XmlData, \
XmlAttribute
from spyne.model.binary import BINARY_ENCODING_BASE64
from spyne.model.enum import EnumBase
from spyne.protocol import ProtocolBase
from spyne.util import six
# String types that may carry raw xml input, per python major version.
if six.PY2:
    STR_TYPES = (str, unicode)
else:
    STR_TYPES = (str, bytes)

# Attribute dict that marks an element as an explicit null (xsi:nil="true").
NIL_ATTR = {XSI('nil'): 'true'}
# Qualified name of the xsi:type attribute, used for polymorphism.
XSI_TYPE = XSI('type')
def _append(parent, child_elt):
if hasattr(parent, 'append'):
parent.append(child_elt)
else:
parent.write(child_elt)
def _gen_tagname(ns, name):
if ns is not None:
name = "{%s}%s" % (ns, name)
return name
class SchemaValidationError(Fault):
    """Raised when the input stream could not be validated by the Xml Schema."""

    # Soap-style fault code reported back to the client.
    CODE = 'Client.SchemaValidationError'

    def __init__(self, faultstring):
        # Delegate to Fault with the fixed code and the schema error text.
        super(SchemaValidationError, self).__init__(self.CODE, faultstring)
class SubXmlBase(ProtocolBase):
    """Base class for protocols that can serialize objects as fragments
    inside a larger xml document."""

    def subserialize(self, ctx, cls, inst, parent, ns=None, name=None):
        # BUGFIX: previously this called to_parent(ctx, cls, inst, parent,
        # name), which dropped ``ns`` and passed ``name`` into the ``ns``
        # positional slot. Forward both arguments in their proper places.
        return self.to_parent(ctx, cls, inst, parent, ns, name)

    def to_parent(self, ctx, cls, inst, parent, ns, *args, **kwargs):
        """Serializes inst to an Element instance and appends it to the 'parent'.

        :param self: The protocol that will be used to serialize the given
            value.
        :param cls: The type of the value that's going to determine how to
            pack the given value.
        :param inst: The value to be set for the 'text' element of the newly
            created SubElement
        :param parent: The parent Element to which the new child will be
            appended.
        :param ns: The target namespace of the new SubElement, used with
            'name' to set the tag.
        :param name: The tag name of the new SubElement, 'retval' by default.
        """
        raise NotImplementedError()
class XmlDocument(SubXmlBase):
"""The Xml input and output protocol, using the information from the Xml
Schema generated by Spyne types.
See the following material for more (much much more!) information.
* http://www.w3.org/TR/xmlschema-0/
* http://www.w3.org/TR/xmlschema-1/
* http://www.w3.org/TR/xmlschema-2/
Receiving Xml from untrusted sources is a dodgy security dance as the Xml
attack surface is /huge/.
    Spyne's ``lxml.etree.XMLParser`` instance has ``resolve_pis``,
    ``load_dtd``, ``resolve_entities``, ``dtd_validation`` and
    ``huge_tree`` all set to ``False`` by default.
Having ```resolve_entities``` disabled will prevent the 'lxml' validation
for documents with custom xml entities defined in the DTD. See the example
in examples/xml/validation_error to play with the settings that work best
for you. Please note that enabling ```resolve_entities``` is a security
hazard that can lead to disclosure of sensitive information.
See https://pypi.python.org/pypi/defusedxml for a pragmatic overview of
Xml security in Python world.
:param app: The owner application instance.
:param validator: One of (None, 'soft', 'lxml', 'schema',
ProtocolBase.SOFT_VALIDATION, XmlDocument.SCHEMA_VALIDATION).
Both ``'lxml'`` and ``'schema'`` values are equivalent to
``XmlDocument.SCHEMA_VALIDATION``.
Defaults to ``None``.
:param replace_null_with_default: If ``False``, does not replace incoming
explicit null values with denoted default values. This is against Xml
Schema standard but consistent with other Spyne protocol
implementations. Set this to False if you want cross-protocol
compatibility.
Defaults to ``True``.
Relevant quote from xml schema primer
(http://www.w3.org/TR/xmlschema-0/):
..
When a value is present and is null The schema processor treats
defaulted elements slightly differently. When an element is declared
with a default value, the value of the element is whatever value
appears as the element's content in the instance document; if the
element appears without any content, the schema processor provides
the element with a value equal to that of the default attribute.
However, if the element does not appear in the instance document,
the schema processor does not provide the element at all. In
summary, the differences between element and attribute defaults can
be stated as: Default attribute values apply when attributes are
missing, and default element values apply when elements are empty.
:param xml_declaration: Whether to add xml_declaration to the responses
Defaults to ``True``.
:param cleanup_namespaces: Whether to add clean up namespace declarations
in the response document.
Defaults to ``True``.
:param encoding: The suggested string encoding for the returned xml
documents. The transport can override this.
Defaults to ``None``.
:param pretty_print: When ``True``, returns the document in a pretty-printed
format.
Defaults to ``False``.
:param parse_xsi_type: Set to ``False`` to disable parsing of ``xsi:type``
attribute, effectively disabling polymorphism.
Defaults to ``True``.
The following are passed straight to the ``XMLParser()`` instance from
lxml. Docs are also plagiarized from the lxml documentation. Please note
that some of the defaults are different to make parsing safer by default.
:param attribute_defaults: read the DTD (if referenced by the document) and
add the default attributes from it.
Defaults to ``False``
:param dtd_validation: validate while parsing (if a DTD was referenced).
Defaults to ``False``
:param load_dtd: load and parse the DTD while parsing (no validation is
performed).
Defaults to ``False``.
:param no_network: prevent network access when looking up external
documents.
Defaults to ``True``.
:param ns_clean: try to clean up redundant namespace declarations.
Please note that this is for incoming documents.
See ``cleanup_namespaces`` parameter for output documents.
Defaults to ``False``.
:param recover: try hard to parse through broken Xml.
Defaults to ``False``.
:param remove_blank_text: discard blank text nodes between tags, also known
as ignorable whitespace. This is best used together with a DTD or schema
(which tells data and noise apart), otherwise a heuristic will be
applied.
Defaults to ``False``.
:param remove_pis: When ``True`` xml parser discards processing
instructions.
Defaults to ``True``.
:param strip_cdata: replace CDATA sections by normal text content.
Defaults to ``True``
:param resolve_entities: replace entities by their text value.
Defaults to ``False``.
:param huge_tree: disable security restrictions and support very deep trees
and very long text content. (only affects libxml2 2.7+)
Defaults to ``False``.
:param compact: use compact storage for short text content.
Defaults to ``True``.
"""
SCHEMA_VALIDATION = type("Schema", (object,), {})
mime_type = 'text/xml'
default_binary_encoding = BINARY_ENCODING_BASE64
type = set(ProtocolBase.type)
type.add('xml')
soap_env = PREFMAP[NS_SOAP11_ENV]
ns_soap_env = NS_SOAP11_ENV
ns_soap_enc = NS_SOAP11_ENC
    def __init__(self, app=None, validator=None,
                replace_null_with_default=True,
                xml_declaration=True,
                cleanup_namespaces=True, encoding=None, pretty_print=False,
                attribute_defaults=False,
                dtd_validation=False,
                load_dtd=False,
                no_network=True,
                ns_clean=False,
                recover=False,
                remove_blank_text=False,
                remove_pis=True,
                strip_cdata=True,
                resolve_entities=False,
                huge_tree=False,
                compact=True,
                binary_encoding=None,
                parse_xsi_type=True,
                polymorphic=False,
            ):
        """Initialize the protocol. See the class docstring for the meaning
        of each parameter; the parser-related keywords are stored verbatim
        in ``self.parser_kwargs`` and later fed to lxml's ``XMLParser``."""
        super(XmlDocument, self).__init__(app, validator,
                                               binary_encoding=binary_encoding)

        # Built lazily in set_app() when schema validation is enabled.
        self.validation_schema = None

        self.xml_declaration = xml_declaration
        self.cleanup_namespaces = cleanup_namespaces
        self.replace_null_with_default = replace_null_with_default

        # Output encoding; the transport may override this at serialization.
        if encoding is None:
            self.encoding = 'UTF-8'
        else:
            self.encoding = encoding

        self.polymorphic = polymorphic
        self.pretty_print = pretty_print
        self.parse_xsi_type = parse_xsi_type

        # cdict dispatch tables: a model class resolves to the handler of
        # its closest registered ancestor. Order of entries is irrelevant.
        self.serialization_handlers = cdict({
            Any: self.any_to_parent,
            Fault: self.fault_to_parent,
            EnumBase: self.enum_to_parent,
            AnyXml: self.any_xml_to_parent,
            XmlData: self.xmldata_to_parent,
            AnyDict: self.any_dict_to_parent,
            AnyHtml: self.any_html_to_parent,
            ModelBase: self.modelbase_to_parent,
            ByteArray: self.byte_array_to_parent,
            ComplexModelBase: self.complex_to_parent,
            XmlAttribute: self.xmlattribute_to_parent,
            SchemaValidationError: self.schema_validation_error_to_parent,
        })

        self.deserialization_handlers = cdict({
            AnyHtml: self.html_from_element,
            AnyXml: self.xml_from_element,
            Any: self.xml_from_element,
            Array: self.array_from_element,
            Fault: self.fault_from_element,
            AnyDict: self.dict_from_element,
            EnumBase: self.enum_from_element,
            ModelBase: self.base_from_element,
            Unicode: self.unicode_from_element,
            Iterable: self.iterable_from_element,
            ByteArray: self.byte_array_from_element,
            ComplexModelBase: self.complex_from_element,
        })

        # Kept as a dict (not a parser instance) so a fresh XMLParser can be
        # constructed per document. remove_comments is forced on.
        self.parser_kwargs = dict(
            attribute_defaults=attribute_defaults,
            dtd_validation=dtd_validation,
            load_dtd=load_dtd,
            no_network=no_network,
            ns_clean=ns_clean,
            recover=recover,
            remove_blank_text=remove_blank_text,
            remove_comments=True,
            remove_pis=remove_pis,
            strip_cdata=strip_cdata,
            resolve_entities=resolve_entities,
            huge_tree=huge_tree,
            compact=compact,
            encoding=encoding,
        )
def set_validator(self, validator):
if validator in ('lxml', 'schema') or \
validator is self.SCHEMA_VALIDATION:
self.validate_document = self.__validate_lxml
self.validator = self.SCHEMA_VALIDATION
elif validator == 'soft' or validator is self.SOFT_VALIDATION:
self.validator = self.SOFT_VALIDATION
elif validator is None:
pass
else:
raise ValueError(validator)
self.validation_schema = None
    def validate_body(self, ctx, message):
        """Sets ctx.method_request_string and calls :func:`generate_contexts`
        for validation."""
        assert message in (self.REQUEST, self.RESPONSE), message

        # Pessimistic default: overwritten below only if validation succeeds,
        # so a raised SchemaValidationError still logs with the Error header.
        line_header = LIGHT_RED + "Error:" + END_COLOR

        try:
            self.validate_document(ctx.in_body_doc)
            if message is self.REQUEST:
                line_header = LIGHT_GREEN + "Method request string:" + END_COLOR
            else:
                line_header = LIGHT_RED + "Response:" + END_COLOR
        finally:
            # Log whether or not validation raised; the exception (if any)
            # propagates after this block.
            if logger.level == logging.DEBUG:
                logger.debug("%s %s" % (line_header, ctx.method_request_string))
                logger.debug(etree.tostring(ctx.in_document, pretty_print=True))
def set_app(self, value):
ProtocolBase.set_app(self, value)
self.validation_schema = None
if self.validator is self.SCHEMA_VALIDATION and value is not None:
from spyne.interface.xml_schema import XmlSchema
xml_schema = XmlSchema(value.interface)
xml_schema.build_validation_schema()
self.validation_schema = xml_schema.validation_schema
def __validate_lxml(self, payload):
ret = self.validation_schema.validate(payload)
logger.debug("Validated ? %r" % ret)
if ret == False:
error_text = text_type(self.validation_schema.error_log.last_error)
raise SchemaValidationError(error_text.encode('ascii',
'xmlcharrefreplace'))
def create_in_document(self, ctx, charset=None):
"""Uses the iterable of string fragments in ``ctx.in_string`` to set
``ctx.in_document``."""
string = b''.join(ctx.in_string)
try:
try:
ctx.in_document = etree.fromstring(string,
parser=XMLParser(**self.parser_kwargs))
except ValueError:
logger.debug('ValueError: Deserializing from unicode strings '
'with encoding declaration is not supported by '
'lxml.')
ctx.in_document = etree.fromstring(string.decode(charset),
self.parser)
except XMLSyntaxError as e:
logger_invalid.error("%r in string %r", e, string)
raise Fault('Client.XMLSyntaxError', str(e))
def decompose_incoming_envelope(self, ctx, message):
assert message in (self.REQUEST, self.RESPONSE)
ctx.in_header_doc = None # If you need header support, you should use Soap
ctx.in_body_doc = ctx.in_document
ctx.method_request_string = ctx.in_body_doc.tag
self.validate_body(ctx, message)
def from_element(self, ctx, cls, element):
cls_attrs = self.get_cls_attrs(cls)
if bool(element.get(XSI('nil'))):
if self.validator is self.SOFT_VALIDATION and not \
cls_attrs.nillable:
raise ValidationError(None)
if self.replace_null_with_default:
return cls_attrs.default
return None
# if present, use the xsi:type="ns0:ObjectName"
# attribute to instantiate subclass objects
if self.parse_xsi_type:
xsi_type = element.get(XSI_TYPE, None)
if xsi_type is not None:
if ":" in xsi_type:
prefix, objtype = xsi_type.split(':', 1)
else:
prefix, objtype = None, xsi_type
ns = element.nsmap.get(prefix)
if ns is not None:
classkey = "{%s}%s" % (ns, objtype)
else:
logger.error("xsi:type namespace prefix "
"'%s' in '%s' not found in %r",
ns, xsi_type, element.nsmap)
raise ValidationError(xsi_type)
newclass = ctx.app.interface.classes.get(classkey, None)
if newclass is None:
logger.error("xsi:type '%s' interpreted as class key '%s' "
"is not recognized", xsi_type, classkey)
raise ValidationError(xsi_type)
cls = newclass
logger.debug("xsi:type '%s' overrides %r to %r", xsi_type,
cls, newclass)
handler = self.deserialization_handlers[cls]
return handler(ctx, cls, element)
    def to_parent(self, ctx, cls, inst, parent, ns, *args, **kwargs):
        """Serialize ``inst`` as a child of ``parent``, dispatching on the
        (possibly polymorphically-resolved) type of the value."""
        # May swap cls for a subclass and request an xsi:type marker.
        cls, add_type = self.get_polymorphic_target(cls, inst)
        cls_attrs = self.get_cls_attrs(cls)

        # A type-level sub-protocol takes over serialization entirely.
        subprot = cls_attrs.prot
        if subprot is not None and isinstance(subprot, SubXmlBase):
            return subprot.subserialize(ctx, cls, inst, parent, ns,
                                                              *args, **kwargs)

        handler = self.serialization_handlers[cls]

        # Fall back to the declared default; if still None, emit xsi:nil.
        if inst is None:
            inst = cls_attrs.default

        if inst is None:
            return self.null_to_parent(ctx, cls, inst, parent, ns,
                                                              *args, **kwargs)

        # Excluded members are silently skipped.
        if cls_attrs.exc:
            return

        kwargs['add_type'] = add_type
        return handler(ctx, cls, inst, parent, ns, *args, **kwargs)
    def deserialize(self, ctx, message):
        """Takes a MethodContext instance and a string containing ONE root xml
        tag.

        Returns the corresponding native python object.

        Not meant to be overridden.
        """
        assert message in (self.REQUEST, self.RESPONSE)

        self.event_manager.fire_event('before_deserialize', ctx)

        # No matching method descriptor: surface either a generic "not
        # found" fault or the error recorded earlier in the pipeline.
        if ctx.descriptor is None:
            if ctx.in_error is None:
                raise Fault("Client", "Method %r not found." %
                                                      ctx.method_request_string)
            else:
                raise ctx.in_error

        if message is self.REQUEST:
            body_class = ctx.descriptor.in_message
        elif message is self.RESPONSE:
            body_class = ctx.descriptor.out_message

        # decode method arguments
        if ctx.in_body_doc is None:
            # Empty body: one None placeholder per declared member.
            ctx.in_object = [None] * len(body_class._type_info)
        else:
            ctx.in_object = self.from_element(ctx, body_class, ctx.in_body_doc)

        if logger.level == logging.DEBUG and message is self.REQUEST:
            line_header = '%sRequest%s' % (LIGHT_GREEN, END_COLOR)

            outdoc_str = None
            # NOTE(review): this debug path inspects ctx.out_document while
            # *de*serializing -- looks like it was meant to be in_document;
            # confirm against callers before changing.
            if ctx.out_document is not None:
                outdoc_str = etree.tostring(ctx.out_document,
                            xml_declaration=self.xml_declaration,
                                                             pretty_print=True)

            logger.debug("%s %s" % (line_header, outdoc_str))

        self.event_manager.fire_event('after_deserialize', ctx)
    def serialize(self, ctx, message):
        """Uses ``ctx.out_object``, ``ctx.out_header`` or ``ctx.out_error`` to
        set ``ctx.out_body_doc``, ``ctx.out_header_doc`` and
        ``ctx.out_document`` as an ``lxml.etree._Element instance``.

        Not meant to be overridden.
        """
        assert message in (self.REQUEST, self.RESPONSE)

        self.event_manager.fire_event('before_serialize', ctx)

        if ctx.out_error is not None:
            # Serialize under a throwaway root ('punk'), then keep only the
            # single child as the real output document.
            tmp_elt = etree.Element('punk')
            retval = self.to_parent(ctx, ctx.out_error.__class__, ctx.out_error,
                                        tmp_elt, self.app.interface.get_tns())
            ctx.out_document = tmp_elt[0]

        else:
            if message is self.REQUEST:
                result_message_class = ctx.descriptor.in_message
            elif message is self.RESPONSE:
                result_message_class = ctx.descriptor.out_message

            # assign raw result to its wrapper, result_message
            if ctx.descriptor.body_style == BODY_STYLE_WRAPPED:
                result_inst = result_message_class()

                # ctx.out_object is positionally aligned with the wrapper's
                # declared members.
                for i, (k, v) in enumerate(
                                        result_message_class._type_info.items()):
                    attrs = self.get_cls_attrs(v)
                    result_inst._safe_set(k, ctx.out_object[i], v, attrs)

            else:
                result_inst = ctx.out_object

            if ctx.out_stream is None:
                # Same throwaway-root trick as in the error branch above.
                tmp_elt = etree.Element('punk')
                retval = self.to_parent(ctx, result_message_class,
                                result_inst, tmp_elt, self.app.interface.get_tns())
                ctx.out_document = tmp_elt[0]

            else:
                # Streaming output: incremental generation, no out_document.
                retval = self.incgen(ctx, result_message_class,
                                      result_inst, self.app.interface.get_tns())

        if self.cleanup_namespaces and ctx.out_document is not None:
            etree.cleanup_namespaces(ctx.out_document)

        self.event_manager.fire_event('after_serialize', ctx)

        return retval
def create_out_string(self, ctx, charset=None):
"""Sets an iterable of string fragments to ctx.out_string"""
if charset is None:
charset = self.encoding
ctx.out_string = [etree.tostring(ctx.out_document,
encoding=charset,
pretty_print=self.pretty_print,
xml_declaration=self.xml_declaration)]
if logger.level == logging.DEBUG:
logger.debug('%sResponse%s %s' % (LIGHT_RED, END_COLOR,
etree.tostring(ctx.out_document,
pretty_print=True, encoding='UTF-8')))
    @coroutine
    def incgen(self, ctx, cls, inst, ns, name=None):
        """Incrementally serialize ``inst`` straight to ``ctx.out_stream``
        using lxml's ``xmlfile`` writer; runs as a push coroutine."""
        if name is None:
            name = cls.get_type_name()
        with etree.xmlfile(ctx.out_stream) as xf:
            ret = self.to_parent(ctx, cls, inst, xf, ns, name)
            if isgenerator(ret):
                # Pump values pushed into this coroutine down to the inner
                # generator until a Break is thrown at us.
                try:
                    while True:
                        y = (yield) # may throw Break
                        ret.send(y)

                except Break:
                    try:
                        ret.throw(Break())
                    except StopIteration:
                        pass

        # Give transports with an explicit end-of-stream hook a chance to
        # flush/close.
        if hasattr(ctx.out_stream, 'finish'):
            ctx.out_stream.finish()
def _gen_tag(self, cls, ns, name, add_type=False, **_):
if ns is not None:
name = "{%s}%s" % (ns, name)
retval = E(name)
if add_type:
retval.attrib[XSI_TYPE] = cls.get_type_name_ns(self.app.interface)
return retval
def byte_array_to_parent(self, ctx, cls, inst, parent, ns, name='retval',
**kwargs):
elt = self._gen_tag(cls, ns, name, **kwargs)
elt.text = self.to_unicode(cls, inst, self.binary_encoding)
_append(parent, elt)
def modelbase_to_parent(self, ctx, cls, inst, parent, ns, name='retval',
**kwargs):
elt = self._gen_tag(cls, ns, name, **kwargs)
elt.text = self.to_unicode(cls, inst)
_append(parent, elt)
def null_to_parent(self, ctx, cls, inst, parent, ns, name='retval',
**kwargs):
if issubclass(cls, XmlAttribute):
return
elif issubclass(cls, XmlData):
parent.attrib.update(NIL_ATTR)
else:
elt = self._gen_tag(cls, ns, name, **kwargs)
elt.attrib.update(NIL_ATTR)
_append(parent, elt)
def null_from_element(self, ctx, cls, element):
return None
    def xmldata_to_parent(self, ctx, cls, inst, parent, ns, name,
                                                       add_type=False, **_):
        """Serialize an XmlData value directly into ``parent`` via the
        type's own ``marshall`` hook."""
        cls_attrs = self.get_cls_attrs(cls)

        # The class's explicit namespace wins over the sub_ns attribute.
        ns = cls._ns
        if ns is None:
            ns = cls_attrs.sub_ns

        name = _gen_tagname(ns, name)

        # xsi:type goes on the *parent* since XmlData has no own element.
        if add_type:
            parent.attrib[XSI_TYPE] = cls.get_type_name_ns(self.app.interface)

        cls.marshall(self, name, inst, parent)
def xmlattribute_to_parent(self, ctx, cls, inst, parent, ns, name, **_):
ns = cls._ns
cls_attrs = self.get_cls_attrs(cls)
if ns is None:
ns = cls_attrs.sub_ns
name = _gen_tagname(ns, name)
if inst is not None:
if issubclass(cls.type, (ByteArray, File)):
parent.set(name, self.to_unicode(cls.type, inst,
self.binary_encoding))
else:
parent.set(name, self.to_unicode(cls.type, inst))
    @coroutine
    def gen_members_parent(self, ctx, cls, inst, parent, tag_name, subelts,
                                                                     add_type):
        """Open the complex object's wrapper element, write any prebuilt
        ``subelts``, then delegate member serialization to
        ``_get_members_etree``; runs as a push coroutine."""
        attrib = {}
        if add_type:
            tnn = cls.get_type_name_ns(self.app.interface)
            if tnn != None:
                attrib[XSI_TYPE] = tnn
            else:
                # this only happens on incomplete interface states for eg.
                # get_object_as_xml where the full init is not performed for
                # perf reasons
                attrib[XSI_TYPE] = cls.get_type_name()

        if isinstance(parent, etree._Element):
            # In-memory tree: build a SubElement and fill it.
            elt = etree.SubElement(parent, tag_name, attrib=attrib)
            elt.extend(subelts)
            ret = self._get_members_etree(ctx, cls, inst, elt)

            if isgenerator(ret):
                # Pump pushed values down to the member coroutine until a
                # Break is thrown at us.
                try:
                    while True:
                        y = (yield) # may throw Break
                        ret.send(y)

                except Break:
                    try:
                        ret.throw(Break())
                    except StopIteration:
                        pass

        else:
            # Incremental writer: open the element as a context manager and
            # stream children through parent.write().
            with parent.element(tag_name, attrib=attrib):
                for e in subelts:
                    parent.write(e)
                ret = self._get_members_etree(ctx, cls, inst, parent)
                if isgenerator(ret):
                    try:
                        while True:
                            y = (yield)
                            ret.send(y)

                    except Break:
                        try:
                            ret.throw(Break())
                        except StopIteration:
                            pass
    @coroutine
    def _get_members_etree(self, ctx, cls, inst, parent):
        """Serialize every member of ``inst`` (including inherited ones)
        under ``parent``; runs as a push coroutine that forwards pushed
        values into any child coroutines it spawns."""
        try:
            # Serialize members of the parent class first, recursively.
            parent_cls = getattr(cls, '__extends__', None)

            if not (parent_cls is None):
                ret = self._get_members_etree(ctx, parent_cls, inst, parent)
                if ret is not None:
                    try:
                        while True:
                            sv2 = (yield) # may throw Break
                            ret.send(sv2)

                    except Break:
                        try:
                            ret.throw(Break())
                        except StopIteration:
                            pass

            for k, v in cls._type_info.items():
                # Excluded members are skipped entirely.
                sub_cls_attrs = self.get_cls_attrs(v)
                if sub_cls_attrs.exc:
                    continue

                try:
                    subvalue = getattr(inst, k, None)
                except: # e.g. SqlAlchemy could throw NoSuchColumnError
                    subvalue = None

                # This is a tight loop, so enable this only when necessary.
                # logger.debug("get %r(%r) from %r: %r" % (k, v, inst, subvalue))

                sub_ns = v.Attributes.sub_ns
                if sub_ns is None:
                    sub_ns = cls.get_namespace()

                sub_name = v.Attributes.sub_name
                if sub_name is None:
                    sub_name = k

                mo = v.Attributes.max_occurs
                if subvalue is not None and mo > 1:
                    if isinstance(subvalue, PushBase):
                        # Push-style member: values arrive via (yield) one by
                        # one; loop ends when Break is thrown into us.
                        while True:
                            sv = (yield)
                            ret = self.to_parent(ctx, v, sv, parent, sub_ns,
                                                                      sub_name)
                            if ret is not None:
                                try:
                                    while True:
                                        sv2 = (yield) # may throw Break
                                        ret.send(sv2)

                                except Break:
                                    try:
                                        ret.throw(Break())
                                    except StopIteration:
                                        pass

                    else:
                        # Plain iterable member: one child element per item.
                        for sv in subvalue:
                            ret = self.to_parent(ctx, v, sv, parent, sub_ns,
                                                                      sub_name)

                            if ret is not None:
                                try:
                                    while True:
                                        sv2 = (yield) # may throw Break
                                        ret.send(sv2)

                                except Break:
                                    try:
                                        ret.throw(Break())
                                    except StopIteration:
                                        pass

                # Don't include empty values for
                # non-nillable optional attributes.
                elif subvalue is not None or v.Attributes.min_occurs > 0:
                    ret = self.to_parent(ctx, v, subvalue, parent, sub_ns,
                                                                      sub_name)
                    if ret is not None:
                        try:
                            while True:
                                sv2 = (yield)
                                ret.send(sv2)

                        except Break as b:
                            try:
                                ret.throw(b)
                            except StopIteration:
                                pass

        except Break:
            # Graceful termination of the whole member stream.
            pass
def complex_to_parent(self, ctx, cls, inst, parent, ns, name=None,
add_type=False, **_):
cls_attrs = self.get_cls_attrs(cls)
sub_name = cls_attrs.sub_name
if sub_name is not None:
name = sub_name
if name is None:
name = cls.get_type_name()
sub_ns = cls_attrs.sub_ns
if not sub_ns in (None, DEFAULT_NS):
ns = sub_ns
tag_name = _gen_tagname(ns, name)
inst = cls.get_serialization_instance(inst)
return self.gen_members_parent(ctx, cls, inst, parent, tag_name, [],
add_type)
    def _fault_to_parent_impl(self, ctx, cls, inst, parent, ns, subelts, **_):
        """Common tail of fault serialization: attach the detail element (if
        any) to ``subelts`` and wrap everything in a soap Fault element."""
        tag_name = "{%s}Fault" % self.ns_soap_env

        # Accepting raw lxml objects as detail is DEPRECATED. It's also not
        # documented. It's kept for backwards-compatibility purposes.
        if isinstance(inst.detail, string_types + (etree._Element,)):
            _append(subelts, E('detail', inst.detail))
        elif isinstance(inst.detail, dict):
            if len(inst.detail) > 0:
                _append(subelts, root_dict_to_etree({'detail':inst.detail}))
        elif inst.detail is None:
            pass
        else:
            raise TypeError('Fault detail Must be dict, got', type(inst.detail))

        # add other nonstandard fault subelements with get_members_etree
        return self.gen_members_parent(ctx, cls, inst, parent, tag_name,
                                                      subelts, add_type=False)
def fault_to_parent(self, ctx, cls, inst, parent, ns, *args, **kwargs):
subelts = [
E("faultcode", '%s:%s' % (self.soap_env, inst.faultcode)),
E("faultstring", inst.faultstring),
E("faultactor", inst.faultactor),
]
return self._fault_to_parent_impl(ctx, cls, inst, parent, ns, subelts)
def schema_validation_error_to_parent(self, ctx, cls, inst, parent, ns,**_):
subelts = [
E("faultcode", '%s:%s' % (self.soap_env, inst.faultcode)),
# HACK: Does anyone know a better way of injecting raw xml entities?
E("faultstring", html.fromstring(inst.faultstring).text),
E("faultactor", inst.faultactor),
]
if inst.detail != None:
_append(subelts, E('detail', inst.detail))
# add other nonstandard fault subelements with get_members_etree
return self._fault_to_parent_impl(ctx, cls, inst, parent, ns, subelts)
def enum_to_parent(self, ctx, cls, inst, parent, ns, name='retval', **kwargs):
self.modelbase_to_parent(ctx, cls, str(inst), parent, ns, name, **kwargs)
    def any_xml_to_parent(self, ctx, cls, inst, parent, ns, name, **_):
        """Append raw xml -- possibly given as a string -- under ``parent``,
        wrapped in an element named ``name``."""
        if isinstance(inst, STR_TYPES):
            # Strings are parsed into an element tree first.
            inst = etree.fromstring(inst)

        _append(parent, E(_gen_tagname(ns, name), inst))
def any_to_parent(self, ctx, cls, inst, parent, ns, name, **_):
_append(parent, E(_gen_tagname(ns, name), inst))
    def any_html_to_parent(self, ctx, cls, inst, parent, ns, name, **_):
        """Append an html fragment under ``parent``, parsing it first when
        given as a non-empty string."""
        # Empty strings are passed through untouched -- html.fromstring
        # would raise on them.
        if isinstance(inst, string_types) and len(inst) > 0:
            inst = html.fromstring(inst)

        _append(parent, E(_gen_tagname(ns, name), inst))
def any_dict_to_parent(self, ctx, cls, inst, parent, ns, name, **_):
elt = E(_gen_tagname(ns, name))
dict_to_etree(inst, elt)
_append(parent, elt)
    def complex_from_element(self, ctx, cls, elt):
        """Deserialize a complex object from ``elt``: tag body, child
        elements, their attributes, and the root element's attributes, with
        optional soft-validation of member frequencies."""
        inst = cls.get_deserialization_instance(ctx)

        flat_type_info = cls.get_flat_type_info(cls)

        # this is for validating cls.Attributes.{min,max}_occurs
        frequencies = defaultdict(int)

        # The element's own text body may map to one or more members.
        cls_attrs = self.get_cls_attrs(cls)
        if cls_attrs._xml_tag_body_as is not None:
            for xtba_key, xtba_type in cls_attrs._xml_tag_body_as:
                xtba_attrs = self.get_cls_attrs(xtba_type.type)
                if issubclass(xtba_type.type, (ByteArray, File)):
                    value = self.from_unicode(xtba_type.type, elt.text,
                                                        self.binary_encoding)
                else:
                    value = self.from_unicode(xtba_type.type, elt.text)

                inst._safe_set(xtba_key, value, xtba_type.type, xtba_attrs)

        # parse input to set incoming data to related attributes.
        for c in elt:
            if isinstance(c, etree._Comment):
                continue

            # Strip the Clark-notation namespace to get the member key.
            key = c.tag.split('}', 1)[-1]
            frequencies[key] += 1

            # Fall back to alternate names, first unqualified then fully
            # qualified; unknown tags are ignored.
            member = flat_type_info.get(key, None)
            if member is None:
                member, key = cls._type_info_alt.get(key, (None, key))
                if member is None:
                    member, key = cls._type_info_alt.get(c.tag, (None, key))
                    if member is None:
                        continue

            member_attrs = self.get_cls_attrs(member)
            mo = member_attrs.max_occurs
            if mo > 1:
                # Repeating member: accumulate into a list.
                value = getattr(inst, key, None)
                if value is None:
                    value = []

                value.append(self.from_element(ctx, member, c))

            else:
                value = self.from_element(ctx, member, c)

            inst._safe_set(key, value, member, member_attrs)

            # Attributes of this child element may also map to members.
            # NOTE: ``key``/``value``/``mo`` are deliberately reused here.
            for key, value_str in c.attrib.items():
                submember = flat_type_info.get(key, None)
                if submember is None:
                    submember, key = cls._type_info_alt.get(key, (None, key))
                    if submember is None:
                        continue

                submember_attrs = self.get_cls_attrs(submember)
                mo = submember_attrs.max_occurs
                if mo > 1:
                    value = getattr(inst, key, None)
                    if value is None:
                        value = []

                    value.append(self.from_unicode(submember.type, value_str))

                else:
                    value = self.from_unicode(submember.type, value_str)

                inst._safe_set(key, value, submember.type, submember_attrs)

        # Attributes on the root element itself map to XmlAttribute members.
        for key, value_str in elt.attrib.items():
            member = flat_type_info.get(key, None)
            if member is None:
                member, key = cls._type_info_alt.get(key, (None, key))
                if member is None:
                    continue

            if not issubclass(member, XmlAttribute):
                continue

            if issubclass(member.type, (ByteArray, File)):
                value = self.from_unicode(member.type, value_str,
                                                        self.binary_encoding)
            else:
                value = self.from_unicode(member.type, value_str)

            member_attrs = self.get_cls_attrs(member.type)
            inst._safe_set(key, value, member.type, member_attrs)

        if self.validator is self.SOFT_VALIDATION:
            # Check each member's occurrence count against its declared
            # min/max_occurs.
            for key, c in flat_type_info.items():
                val = frequencies.get(key, 0)
                attr = self.get_cls_attrs(c)
                if val < attr.min_occurs or val > attr.max_occurs:
                    raise Fault('Client.ValidationError', '%r member does not '
                                        'respect frequency constraints.' % key)

        return inst
def array_from_element(self, ctx, cls, element):
retval = [ ]
(serializer,) = cls._type_info.values()
for child in element.getchildren():
retval.append(self.from_element(ctx, serializer, child))
return retval
def iterable_from_element(self, ctx, cls, element):
(serializer,) = cls._type_info.values()
for child in element.getchildren():
yield self.from_element(ctx, serializer, child)
    def enum_from_element(self, ctx, cls, element):
        """Look up the enum member whose name is the element's text body."""
        if self.validator is self.SOFT_VALIDATION and not (
                                    cls.validate_string(cls, element.text)):
            raise ValidationError(element.text)
        # Enum members are class attributes named after their values.
        return getattr(cls, element.text)
def fault_from_element(self, ctx, cls, element):
code = element.find('faultcode').text
string = element.find('faultstring').text
factor = element.find('faultactor')
if factor is not None:
factor = factor.text
detail = element.find('detail')
return cls(faultcode=code, faultstring=string, faultactor=factor,
detail=detail)
def xml_from_element(self, ctx, cls, element):
children = element.getchildren()
retval = None
if children:
retval = element.getchildren()[0]
return retval
def html_from_element(self, ctx, cls, element):
children = element.getchildren()
retval = None
if len(children) == 1:
retval = children[0]
# this is actually a workaround to a case that should never exist --
# anyXml types should only have one child tag.
elif len(children) > 1:
retval = E.html(*children)
return retval
def dict_from_element(self, ctx, cls, element):
children = element.getchildren()
if children:
return etree_to_dict(element)
return None
    def unicode_from_element(self, ctx, cls, element):
        """Deserialize a text element, treating a missing text body as the
        empty string; soft-validates both the raw string and the converted
        value."""
        if self.validator is self.SOFT_VALIDATION and not (
                                    cls.validate_string(cls, element.text)):
            raise ValidationError(element.text)

        # <tag/> has element.text == None; normalize to empty string.
        s = element.text
        if s is None:
            s = ''

        retval = self.from_unicode(cls, s)

        if self.validator is self.SOFT_VALIDATION and not (
                                        cls.validate_native(cls, retval)):
            raise ValidationError(retval)

        return retval
def base_from_element(self, ctx, cls, element):
if self.validator is self.SOFT_VALIDATION and not (
cls.validate_string(cls, element.text)):
raise ValidationError(element.text)
retval = self.from_unicode(cls, element.text)
if self.validator is self.SOFT_VALIDATION and not (
cls.validate_native(cls, retval)):
raise ValidationError(retval)
return retval
def byte_array_from_element(self, ctx, cls, element):
if self.validator is self.SOFT_VALIDATION and not (
cls.validate_string(cls, element.text)):
raise ValidationError(element.text)
retval = self.from_unicode(cls, element.text, self.binary_encoding)
if self.validator is self.SOFT_VALIDATION and not (
cls.validate_native(cls, retval)):
raise ValidationError(retval)
return retval
# use own schema instance to create validation documents
#
# spyne - Copyright (C) Spyne contributors.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
#
"""The ``spyne.protocol.xml`` module contains an xml-based protocol that
serializes python objects to xml using Xml Schema conventions.
Logs valid documents to ``'spyne.protocol.xml'`` and invalid documents to
``spyne.protocol.xml.invalid``. Use the usual ``logging.getLogger()`` and
friends to configure how these get logged.
Warning! You can get a lot of crap in the 'invalid' logger. You're not advised
to turn it on for a production system.
"""
import logging
logger = logging.getLogger('spyne.protocol.xml')
logger_invalid = logging.getLogger('spyne.protocol.xml.invalid')
from inspect import isgenerator
from collections import defaultdict
from lxml import etree
from lxml import html
from lxml.builder import E
from lxml.etree import XMLSyntaxError
from lxml.etree import XMLParser
from spyne import BODY_STYLE_WRAPPED
from spyne.util import Break, coroutine
from spyne.util.six import text_type, string_types
from spyne.util.cdict import cdict
from spyne.util.etreeconv import etree_to_dict, dict_to_etree,\
root_dict_to_etree
from spyne.const.xml import XSI, NS_SOAP11_ENC
from spyne.error import Fault
from spyne.error import ValidationError
from spyne.const.ansi_color import LIGHT_GREEN
from spyne.const.ansi_color import LIGHT_RED
from spyne.const.ansi_color import END_COLOR
from spyne.const.xml import NS_SOAP11_ENV
from spyne.const.xml import PREFMAP, DEFAULT_NS
from spyne.model import Any, ModelBase, Array, Iterable, ComplexModelBase, \
AnyHtml, AnyXml, AnyDict, Unicode, PushBase, File, ByteArray, XmlData, \
XmlAttribute
from spyne.model.binary import BINARY_ENCODING_BASE64
from spyne.model.enum import EnumBase
from spyne.protocol import ProtocolBase
from spyne.util import six
if six.PY2:
STR_TYPES = (str, unicode)
else:
STR_TYPES = (str, bytes)
NIL_ATTR = {XSI('nil'): 'true'}
XSI_TYPE = XSI('type')
def _append(parent, child_elt):
if hasattr(parent, 'append'):
parent.append(child_elt)
else:
parent.write(child_elt)
def _gen_tagname(ns, name):
if ns is not None:
name = "{%s}%s" % (ns, name)
return name
class SchemaValidationError(Fault):
    """Raised when the input stream could not be validated by the Xml Schema."""
    # Soap-style fault code reported back to the client.
    CODE = 'Client.SchemaValidationError'
    def __init__(self, faultstring):
        # Delegate to Fault so the error serializes like any other fault.
        super(SchemaValidationError, self).__init__(self.CODE, faultstring)
class SubXmlBase(ProtocolBase):
    """Base class for protocols that can serialize values as fragments
    inside another xml document rather than as stand-alone documents."""
    def subserialize(self, ctx, cls, inst, parent, ns=None, name=None):
        # NOTE(review): `name` is passed into to_parent's `ns` positional
        # slot and the `ns` argument is dropped here -- confirm against
        # subclass implementations that this is intentional.
        return self.to_parent(ctx, cls, inst, parent, name)
    def to_parent(self, ctx, cls, inst, parent, ns, *args, **kwargs):
        """Serializes inst to an Element instance and appends it to the 'parent'.
        :param self: The protocol that will be used to serialize the given
            value.
        :param cls: The type of the value that's going to determine how to
            pack the given value.
        :param inst: The value to be set for the 'text' element of the newly
            created SubElement
        :param parent: The parent Element to which the new child will be
            appended.
        :param ns: The target namespace of the new SubElement, used with
            'name' to set the tag.
        :param name: The tag name of the new SubElement, 'retval' by default.
        """
        # Abstract: concrete protocols must implement the actual serializer.
        raise NotImplementedError()
class XmlDocument(SubXmlBase):
"""The Xml input and output protocol, using the information from the Xml
Schema generated by Spyne types.
See the following material for more (much much more!) information.
* http://www.w3.org/TR/xmlschema-0/
* http://www.w3.org/TR/xmlschema-1/
* http://www.w3.org/TR/xmlschema-2/
Receiving Xml from untrusted sources is a dodgy security dance as the Xml
attack surface is /huge/.
    Spyne's ```lxml.etree.XMLParser``` instance has ```resolve_pis```,
    ```load_dtd```, ```resolve_entities```, ```dtd_validation``` and
    ```huge_tree``` all set to ``False`` by default.
Having ```resolve_entities``` disabled will prevent the 'lxml' validation
for documents with custom xml entities defined in the DTD. See the example
in examples/xml/validation_error to play with the settings that work best
for you. Please note that enabling ```resolve_entities``` is a security
hazard that can lead to disclosure of sensitive information.
See https://pypi.python.org/pypi/defusedxml for a pragmatic overview of
Xml security in Python world.
:param app: The owner application instance.
:param validator: One of (None, 'soft', 'lxml', 'schema',
ProtocolBase.SOFT_VALIDATION, XmlDocument.SCHEMA_VALIDATION).
Both ``'lxml'`` and ``'schema'`` values are equivalent to
``XmlDocument.SCHEMA_VALIDATION``.
Defaults to ``None``.
:param replace_null_with_default: If ``False``, does not replace incoming
explicit null values with denoted default values. This is against Xml
Schema standard but consistent with other Spyne protocol
implementations. Set this to False if you want cross-protocol
compatibility.
Defaults to ``True``.
Relevant quote from xml schema primer
(http://www.w3.org/TR/xmlschema-0/):
..
When a value is present and is null The schema processor treats
defaulted elements slightly differently. When an element is declared
with a default value, the value of the element is whatever value
appears as the element's content in the instance document; if the
element appears without any content, the schema processor provides
the element with a value equal to that of the default attribute.
However, if the element does not appear in the instance document,
the schema processor does not provide the element at all. In
summary, the differences between element and attribute defaults can
be stated as: Default attribute values apply when attributes are
missing, and default element values apply when elements are empty.
:param xml_declaration: Whether to add xml_declaration to the responses
Defaults to ``True``.
:param cleanup_namespaces: Whether to add clean up namespace declarations
in the response document.
Defaults to ``True``.
:param encoding: The suggested string encoding for the returned xml
documents. The transport can override this.
Defaults to ``None``.
:param pretty_print: When ``True``, returns the document in a pretty-printed
format.
Defaults to ``False``.
:param parse_xsi_type: Set to ``False`` to disable parsing of ``xsi:type``
attribute, effectively disabling polymorphism.
Defaults to ``True``.
The following are passed straight to the ``XMLParser()`` instance from
lxml. Docs are also plagiarized from the lxml documentation. Please note
that some of the defaults are different to make parsing safer by default.
:param attribute_defaults: read the DTD (if referenced by the document) and
add the default attributes from it.
Defaults to ``False``
:param dtd_validation: validate while parsing (if a DTD was referenced).
Defaults to ``False``
:param load_dtd: load and parse the DTD while parsing (no validation is
performed).
Defaults to ``False``.
:param no_network: prevent network access when looking up external
documents.
Defaults to ``True``.
:param ns_clean: try to clean up redundant namespace declarations.
Please note that this is for incoming documents.
See ``cleanup_namespaces`` parameter for output documents.
Defaults to ``False``.
:param recover: try hard to parse through broken Xml.
Defaults to ``False``.
:param remove_blank_text: discard blank text nodes between tags, also known
as ignorable whitespace. This is best used together with a DTD or schema
(which tells data and noise apart), otherwise a heuristic will be
applied.
Defaults to ``False``.
:param remove_pis: When ``True`` xml parser discards processing
instructions.
Defaults to ``True``.
:param strip_cdata: replace CDATA sections by normal text content.
Defaults to ``True``
:param resolve_entities: replace entities by their text value.
Defaults to ``False``.
:param huge_tree: disable security restrictions and support very deep trees
and very long text content. (only affects libxml2 2.7+)
Defaults to ``False``.
:param compact: use compact storage for short text content.
Defaults to ``True``.
"""
SCHEMA_VALIDATION = type("Schema", (object,), {})
mime_type = 'text/xml'
default_binary_encoding = BINARY_ENCODING_BASE64
type = set(ProtocolBase.type)
type.add('xml')
soap_env = PREFMAP[NS_SOAP11_ENV]
ns_soap_env = NS_SOAP11_ENV
ns_soap_enc = NS_SOAP11_ENC
    def __init__(self, app=None, validator=None,
            replace_null_with_default=True,
            xml_declaration=True,
            cleanup_namespaces=True, encoding=None, pretty_print=False,
            attribute_defaults=False,
            dtd_validation=False,
            load_dtd=False,
            no_network=True,
            ns_clean=False,
            recover=False,
            remove_blank_text=False,
            remove_pis=True,
            strip_cdata=True,
            resolve_entities=False,
            huge_tree=False,
            compact=True,
            binary_encoding=None,
            parse_xsi_type=True,
            polymorphic=False,
        ):
        # See the class docstring for the meaning of every parameter.
        super(XmlDocument, self).__init__(app, validator,
                                           binary_encoding=binary_encoding)
        # Built lazily by set_app() when schema validation is requested.
        self.validation_schema = None
        self.xml_declaration = xml_declaration
        self.cleanup_namespaces = cleanup_namespaces
        self.replace_null_with_default = replace_null_with_default
        if encoding is None:
            self.encoding = 'UTF-8'
        else:
            self.encoding = encoding
        self.polymorphic = polymorphic
        self.pretty_print = pretty_print
        self.parse_xsi_type = parse_xsi_type
        # cdict resolves a lookup for a class to the handler registered for
        # its nearest base, so ModelBase acts as the catch-all entry.
        self.serialization_handlers = cdict({
            Any: self.any_to_parent,
            Fault: self.fault_to_parent,
            EnumBase: self.enum_to_parent,
            AnyXml: self.any_xml_to_parent,
            XmlData: self.xmldata_to_parent,
            AnyDict: self.any_dict_to_parent,
            AnyHtml: self.any_html_to_parent,
            ModelBase: self.modelbase_to_parent,
            ByteArray: self.byte_array_to_parent,
            ComplexModelBase: self.complex_to_parent,
            XmlAttribute: self.xmlattribute_to_parent,
            SchemaValidationError: self.schema_validation_error_to_parent,
        })
        self.deserialization_handlers = cdict({
            AnyHtml: self.html_from_element,
            AnyXml: self.xml_from_element,
            Any: self.xml_from_element,
            Array: self.array_from_element,
            Fault: self.fault_from_element,
            AnyDict: self.dict_from_element,
            EnumBase: self.enum_from_element,
            ModelBase: self.base_from_element,
            Unicode: self.unicode_from_element,
            Iterable: self.iterable_from_element,
            ByteArray: self.byte_array_from_element,
            ComplexModelBase: self.complex_from_element,
        })
        # Keyword arguments handed to lxml's XMLParser for every incoming
        # document; see the class docstring for the security implications.
        # NOTE(review): the raw `encoding` (possibly None) is forwarded to
        # the parser while self.encoding falls back to UTF-8 for output.
        self.parser_kwargs = dict(
            attribute_defaults=attribute_defaults,
            dtd_validation=dtd_validation,
            load_dtd=load_dtd,
            no_network=no_network,
            ns_clean=ns_clean,
            recover=recover,
            remove_blank_text=remove_blank_text,
            remove_comments=True,
            remove_pis=remove_pis,
            strip_cdata=strip_cdata,
            resolve_entities=resolve_entities,
            huge_tree=huge_tree,
            compact=compact,
            encoding=encoding,
        )
    def set_validator(self, validator):
        """Normalize *validator* to one of the internal validation modes;
        raises ValueError for unrecognized values."""
        if validator in ('lxml', 'schema') or \
                                    validator is self.SCHEMA_VALIDATION:
            self.validate_document = self.__validate_lxml
            self.validator = self.SCHEMA_VALIDATION
        elif validator == 'soft' or validator is self.SOFT_VALIDATION:
            self.validator = self.SOFT_VALIDATION
        elif validator is None:
            pass
        else:
            raise ValueError(validator)
        # Any previously built schema is invalidated by a mode change.
        self.validation_schema = None
    def validate_body(self, ctx, message):
        """Validate ``ctx.in_body_doc`` with the configured validator,
        logging the document at debug level whether validation succeeds or
        not."""
        assert message in (self.REQUEST, self.RESPONSE), message
        # The "Error:" header survives into the finally block when
        # validate_document raises.
        line_header = LIGHT_RED + "Error:" + END_COLOR
        try:
            self.validate_document(ctx.in_body_doc)
            if message is self.REQUEST:
                line_header = LIGHT_GREEN + "Method request string:" + END_COLOR
            else:
                line_header = LIGHT_RED + "Response:" + END_COLOR
        finally:
            if logger.level == logging.DEBUG:
                logger.debug("%s %s" % (line_header, ctx.method_request_string))
                logger.debug(etree.tostring(ctx.in_document, pretty_print=True))
    def set_app(self, value):
        """Bind the protocol to the application *value* and (re)build the
        lxml validation schema when schema validation is enabled."""
        ProtocolBase.set_app(self, value)
        self.validation_schema = None
        if self.validator is self.SCHEMA_VALIDATION and value is not None:
            xml_schema = self.app.interface.docs.xml_schema
            xml_schema.build_validation_schema()
            self.validation_schema = xml_schema.validation_schema
    def __validate_lxml(self, payload):
        """Run *payload* through the pre-built XMLSchema validator; raise
        SchemaValidationError carrying the validator's last error."""
        ret = self.validation_schema.validate(payload)
        logger.debug("Validated ? %r" % ret)
        if ret == False:
            error_text = text_type(self.validation_schema.error_log.last_error)
            # NOTE(review): the fault string is ascii-encoded *bytes* here;
            # confirm downstream fault serialization expects bytes.
            raise SchemaValidationError(error_text.encode('ascii',
                                                           'xmlcharrefreplace'))
def create_in_document(self, ctx, charset=None):
"""Uses the iterable of string fragments in ``ctx.in_string`` to set
``ctx.in_document``."""
string = b''.join(ctx.in_string)
try:
try:
ctx.in_document = etree.fromstring(string,
parser=XMLParser(**self.parser_kwargs))
except ValueError:
logger.debug('ValueError: Deserializing from unicode strings '
'with encoding declaration is not supported by '
'lxml.')
ctx.in_document = etree.fromstring(string.decode(charset),
self.parser)
except XMLSyntaxError as e:
logger_invalid.error("%r in string %r", e, string)
raise Fault('Client.XMLSyntaxError', str(e))
    def decompose_incoming_envelope(self, ctx, message):
        """Plain xml has no envelope: the whole document is the body and
        there is never a header."""
        assert message in (self.REQUEST, self.RESPONSE)
        ctx.in_header_doc = None # If you need header support, you should use Soap
        ctx.in_body_doc = ctx.in_document
        # The root tag doubles as the method request string.
        ctx.method_request_string = ctx.in_body_doc.tag
        self.validate_body(ctx, message)
def from_element(self, ctx, cls, element):
cls_attrs = self.get_cls_attrs(cls)
if bool(element.get(XSI('nil'))):
if self.validator is self.SOFT_VALIDATION and not \
cls_attrs.nillable:
raise ValidationError(None)
if self.replace_null_with_default:
return cls_attrs.default
return None
# if present, use the xsi:type="ns0:ObjectName"
# attribute to instantiate subclass objects
if self.parse_xsi_type:
xsi_type = element.get(XSI_TYPE, None)
if xsi_type is not None:
if ":" in xsi_type:
prefix, objtype = xsi_type.split(':', 1)
else:
prefix, objtype = None, xsi_type
ns = element.nsmap.get(prefix)
if ns is not None:
classkey = "{%s}%s" % (ns, objtype)
else:
logger.error("xsi:type namespace prefix "
"'%s' in '%s' not found in %r",
ns, xsi_type, element.nsmap)
raise ValidationError(xsi_type)
newclass = ctx.app.interface.classes.get(classkey, None)
if newclass is None:
logger.error("xsi:type '%s' interpreted as class key '%s' "
"is not recognized", xsi_type, classkey)
raise ValidationError(xsi_type)
cls = newclass
logger.debug("xsi:type '%s' overrides %r to %r", xsi_type,
cls, newclass)
handler = self.deserialization_handlers[cls]
return handler(ctx, cls, element)
    def to_parent(self, ctx, cls, inst, parent, ns, *args, **kwargs):
        """Serialize *inst* as a child of *parent*, resolving polymorphic
        targets, per-class sub-protocols and null values first."""
        cls, add_type = self.get_polymorphic_target(cls, inst)
        cls_attrs = self.get_cls_attrs(cls)
        # A class can designate its own sub-protocol for serialization.
        subprot = cls_attrs.prot
        if subprot is not None and isinstance(subprot, SubXmlBase):
            return subprot.subserialize(ctx, cls, inst, parent, ns,
                                                             *args, **kwargs)
        handler = self.serialization_handlers[cls]
        if inst is None:
            inst = cls_attrs.default
        # Still None (no default): emit the null representation instead.
        if inst is None:
            return self.null_to_parent(ctx, cls, inst, parent, ns,
                                                             *args, **kwargs)
        # Members marked excluded are silently skipped.
        if cls_attrs.exc:
            return
        kwargs['add_type'] = add_type
        return handler(ctx, cls, inst, parent, ns, *args, **kwargs)
    def deserialize(self, ctx, message):
        """Takes a MethodContext instance and a string containing ONE root xml
        tag.
        Returns the corresponding native python object.
        Not meant to be overridden.
        """
        assert message in (self.REQUEST, self.RESPONSE)
        self.event_manager.fire_event('before_deserialize', ctx)
        if ctx.descriptor is None:
            if ctx.in_error is None:
                raise Fault("Client", "Method %r not found." %
                                                      ctx.method_request_string)
            else:
                raise ctx.in_error
        if message is self.REQUEST:
            body_class = ctx.descriptor.in_message
        elif message is self.RESPONSE:
            body_class = ctx.descriptor.out_message
        # decode method arguments
        if ctx.in_body_doc is None:
            # Missing body: one None per expected argument.
            ctx.in_object = [None] * len(body_class._type_info)
        else:
            ctx.in_object = self.from_element(ctx, body_class, ctx.in_body_doc)
        if logger.level == logging.DEBUG and message is self.REQUEST:
            line_header = '%sRequest%s' % (LIGHT_GREEN, END_COLOR)
            outdoc_str = None
            # NOTE(review): this logs ctx.out_document while *deserializing*;
            # looks copy-pasted from serialize() -- confirm intent.
            if ctx.out_document is not None:
                outdoc_str = etree.tostring(ctx.out_document,
                         xml_declaration=self.xml_declaration, pretty_print=True)
            logger.debug("%s %s" % (line_header, outdoc_str))
        self.event_manager.fire_event('after_deserialize', ctx)
    def serialize(self, ctx, message):
        """Uses ``ctx.out_object``, ``ctx.out_header`` or ``ctx.out_error`` to
        set ``ctx.out_body_doc``, ``ctx.out_header_doc`` and
        ``ctx.out_document`` as an ``lxml.etree._Element instance``.
        Not meant to be overridden.
        """
        assert message in (self.REQUEST, self.RESPONSE)
        self.event_manager.fire_event('before_serialize', ctx)
        if ctx.out_error is not None:
            # Serialize the fault under a throwaway root element; its only
            # child becomes the output document.
            tmp_elt = etree.Element('punk')
            retval = self.to_parent(ctx, ctx.out_error.__class__, ctx.out_error,
                                    tmp_elt, self.app.interface.get_tns())
            ctx.out_document = tmp_elt[0]
        else:
            if message is self.REQUEST:
                result_message_class = ctx.descriptor.in_message
            elif message is self.RESPONSE:
                result_message_class = ctx.descriptor.out_message
            # assign raw result to its wrapper, result_message
            if ctx.descriptor.body_style == BODY_STYLE_WRAPPED:
                result_inst = result_message_class()
                for i, (k, v) in enumerate(
                                        result_message_class._type_info.items()):
                    attrs = self.get_cls_attrs(v)
                    result_inst._safe_set(k, ctx.out_object[i], v, attrs)
            else:
                result_inst = ctx.out_object
            if ctx.out_stream is None:
                # Same throwaway-root trick as the error path above.
                tmp_elt = etree.Element('punk')
                retval = self.to_parent(ctx, result_message_class,
                              result_inst, tmp_elt, self.app.interface.get_tns())
                ctx.out_document = tmp_elt[0]
            else:
                # Incremental output goes through the streaming coroutine.
                retval = self.incgen(ctx, result_message_class,
                                     result_inst, self.app.interface.get_tns())
        if self.cleanup_namespaces and ctx.out_document is not None:
            etree.cleanup_namespaces(ctx.out_document)
        self.event_manager.fire_event('after_serialize', ctx)
        return retval
    def create_out_string(self, ctx, charset=None):
        """Sets an iterable of string fragments to ctx.out_string"""
        if charset is None:
            charset = self.encoding
        # Single-item list: transports expect an iterable of fragments.
        ctx.out_string = [etree.tostring(ctx.out_document,
                                         encoding=charset,
                                         pretty_print=self.pretty_print,
                                         xml_declaration=self.xml_declaration)]
        if logger.level == logging.DEBUG:
            logger.debug('%sResponse%s %s' % (LIGHT_RED, END_COLOR,
                            etree.tostring(ctx.out_document,
                                       pretty_print=True, encoding='UTF-8')))
    @coroutine
    def incgen(self, ctx, cls, inst, ns, name=None):
        """Coroutine that streams the serialized document incrementally to
        ``ctx.out_stream`` via lxml's xmlfile interface."""
        if name is None:
            name = cls.get_type_name()
        with etree.xmlfile(ctx.out_stream) as xf:
            ret = self.to_parent(ctx, cls, inst, xf, ns, name)
            # Pump values sent into this coroutine through to the inner
            # serializer until the caller signals Break.
            if isgenerator(ret):
                try:
                    while True:
                        y = (yield) # may throw Break
                        ret.send(y)
                except Break:
                    try:
                        ret.throw(Break())
                    except StopIteration:
                        pass
        if hasattr(ctx.out_stream, 'finish'):
            ctx.out_stream.finish()
def _gen_tag(self, cls, ns, name, add_type=False, **_):
if ns is not None:
name = "{%s}%s" % (ns, name)
retval = E(name)
if add_type:
retval.attrib[XSI_TYPE] = cls.get_type_name_ns(self.app.interface)
return retval
def byte_array_to_parent(self, ctx, cls, inst, parent, ns, name='retval',
**kwargs):
elt = self._gen_tag(cls, ns, name, **kwargs)
elt.text = self.to_unicode(cls, inst, self.binary_encoding)
_append(parent, elt)
def modelbase_to_parent(self, ctx, cls, inst, parent, ns, name='retval',
**kwargs):
elt = self._gen_tag(cls, ns, name, **kwargs)
elt.text = self.to_unicode(cls, inst)
_append(parent, elt)
def null_to_parent(self, ctx, cls, inst, parent, ns, name='retval',
**kwargs):
if issubclass(cls, XmlAttribute):
return
elif issubclass(cls, XmlData):
parent.attrib.update(NIL_ATTR)
else:
elt = self._gen_tag(cls, ns, name, **kwargs)
elt.attrib.update(NIL_ATTR)
_append(parent, elt)
    def null_from_element(self, ctx, cls, element):
        """An explicitly nil element always deserializes to None."""
        return None
    def xmldata_to_parent(self, ctx, cls, inst, parent, ns, name,
                                                         add_type=False, **_):
        """Serialize an XmlData member: the value is marshalled by the type
        itself (typically into the text content of *parent*)."""
        cls_attrs = self.get_cls_attrs(cls)
        # The class' own namespace wins; fall back to sub_ns from attrs.
        ns = cls._ns
        if ns is None:
            ns = cls_attrs.sub_ns
        name = _gen_tagname(ns, name)
        if add_type:
            parent.attrib[XSI_TYPE] = cls.get_type_name_ns(self.app.interface)
        # Delegate the actual text/attribute placement to the type itself.
        cls.marshall(self, name, inst, parent)
    def xmlattribute_to_parent(self, ctx, cls, inst, parent, ns, name, **_):
        """Set *inst* as an attribute named *name* directly on *parent*."""
        ns = cls._ns
        cls_attrs = self.get_cls_attrs(cls)
        if ns is None:
            ns = cls_attrs.sub_ns
        name = _gen_tagname(ns, name)
        # None is simply omitted; there is no nil representation for attrs.
        if inst is not None:
            if issubclass(cls.type, (ByteArray, File)):
                # Binary payloads go through the configured binary encoding.
                parent.set(name, self.to_unicode(cls.type, inst,
                                                        self.binary_encoding))
            else:
                parent.set(name, self.to_unicode(cls.type, inst))
    @coroutine
    def gen_members_parent(self, ctx, cls, inst, parent, tag_name, subelts,
                                                                     add_type):
        """Coroutine that writes *subelts* plus the members of *inst* under a
        new *tag_name* element, for both tree and incremental outputs."""
        attrib = {}
        if add_type:
            tnn = cls.get_type_name_ns(self.app.interface)
            if tnn != None:
                attrib[XSI_TYPE] = tnn
            else:
                # this only happens on incomplete interface states for eg.
                # get_object_as_xml where the full init is not performed for
                # perf reasons
                attrib[XSI_TYPE] = cls.get_type_name()
        if isinstance(parent, etree._Element):
            # In-memory tree: build a SubElement and fill it.
            elt = etree.SubElement(parent, tag_name, attrib=attrib)
            elt.extend(subelts)
            ret = self._get_members_etree(ctx, cls, inst, elt)
            # Pump values pushed into this coroutine through to the member
            # serializer until the caller signals Break.
            if isgenerator(ret):
                try:
                    while True:
                        y = (yield) # may throw Break
                        ret.send(y)
                except Break:
                    try:
                        ret.throw(Break())
                    except StopIteration:
                        pass
        else:
            # Incremental (xmlfile) output: stream sub-elements directly.
            with parent.element(tag_name, attrib=attrib):
                for e in subelts:
                    parent.write(e)
                ret = self._get_members_etree(ctx, cls, inst, parent)
                if isgenerator(ret):
                    try:
                        while True:
                            y = (yield)
                            ret.send(y)
                    except Break:
                        try:
                            ret.throw(Break())
                        except StopIteration:
                            pass
    @coroutine
    def _get_members_etree(self, ctx, cls, inst, parent):
        """Coroutine that serializes every member of *inst* (inherited
        members first) as children of *parent*."""
        try:
            # Serialize members of the base class first, recursively.
            parent_cls = getattr(cls, '__extends__', None)
            if not (parent_cls is None):
                ret = self._get_members_etree(ctx, parent_cls, inst, parent)
                if ret is not None:
                    try:
                        while True:
                            sv2 = (yield) # may throw Break
                            ret.send(sv2)
                    except Break:
                        try:
                            ret.throw(Break())
                        except StopIteration:
                            pass
            for k, v in cls._type_info.items():
                sub_cls_attrs = self.get_cls_attrs(v)
                # Excluded members are not serialized at all.
                if sub_cls_attrs.exc:
                    continue
                try:
                    subvalue = getattr(inst, k, None)
                except: # e.g. SqlAlchemy could throw NoSuchColumnError
                    subvalue = None
                # This is a tight loop, so enable this only when necessary.
                # logger.debug("get %r(%r) from %r: %r" % (k, v, inst, subvalue))
                sub_ns = v.Attributes.sub_ns
                if sub_ns is None:
                    sub_ns = cls.get_namespace()
                sub_name = v.Attributes.sub_name
                if sub_name is None:
                    sub_name = k
                mo = v.Attributes.max_occurs
                if subvalue is not None and mo > 1:
                    # Repeating member: one child element per item.
                    if isinstance(subvalue, PushBase):
                        # Push-style source: items arrive via send().
                        while True:
                            sv = (yield)
                            ret = self.to_parent(ctx, v, sv, parent, sub_ns,
                                                                      sub_name)
                            if ret is not None:
                                try:
                                    while True:
                                        sv2 = (yield) # may throw Break
                                        ret.send(sv2)
                                except Break:
                                    try:
                                        ret.throw(Break())
                                    except StopIteration:
                                        pass
                    else:
                        for sv in subvalue:
                            ret = self.to_parent(ctx, v, sv, parent, sub_ns,
                                                                      sub_name)
                            if ret is not None:
                                try:
                                    while True:
                                        sv2 = (yield) # may throw Break
                                        ret.send(sv2)
                                except Break:
                                    try:
                                        ret.throw(Break())
                                    except StopIteration:
                                        pass
                # Don't include empty values for
                # non-nillable optional attributes.
                elif subvalue is not None or v.Attributes.min_occurs > 0:
                    ret = self.to_parent(ctx, v, subvalue, parent, sub_ns,
                                                                      sub_name)
                    if ret is not None:
                        try:
                            while True:
                                sv2 = (yield)
                                ret.send(sv2)
                        except Break as b:
                            try:
                                ret.throw(b)
                            except StopIteration:
                                pass
        except Break:
            pass
    def complex_to_parent(self, ctx, cls, inst, parent, ns, name=None,
                                                         add_type=False, **_):
        """Serialize a complex (compound) object as an element named after
        its type, unless overridden by sub_name/sub_ns."""
        cls_attrs = self.get_cls_attrs(cls)
        sub_name = cls_attrs.sub_name
        if sub_name is not None:
            name = sub_name
        if name is None:
            name = cls.get_type_name()
        sub_ns = cls_attrs.sub_ns
        if not sub_ns in (None, DEFAULT_NS):
            ns = sub_ns
        tag_name = _gen_tagname(ns, name)
        # May swap inst for a type-specific serialization surrogate.
        inst = cls.get_serialization_instance(inst)
        return self.gen_members_parent(ctx, cls, inst, parent, tag_name, [],
                                                                      add_type)
    def _fault_to_parent_impl(self, ctx, cls, inst, parent, ns, subelts, **_):
        """Common tail of fault serialization: renders the detail member and
        any nonstandard fault members under a soap Fault element."""
        tag_name = "{%s}Fault" % self.ns_soap_env
        # Accepting raw lxml objects as detail is DEPRECATED. It's also not
        # documented. It's kept for backwards-compatibility purposes.
        if isinstance(inst.detail, string_types + (etree._Element,)):
            _append(subelts, E('detail', inst.detail))
        elif isinstance(inst.detail, dict):
            # Empty dict: no detail element at all.
            if len(inst.detail) > 0:
                _append(subelts, root_dict_to_etree({'detail':inst.detail}))
        elif inst.detail is None:
            pass
        else:
            raise TypeError('Fault detail Must be dict, got', type(inst.detail))
        # add other nonstandard fault subelements with get_members_etree
        return self.gen_members_parent(ctx, cls, inst, parent, tag_name,
                                                      subelts, add_type=False)
def fault_to_parent(self, ctx, cls, inst, parent, ns, *args, **kwargs):
subelts = [
E("faultcode", '%s:%s' % (self.soap_env, inst.faultcode)),
E("faultstring", inst.faultstring),
E("faultactor", inst.faultactor),
]
return self._fault_to_parent_impl(ctx, cls, inst, parent, ns, subelts)
    def schema_validation_error_to_parent(self, ctx, cls, inst, parent, ns,**_):
        """Serialize a SchemaValidationError, unescaping the raw validator
        message that was stuffed into the faultstring."""
        subelts = [
            E("faultcode", '%s:%s' % (self.soap_env, inst.faultcode)),
            # HACK: Does anyone know a better way of injecting raw xml entities?
            E("faultstring", html.fromstring(inst.faultstring).text),
            E("faultactor", inst.faultactor),
        ]
        if inst.detail != None:
            _append(subelts, E('detail', inst.detail))
        # add other nonstandard fault subelements with get_members_etree
        return self._fault_to_parent_impl(ctx, cls, inst, parent, ns, subelts)
    def enum_to_parent(self, ctx, cls, inst, parent, ns, name='retval', **kwargs):
        """Serialize an enum member through its string representation."""
        self.modelbase_to_parent(ctx, cls, str(inst), parent, ns, name, **kwargs)
    def any_xml_to_parent(self, ctx, cls, inst, parent, ns, name, **_):
        """Wrap an AnyXml value (xml string or already-parsed element) in a
        new element and append it to *parent*."""
        if isinstance(inst, STR_TYPES):
            # Strings are parsed to an element first.
            inst = etree.fromstring(inst)
        _append(parent, E(_gen_tagname(ns, name), inst))
    def any_to_parent(self, ctx, cls, inst, parent, ns, name, **_):
        """Append *inst* as-is, wrapped in a new element named *name*."""
        _append(parent, E(_gen_tagname(ns, name), inst))
    def any_html_to_parent(self, ctx, cls, inst, parent, ns, name, **_):
        """Parse a non-empty html string into a tree; empty strings and
        ready-made elements are passed through unchanged."""
        if isinstance(inst, string_types) and len(inst) > 0:
            inst = html.fromstring(inst)
        _append(parent, E(_gen_tagname(ns, name), inst))
def any_dict_to_parent(self, ctx, cls, inst, parent, ns, name, **_):
elt = E(_gen_tagname(ns, name))
dict_to_etree(inst, elt)
_append(parent, elt)
    def complex_from_element(self, ctx, cls, elt):
        """Deserialize *elt* to an instance of the complex type *cls*,
        mapping child elements and attributes onto members and enforcing
        min/max_occurs under soft validation."""
        inst = cls.get_deserialization_instance(ctx)
        flat_type_info = cls.get_flat_type_info(cls)
        # this is for validating cls.Attributes.{min,max}_occurs
        frequencies = defaultdict(int)
        cls_attrs = self.get_cls_attrs(cls)
        # Members mapped to the tag's own text content (XmlData).
        if cls_attrs._xml_tag_body_as is not None:
            for xtba_key, xtba_type in cls_attrs._xml_tag_body_as:
                xtba_attrs = self.get_cls_attrs(xtba_type.type)
                if issubclass(xtba_type.type, (ByteArray, File)):
                    value = self.from_unicode(xtba_type.type, elt.text,
                                                        self.binary_encoding)
                else:
                    value = self.from_unicode(xtba_type.type, elt.text)
                inst._safe_set(xtba_key, value, xtba_type.type, xtba_attrs)
        # parse input to set incoming data to related attributes.
        for c in elt:
            if isinstance(c, etree._Comment):
                continue
            # Strip the namespace part of the tag to get the member key.
            key = c.tag.split('}', 1)[-1]
            frequencies[key] += 1
            member = flat_type_info.get(key, None)
            if member is None:
                # Fall back to alternate (e.g. sub_name) lookups, first by
                # bare key, then by the fully qualified tag.
                member, key = cls._type_info_alt.get(key, (None, key))
                if member is None:
                    member, key = cls._type_info_alt.get(c.tag, (None, key))
                    if member is None:
                        continue
            member_attrs = self.get_cls_attrs(member)
            mo = member_attrs.max_occurs
            if mo > 1:
                # Repeating member: accumulate values into a list.
                value = getattr(inst, key, None)
                if value is None:
                    value = []
                value.append(self.from_element(ctx, member, c))
            else:
                value = self.from_element(ctx, member, c)
            inst._safe_set(key, value, member, member_attrs)
            # Attributes on the child element itself.
            for key, value_str in c.attrib.items():
                submember = flat_type_info.get(key, None)
                if submember is None:
                    submember, key = cls._type_info_alt.get(key, (None, key))
                    if submember is None:
                        continue
                submember_attrs = self.get_cls_attrs(submember)
                mo = submember_attrs.max_occurs
                if mo > 1:
                    value = getattr(inst, key, None)
                    if value is None:
                        value = []
                    value.append(self.from_unicode(submember.type, value_str))
                else:
                    value = self.from_unicode(submember.type, value_str)
                inst._safe_set(key, value, submember.type, submember_attrs)
        # XmlAttribute members on the root element.
        for key, value_str in elt.attrib.items():
            member = flat_type_info.get(key, None)
            if member is None:
                member, key = cls._type_info_alt.get(key, (None, key))
                if member is None:
                    continue
            if not issubclass(member, XmlAttribute):
                continue
            if issubclass(member.type, (ByteArray, File)):
                value = self.from_unicode(member.type, value_str,
                                                        self.binary_encoding)
            else:
                value = self.from_unicode(member.type, value_str)
            member_attrs = self.get_cls_attrs(member.type)
            inst._safe_set(key, value, member.type, member_attrs)
        if self.validator is self.SOFT_VALIDATION:
            # Enforce occurrence constraints collected above.
            for key, c in flat_type_info.items():
                val = frequencies.get(key, 0)
                attr = self.get_cls_attrs(c)
                if val < attr.min_occurs or val > attr.max_occurs:
                    raise Fault('Client.ValidationError', '%r member does not '
                                       'respect frequency constraints.' % key)
        return inst
def array_from_element(self, ctx, cls, element):
retval = [ ]
(serializer,) = cls._type_info.values()
for child in element.getchildren():
retval.append(self.from_element(ctx, serializer, child))
return retval
def iterable_from_element(self, ctx, cls, element):
(serializer,) = cls._type_info.values()
for child in element.getchildren():
yield self.from_element(ctx, serializer, child)
    def enum_from_element(self, ctx, cls, element):
        """Look up the element text as an enum member name on *cls*."""
        if self.validator is self.SOFT_VALIDATION and not (
                                    cls.validate_string(cls, element.text)):
            raise ValidationError(element.text)
        return getattr(cls, element.text)
def fault_from_element(self, ctx, cls, element):
code = element.find('faultcode').text
string = element.find('faultstring').text
factor = element.find('faultactor')
if factor is not None:
factor = factor.text
detail = element.find('detail')
return cls(faultcode=code, faultstring=string, faultactor=factor,
detail=detail)
def xml_from_element(self, ctx, cls, element):
children = element.getchildren()
retval = None
if children:
retval = element.getchildren()[0]
return retval
def html_from_element(self, ctx, cls, element):
children = element.getchildren()
retval = None
if len(children) == 1:
retval = children[0]
# this is actually a workaround to a case that should never exist --
# anyXml types should only have one child tag.
elif len(children) > 1:
retval = E.html(*children)
return retval
def dict_from_element(self, ctx, cls, element):
children = element.getchildren()
if children:
return etree_to_dict(element)
return None
    def unicode_from_element(self, ctx, cls, element):
        """Deserialize a text element, treating a missing text node as the
        empty string; validates both the raw string and the native value
        under soft validation."""
        if self.validator is self.SOFT_VALIDATION and not (
                                    cls.validate_string(cls, element.text)):
            raise ValidationError(element.text)
        s = element.text
        if s is None:
            # An empty tag has text None; normalize to ''.
            s = ''
        retval = self.from_unicode(cls, s)
        if self.validator is self.SOFT_VALIDATION and not (
                                           cls.validate_native(cls, retval)):
            raise ValidationError(retval)
        return retval
    def base_from_element(self, ctx, cls, element):
        """Generic primitive deserializer: convert the element text via
        from_unicode, validating string and native forms under soft
        validation. Unlike unicode_from_element, a None text is passed
        through unchanged."""
        if self.validator is self.SOFT_VALIDATION and not (
                                    cls.validate_string(cls, element.text)):
            raise ValidationError(element.text)
        retval = self.from_unicode(cls, element.text)
        if self.validator is self.SOFT_VALIDATION and not (
                                           cls.validate_native(cls, retval)):
            raise ValidationError(retval)
        return retval
    def byte_array_from_element(self, ctx, cls, element):
        """Deserialize binary content from the element text using the
        protocol's binary encoding (base64 by default)."""
        if self.validator is self.SOFT_VALIDATION and not (
                                    cls.validate_string(cls, element.text)):
            raise ValidationError(element.text)
        retval = self.from_unicode(cls, element.text, self.binary_encoding)
        if self.validator is self.SOFT_VALIDATION and not (
                                           cls.validate_native(cls, retval)):
            raise ValidationError(retval)
        return retval
|
# -*- coding: utf-8 -*-
"""Set flags on a set of mails
Usage: imap-cli-flag [options] [<directory>] <mail_id> <flag>...
Options:
-c, --config-file=<FILE> Configuration file
-v, --verbose Generate verbose messages
-h, --help Show help options.
--version Print program version.
----
imap-cli-flag 0.3
Copyright (C) 2014 Romain Soufflet
License MIT
This is free software: you are free to change and redistribute it.
There is NO WARRANTY, to the extent permitted by law.
"""
import logging
import sys
import docopt
import imap_cli
from imap_cli import config
from imap_cli import const
log = logging.getLogger('imap-cli-flag')
def unset_flag(imap_account, mail_id=None, flags_str=''):
    """Remove the flags in *flags_str* from the mail identified by *mail_id*.

    Logs an error and returns None when no mail id is given.
    """
    if mail_id is None:
        log.error('Can\'t set flag on email {}'.format(mail_id))
        return None
    # TODO(rsoufflet)
    # FIX: an *unset* must remove flags -- '+FLAGS' was adding them.
    result = imap_account.store(mail_id, '-FLAGS', flags_str)
    log.debug(repr(result))
def flag(imap_account, mail_id, flags, directory=const.DEFAULT_DIRECTORY):
    """Add every flag in *flags* to the mail *mail_id* within *directory*.

    Silently returns after logging when the directory cannot be selected or
    when no mail id is given.
    """
    status, mail_count = imap_account.select(directory)
    if status != const.STATUS_OK:
        # log.warn is a deprecated alias of log.warning.
        log.warning(u'Cannot access directory {}'.format(directory))
        return
    if mail_id is None:
        # Hoisted out of the loop: the id cannot become valid mid-iteration.
        log.error('Can\'t set flag on email {}'.format(mail_id))
        return
    for flag_name in flags:  # renamed: the old loop var shadowed flag()
        # TODO(rsoufflet)
        result = imap_account.store(mail_id, '+FLAGS', r'({})'.format(flag_name))
        log.debug(repr(result))
def main():
    """Command-line entry point: parse args, connect, set flags, disconnect."""
    # Drop the first two docstring lines (coding cookie and summary) so
    # docopt only sees the Usage/Options text.
    args = docopt.docopt('\n'.join(__doc__.split('\n')[2:]))
    logging.basicConfig(
        level=logging.DEBUG if args['--verbose'] else logging.WARNING,
        stream=sys.stdout,
    )
    conf = config.new_context_from_file(args['--config-file'], section='imap')
    imap_account = imap_cli.connect(**conf)
    flag(imap_account, args['<mail_id>'], args['<flag>'], directory=args['<directory>'])
    imap_cli.disconnect(imap_account)
    return 0
if __name__ == '__main__':
sys.exit(main())
Fix flag script
Let user unset flags too.
# -*- coding: utf-8 -*-
"""Set flags on a set of mails
Usage: imap-cli-flag [options] <mail_id> <flag>...
Options:
-c, --config-file=<FILE> Configuration file
-d, --directory=<DIR> Imap folder
-u, --unset Remove flag instead of setting them
-v, --verbose Generate verbose messages
-h, --help Show help options.
--version Print program version.
----
imap-cli-flag 0.3
Copyright (C) 2014 Romain Soufflet
License MIT
This is free software: you are free to change and redistribute it.
There is NO WARRANTY, to the extent permitted by law.
"""
import logging
import sys
import docopt
import imap_cli
from imap_cli import config
from imap_cli import const
log = logging.getLogger('imap-cli-flag')
def flag(imap_account, message_set, flags, unset=False):
    """Set (or, with unset=True, remove) *flags* on every mail in *message_set*.

    message_set: iterable of mail UIDs, joined into an IMAP sequence set.
    flags: iterable of IMAP flag names.
    unset: when True issue '-FLAGS' (remove) instead of '+FLAGS' (add).
    """
    if message_set is None or len(message_set) == 0:
        log.error('Invalid message set')
        # Bug fix: without this return the STORE below still ran with an
        # empty sequence set after logging the error.
        return None
    request_message_set = ','.join(str(mail_id) for mail_id in message_set)
    status, result = imap_account.uid(
        u'STORE',
        request_message_set,
        u'+FLAGS' if unset is False else '-FLAGS',
        u'({})'.format(u' '.join(flags)),
    )
    if status == const.STATUS_OK:
        log.debug('Flags "{}" have been set : {}'.format(flags, result))
    else:
        log.error('Flags "{}" have not been set : {}'.format(flags, result))
def main():
    """Command-line entry point: parse args, connect, flag mails, disconnect."""
    # Skip the first two docstring lines so docopt only sees the usage text.
    args = docopt.docopt('\n'.join(__doc__.split('\n')[2:]))
    logging.basicConfig(
        level=logging.DEBUG if args['--verbose'] else logging.WARNING,
        stream=sys.stdout,
    )
    conf = config.new_context_from_file(args['--config-file'], section='imap')
    imap_account = imap_cli.connect(**conf)
    # Open the mailbox read-write (read_only=False) so STORE can change flags.
    imap_cli.change_dir(imap_account, args['--directory'] or const.DEFAULT_DIRECTORY, read_only=False)
    flag(imap_account, [args['<mail_id>']], args['<flag>'], unset=args['--unset'])
    imap_cli.disconnect(imap_account)
    return 0
# Script entry point: propagate main()'s return code to the shell.
if __name__ == '__main__':
    sys.exit(main())
|
import gzip
import re
import requests
import string
import sys
import time
import random
DEFAULT_HEADERS = {'User-Agent': 'ArchiveTeam'}
class FetchError(Exception):
    '''Raised when an HTTP response does not meet our expectations.'''
def main():
    '''Scan an ID range and write discovered shortcodes to a gzip file.

    argv: start_num end_num output_filename (e.g. myfile.txt.gz)
    '''
    # Take the program arguments given to this script
    # Normal programs use 'argparse' but this keeps things simple
    start_num = int(sys.argv[1])
    end_num = int(sys.argv[2])
    output_filename = sys.argv[3]  # this should be something like myfile.txt.gz
    assert start_num <= end_num, 'start_num must not exceed end_num'
    print('Starting', start_num, end_num)
    # The context manager guarantees the archive is closed (and flushed)
    # even if check_range() raises — the original explicit close() leaked
    # the file handle on any exception.
    with gzip.GzipFile(output_filename, 'wb') as gzip_file:
        for shortcode in check_range(start_num, end_num):
            # Write the valid result one per line to the file
            line = '{0}\n'.format(shortcode)
            gzip_file.write(line.encode('ascii'))
    print('Done')
def check_range(start_num, end_num):
    '''Yield discovered items ('id:..', 'user:..', 'blog:..') for each
    profile ID in the inclusive range [start_num, end_num].'''
    for num in range(start_num, end_num + 1):
        shortcode = num
        url = 'https://www.blogger.com/profile/{0}'.format(shortcode)
        counter = 0
        while True:
            # Give up after 20 attempts. Bug fix: the original tested
            # 'counter > 20', which allowed a 21st try, contradicting its
            # own "Try 20 times" comment.
            if counter >= 20:
                # This will stop the script with an error
                raise Exception('Giving up!')
            try:
                text = fetch(url)
            except FetchError:
                # The server may be overloaded so wait a bit
                print('Sleeping... If you see this')
                time.sleep(10)
            else:
                if text:
                    yield 'id:{0}'.format(shortcode)
                    userid = extract_handle(text)
                    if userid:
                        yield 'user:{0}'.format(userid)
                    for blog in extract_blogs(text):
                        yield 'blog:{0}'.format(blog)
                break  # stop the while loop
            counter += 1
def fetch(url):
    '''Fetch the URL and check if it returns OK.

    Returns the response text on HTTP 200, None on 404, and raises
    FetchError for an empty body or any other status code.
    '''
    # Randomized politeness delay between requests.
    time.sleep(random.randint(10, 25))
    print('Fetch', url)
    response = requests.get(url, headers=DEFAULT_HEADERS)
    # 'reason' is not always present on the response object; the original
    # two-argument getattr still raised AttributeError when it was missing —
    # supplying a default is the whole point of using getattr here.
    print('Got', response.status_code, getattr(response, 'reason', None))
    if response.status_code == 200:
        # The item exists
        if not response.text:
            # If HTML is empty maybe server broke
            raise FetchError()
        return response.text
    elif response.status_code == 404:
        # Does not exist
        return None
    else:
        # Problem
        raise FetchError()
def extract_handle(text):
    '''Return the page creator's numeric ID from the profile HTML, or None.'''
    # Search for something like
    # "http://www.blogger.com/feeds/14366755180455532991/blogs"
    # Bug fix: the original pattern '"http?://' made the final 'p' optional
    # instead of the 's', so https:// URLs were never matched.
    match = re.search(r'"https?://www\.blogger\.[a-z]+/feeds/([0-9]+)/', text)
    if match:
        return match.group(1)
def extract_blogs(text):
    '''Return the blogspot subdomain names quoted in the text.'''
    # Matches quoted URLs like "http://onwonder.blogspot.com/" and captures
    # the subdomain part ('onwonder').
    pattern = r'"https?://([^\.]+)\.blogspot\.[a-z]+/"'
    return re.findall(pattern, text)
# Script entry point.
if __name__ == '__main__':
    main()
discover.py: also discover blogs hosted outside blogspot.com (custom domains)
import gzip
import re
import requests
import string
import sys
import time
import random
DEFAULT_HEADERS = {'User-Agent': 'ArchiveTeam'}
class FetchError(Exception):
    '''Raised when an HTTP response does not meet our expectations.'''
def main():
    '''Scan an ID range and write discovered shortcodes to a gzip file.

    argv: start_num end_num output_filename (e.g. myfile.txt.gz)
    '''
    # Take the program arguments given to this script
    # Normal programs use 'argparse' but this keeps things simple
    start_num = int(sys.argv[1])
    end_num = int(sys.argv[2])
    output_filename = sys.argv[3]  # this should be something like myfile.txt.gz
    assert start_num <= end_num, 'start_num must not exceed end_num'
    print('Starting', start_num, end_num)
    # The context manager guarantees the archive is closed (and flushed)
    # even if check_range() raises — the original explicit close() leaked
    # the file handle on any exception.
    with gzip.GzipFile(output_filename, 'wb') as gzip_file:
        for shortcode in check_range(start_num, end_num):
            # Write the valid result one per line to the file
            line = '{0}\n'.format(shortcode)
            gzip_file.write(line.encode('ascii'))
    print('Done')
def check_range(start_num, end_num):
    '''Yield discovered items ('id:..', 'user:..', 'blog:..') for each
    profile ID in the inclusive range [start_num, end_num].'''
    for num in range(start_num, end_num + 1):
        shortcode = num
        url = 'https://www.blogger.com/profile/{0}'.format(shortcode)
        counter = 0
        while True:
            # Give up after 20 attempts. Bug fix: the original tested
            # 'counter > 20', which allowed a 21st try, contradicting its
            # own "Try 20 times" comment.
            if counter >= 20:
                # This will stop the script with an error
                raise Exception('Giving up!')
            try:
                text = fetch(url)
            except FetchError:
                # The server may be overloaded so wait a bit
                print('Sleeping... If you see this')
                time.sleep(10)
            else:
                if text:
                    yield 'id:{0}'.format(shortcode)
                    userid = extract_handle(text)
                    if userid:
                        yield 'user:{0}'.format(userid)
                    for blog in extract_blogs(text):
                        yield 'blog:{0}'.format(blog)
                break  # stop the while loop
            counter += 1
def fetch(url):
    '''Fetch the URL and check if it returns OK.

    Returns the response text on HTTP 200, None on 404, and raises
    FetchError for an empty body or any other status code.
    '''
    # Randomized politeness delay between requests.
    time.sleep(random.randint(10, 25))
    print('Fetch', url)
    response = requests.get(url, headers=DEFAULT_HEADERS)
    # 'reason' is not always present on the response object; the original
    # two-argument getattr still raised AttributeError when it was missing —
    # supplying a default is the whole point of using getattr here.
    print('Got', response.status_code, getattr(response, 'reason', None))
    if response.status_code == 200:
        # The item exists
        if not response.text:
            # If HTML is empty maybe server broke
            raise FetchError()
        return response.text
    elif response.status_code == 404:
        # Does not exist
        return None
    else:
        # Problem
        raise FetchError()
def extract_handle(text):
    '''Return the page creator's numeric ID from the profile HTML, or None.'''
    # Search for something like
    # "http://www.blogger.com/feeds/14366755180455532991/blogs"
    # Bug fix: the original pattern '"http?://' made the final 'p' optional
    # instead of the 's', so https:// URLs were never matched.
    match = re.search(r'"https?://www\.blogger\.[a-z]+/feeds/([0-9]+)/', text)
    if match:
        return match.group(1)
def extract_blogs(text):
    '''Return contributor blog URLs quoted in the text.'''
    # Captures the quoted URL that precedes a rel="contributor-to nofollow"
    # marker, so non-blogspot (custom-domain) blogs are found too.
    pattern = r'"(https?://[^"]+)" rel="contributor\-to nofollow"'
    return re.findall(pattern, text)
# Script entry point.
if __name__ == '__main__':
    main()
|
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: CalTableFlatBuffers
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class TrtTable(object):
    """FlatBuffers accessor for a calibration table: a vector of KeyValue pairs."""
    __slots__ = ['_tab']
    @classmethod
    def GetRootAsTrtTable(cls, buf, offset):
        """Return a TrtTable view rooted at *offset* inside *buf*."""
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = TrtTable()
        x.Init(buf, n + offset)
        return x
    # TrtTable
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)
    # TrtTable
    def Dict(self, j):
        """Return the j-th KeyValue entry, or None if the vector is absent."""
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            x = self._tab.Vector(o)
            x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
            x = self._tab.Indirect(x)
            # Bug fix (ORT #8940): import KeyValue via the installed package
            # path — the generated relative import
            # 'from CalTableFlatBuffers.KeyValue import KeyValue' fails when
            # this module is loaded as part of the onnxruntime package.
            from onnxruntime.quantization.CalTableFlatBuffers.KeyValue import KeyValue
            obj = KeyValue()
            obj.Init(self._tab.Bytes, x)
            return obj
        return None
    # TrtTable
    def DictLength(self):
        """Number of KeyValue entries (0 if the vector is absent)."""
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0
    # TrtTable
    def DictIsNone(self):
        """True when the Dict vector field is not present in the buffer."""
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        return o == 0
def TrtTableStart(builder):
    # Begin a TrtTable with one field slot (the Dict vector).
    builder.StartObject(1)
def TrtTableAddDict(builder, dict):
    # NOTE: the generated parameter name shadows the `dict` builtin; kept
    # as-is for compatibility with generated callers.
    builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(dict), 0)
def TrtTableStartDictVector(builder, numElems):
    # Vector of 4-byte offsets, 4-byte aligned.
    return builder.StartVector(4, numElems, 4)
def TrtTableEnd(builder):
    # Finish the table and return its offset within the buffer.
    return builder.EndObject()
fix: import error in TrtTable::Dict method (#8940)
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: CalTableFlatBuffers
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class TrtTable(object):
    """FlatBuffers accessor for a calibration table: a vector of KeyValue pairs."""
    __slots__ = ['_tab']
    @classmethod
    def GetRootAsTrtTable(cls, buf, offset):
        """Return a TrtTable view rooted at *offset* inside *buf*."""
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = TrtTable()
        x.Init(buf, n + offset)
        return x
    # TrtTable
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)
    # TrtTable
    def Dict(self, j):
        """Return the j-th KeyValue entry, or None if the vector is absent."""
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            x = self._tab.Vector(o)
            x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
            x = self._tab.Indirect(x)
            # Absolute package path (ORT #8940 fix): the bare generated
            # 'CalTableFlatBuffers' path fails inside the onnxruntime package.
            from onnxruntime.quantization.CalTableFlatBuffers.KeyValue import KeyValue
            obj = KeyValue()
            obj.Init(self._tab.Bytes, x)
            return obj
        return None
    # TrtTable
    def DictLength(self):
        """Number of KeyValue entries (0 if the vector is absent)."""
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0
    # TrtTable
    def DictIsNone(self):
        """True when the Dict vector field is not present in the buffer."""
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        return o == 0
def TrtTableStart(builder):
    # Begin a TrtTable with one field slot (the Dict vector).
    builder.StartObject(1)
def TrtTableAddDict(builder, dict):
    # NOTE: the generated parameter name shadows the `dict` builtin; kept
    # as-is for compatibility with generated callers.
    builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(dict), 0)
def TrtTableStartDictVector(builder, numElems):
    # Vector of 4-byte offsets, 4-byte aligned.
    return builder.StartVector(4, numElems, 4)
def TrtTableEnd(builder):
    # Finish the table and return its offset within the buffer.
    return builder.EndObject()
|
from __future__ import print_function
from __future__ import division
import sys
import time
import numpy as np
from copy import deepcopy
import tensorflow as tf
from attention_gru_cell import AttentionGRUCell
from tensorflow.contrib.cudnn_rnn.python.ops import cudnn_rnn_ops
import babi_input
class Config(object):
    """Holds model hyperparams and data information."""
    batch_size = 100
    embed_size = 80
    hidden_size = 80
    max_epochs = 256
    early_stopping = 20   # presumably patience for the training driver — consumer not in this file
    dropout = 0.9         # keep probability fed to the dropout placeholder (1.0 at eval)
    lr = 0.001
    l2 = 0.001            # weight on L2 regularization of non-bias variables
    cap_grads = False
    max_grad_val = 10     # clip norm used only when cap_grads is True
    noisy_grads = False   # add gaussian gradient noise when True
    word2vec_init = False
    embedding_init = np.sqrt(3)
    # NOTE not currently used hence non-sensical anneal_threshold
    anneal_threshold = 1000
    anneal_by = 1.5
    num_hops = 3          # number of episodic-memory passes
    num_attention_features = 4
    max_allowed_inputs = 130
    num_train = 9000
    floatX = np.float32
    babi_id = "1"
    babi_test_id = ""
    train_mode = True
def _add_gradient_noise(t, stddev=1e-3, name=None):
    """Adds gradient noise as described in http://arxiv.org/abs/1511.06807
    The input Tensor `t` should be a gradient.
    The output will be `t` + gaussian noise.
    0.001 was said to be a good fixed value for memory networks."""
    # Bug fix: honour the previously-ignored `name` argument; the default
    # keeps the original scope name so existing graphs are unchanged.
    with tf.variable_scope(name or 'gradient_noise'):
        gn = tf.random_normal(tf.shape(t), stddev=stddev)
        return tf.add(t, gn)
# from https://github.com/domluna/memn2n
def _position_encoding(sentence_size, embedding_size):
"""We could have used RNN for parsing sentence but that tends to overfit.
The simpler choice would be to take sum of embedding but we loose loose positional information.
Position encoding is described in section 4.1 in "End to End Memory Networks" in more detail (http://arxiv.org/pdf/1503.08895v5.pdf)"""
encoding = np.ones((embedding_size, sentence_size), dtype=np.float32)
ls = sentence_size+1
le = embedding_size+1
for i in range(1, le):
for j in range(1, ls):
encoding[i-1, j-1] = (i - (le-1)/2) * (j - (ls-1)/2)
encoding = 1 + 4 * encoding / embedding_size / sentence_size
return np.transpose(encoding)
class DMN_PLUS(object):
    """Dynamic Memory Network Plus for bAbI question answering.

    The whole TF1 static graph is assembled in __init__: question GRU,
    input fusion (bi-directional GRU over position-encoded sentences),
    episodic memory with an attention GRU, and a linear answer module.
    """
    def load_data(self, debug=False):
        """Loads train/valid/test data and sentence encoding"""
        if self.config.train_mode:
            self.train, self.valid, self.word_embedding, self.max_q_len, self.max_sentences, self.max_sen_len, self.vocab_size = babi_input.load_babi(self.config, split_sentences=True)
        else:
            self.test, self.word_embedding, self.max_q_len, self.max_sentences, self.max_sen_len, self.vocab_size = babi_input.load_babi(self.config, split_sentences=True)
        # Constant positional-encoding matrix reused for every sentence.
        self.encoding = _position_encoding(self.max_sen_len, self.config.embed_size)
    def add_placeholders(self):
        """add data placeholder to graph"""
        self.question_placeholder = tf.placeholder(tf.int32, shape=(self.config.batch_size, self.max_q_len))
        self.input_placeholder = tf.placeholder(tf.int32, shape=(self.config.batch_size, self.max_sentences, self.max_sen_len))
        self.question_len_placeholder = tf.placeholder(tf.int32, shape=(self.config.batch_size,))
        self.input_len_placeholder = tf.placeholder(tf.int32, shape=(self.config.batch_size,))
        self.answer_placeholder = tf.placeholder(tf.int64, shape=(self.config.batch_size,))
        # Scalar dropout feed; run_epoch passes 1 in evaluation mode.
        self.dropout_placeholder = tf.placeholder(tf.float32)
    def get_predictions(self, output):
        """Return the argmax class index of the softmaxed logits."""
        preds = tf.nn.softmax(output)
        pred = tf.argmax(preds, 1)
        return pred
    def add_loss_op(self, output):
        """Calculate loss"""
        loss = tf.reduce_sum(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=output, labels=self.answer_placeholder))
        # add l2 regularization for all variables except biases
        for v in tf.trainable_variables():
            if not 'bias' in v.name.lower():
                loss += self.config.l2*tf.nn.l2_loss(v)
        tf.summary.scalar('loss', loss)
        return loss
    def add_training_op(self, loss):
        """Calculate and apply gradients"""
        opt = tf.train.AdamOptimizer(learning_rate=self.config.lr)
        gvs = opt.compute_gradients(loss)
        # optionally cap and noise gradients to regularize
        if self.config.cap_grads:
            gvs = [(tf.clip_by_norm(grad, self.config.max_grad_val), var) for grad, var in gvs]
        if self.config.noisy_grads:
            gvs = [(_add_gradient_noise(grad), var) for grad, var in gvs]
        train_op = opt.apply_gradients(gvs)
        return train_op
    def get_question_representation(self):
        """Get question vectors via embedding and GRU"""
        questions = tf.nn.embedding_lookup(self.embeddings, self.question_placeholder)
        gru_cell = tf.contrib.rnn.GRUCell(self.config.hidden_size)
        # Only the final GRU state is used as the question vector.
        _, q_vec = tf.nn.dynamic_rnn(gru_cell,
                questions,
                dtype=np.float32,
                sequence_length=self.question_len_placeholder
        )
        return q_vec
    def get_input_representation(self):
        """Get fact (sentence) vectors via embedding, positional encoding and bi-directional GRU"""
        # get word vectors from embedding
        inputs = tf.nn.embedding_lookup(self.embeddings, self.input_placeholder)
        # use encoding to get sentence representation
        inputs = tf.reduce_sum(inputs * self.encoding, 2)
        forward_gru_cell = tf.contrib.rnn.GRUCell(self.config.hidden_size)
        backward_gru_cell = tf.contrib.rnn.GRUCell(self.config.hidden_size)
        outputs, _ = tf.nn.bidirectional_dynamic_rnn(
                forward_gru_cell,
                backward_gru_cell,
                inputs,
                dtype=np.float32,
                sequence_length=self.input_len_placeholder
        )
        # sum forward and backward output vectors
        fact_vecs = tf.reduce_sum(tf.stack(outputs), axis=0)
        # apply dropout
        fact_vecs = tf.nn.dropout(fact_vecs, self.dropout_placeholder)
        return fact_vecs
    def get_attention(self, q_vec, prev_memory, fact_vec, reuse):
        """Use question vector and previous memory to create scalar attention for current fact"""
        with tf.variable_scope("attention", reuse=reuse):
            features = [fact_vec*q_vec,
                        fact_vec*prev_memory,
                        tf.abs(fact_vec - q_vec),
                        tf.abs(fact_vec - prev_memory)]
            feature_vec = tf.concat(features, 1)
            attention = tf.contrib.layers.fully_connected(feature_vec,
                            self.config.embed_size,
                            activation_fn=tf.nn.tanh,
                            reuse=reuse, scope="fc1")
            attention = tf.contrib.layers.fully_connected(attention,
                            1,
                            activation_fn=None,
                            reuse=reuse, scope="fc2")
        return attention
    def generate_episode(self, memory, q_vec, fact_vecs, hop_index):
        """Generate episode by applying attention to current fact vectors through a modified GRU"""
        # Reuse attention weights after the very first fact of the first hop.
        attentions = [tf.squeeze(
            self.get_attention(q_vec, memory, fv, bool(hop_index) or bool(i)), axis=1)
            for i, fv in enumerate(tf.unstack(fact_vecs, axis=1))]
        attentions = tf.transpose(tf.stack(attentions))
        self.attentions.append(attentions)
        attentions = tf.nn.softmax(attentions)
        attentions = tf.expand_dims(attentions, axis=-1)
        reuse = True if hop_index > 0 else False
        # concatenate fact vectors and attentions for input into attGRU
        gru_inputs = tf.concat([fact_vecs, attentions], 2)
        with tf.variable_scope('attention_gru', reuse=reuse):
            _, episode = tf.nn.dynamic_rnn(AttentionGRUCell(self.config.hidden_size),
                    gru_inputs,
                    dtype=np.float32,
                    sequence_length=self.input_len_placeholder
            )
        return episode
    def add_answer_module(self, rnn_output, q_vec):
        """Linear softmax answer module"""
        rnn_output = tf.nn.dropout(rnn_output, self.dropout_placeholder)
        output = tf.layers.dense(tf.concat([rnn_output, q_vec], 1),
                self.vocab_size,
                activation=None)
        return output
    def inference(self):
        """Performs inference on the DMN model"""
        # input fusion module
        with tf.variable_scope("question", initializer=tf.contrib.layers.xavier_initializer()):
            print('==> get question representation')
            q_vec = self.get_question_representation()
        with tf.variable_scope("input", initializer=tf.contrib.layers.xavier_initializer()):
            print('==> get input representation')
            fact_vecs = self.get_input_representation()
        # keep track of attentions for possible strong supervision
        self.attentions = []
        # memory module
        with tf.variable_scope("memory", initializer=tf.contrib.layers.xavier_initializer()):
            print('==> build episodic memory')
            # generate n_hops episodes
            prev_memory = q_vec
            for i in range(self.config.num_hops):
                # get a new episode
                print('==> generating episode', i)
                episode = self.generate_episode(prev_memory, q_vec, fact_vecs, i)
                # untied weights for memory update
                with tf.variable_scope("hop_%d" % i):
                    prev_memory = tf.layers.dense(tf.concat([prev_memory, episode, q_vec], 1),
                            self.config.hidden_size,
                            activation=tf.nn.relu)
            output = prev_memory
        # pass memory module output through linear answer module
        with tf.variable_scope("answer", initializer=tf.contrib.layers.xavier_initializer()):
            output = self.add_answer_module(output, q_vec)
        return output
    def run_epoch(self, session, data, num_epoch=0, train_writer=None, train_op=None, verbose=2, train=False):
        """Run one pass over *data*; returns (mean loss, average batch accuracy)."""
        config = self.config
        dp = config.dropout
        if train_op is None:
            # Evaluation mode: no parameter updates, dropout disabled.
            train_op = tf.no_op()
            dp = 1
        total_steps = len(data[0]) // config.batch_size
        total_loss = []
        accuracy = 0
        # shuffle data
        p = np.random.permutation(len(data[0]))
        qp, ip, ql, il, im, a = data
        qp, ip, ql, il, im, a = qp[p], ip[p], ql[p], il[p], im[p], a[p]
        for step in range(total_steps):
            index = range(step*config.batch_size,(step+1)*config.batch_size)
            feed = {self.question_placeholder: qp[index],
                    self.input_placeholder: ip[index],
                    self.question_len_placeholder: ql[index],
                    self.input_len_placeholder: il[index],
                    self.answer_placeholder: a[index],
                    self.dropout_placeholder: dp}
            loss, pred, summary, _ = session.run(
                [self.calculate_loss, self.pred, self.merged, train_op], feed_dict=feed)
            if train_writer is not None:
                train_writer.add_summary(summary, num_epoch*total_steps + step)
            answers = a[step*config.batch_size:(step+1)*config.batch_size]
            accuracy += np.sum(pred == answers)/float(len(answers))
            total_loss.append(loss)
            if verbose and step % verbose == 0:
                sys.stdout.write('\r{} / {} : loss = {}'.format(
                    step, total_steps, np.mean(total_loss)))
                sys.stdout.flush()
        if verbose:
            sys.stdout.write('\r')
        return np.mean(total_loss), accuracy/float(total_steps)
    def __init__(self, config):
        self.config = config
        self.variables_to_save = {}
        # Build the entire graph eagerly at construction time.
        self.load_data(debug=False)
        self.add_placeholders()
        # set up embedding
        self.embeddings = tf.Variable(self.word_embedding.astype(np.float32), name="Embedding")
        self.output = self.inference()
        self.pred = self.get_predictions(self.output)
        self.calculate_loss = self.add_loss_op(self.output)
        self.train_step = self.add_training_op(self.calculate_loss)
        self.merged = tf.summary.merge_all()
Fix latent memory explosion: do not fetch train_op in session.run() during evaluation.
from __future__ import print_function
from __future__ import division
import sys
import time
import numpy as np
from copy import deepcopy
import tensorflow as tf
from attention_gru_cell import AttentionGRUCell
from tensorflow.contrib.cudnn_rnn.python.ops import cudnn_rnn_ops
import babi_input
class Config(object):
    """Holds model hyperparams and data information."""
    batch_size = 100
    embed_size = 80
    hidden_size = 80
    max_epochs = 256
    early_stopping = 20   # presumably patience for the training driver — consumer not in this file
    dropout = 0.9         # keep probability fed to the dropout placeholder (1.0 at eval)
    lr = 0.001
    l2 = 0.001            # weight on L2 regularization of non-bias variables
    cap_grads = False
    max_grad_val = 10     # clip norm used only when cap_grads is True
    noisy_grads = False   # add gaussian gradient noise when True
    word2vec_init = False
    embedding_init = np.sqrt(3)
    # NOTE not currently used hence non-sensical anneal_threshold
    anneal_threshold = 1000
    anneal_by = 1.5
    num_hops = 3          # number of episodic-memory passes
    num_attention_features = 4
    max_allowed_inputs = 130
    num_train = 9000
    floatX = np.float32
    babi_id = "1"
    babi_test_id = ""
    train_mode = True
def _add_gradient_noise(t, stddev=1e-3, name=None):
    """Adds gradient noise as described in http://arxiv.org/abs/1511.06807
    The input Tensor `t` should be a gradient.
    The output will be `t` + gaussian noise.
    0.001 was said to be a good fixed value for memory networks."""
    # Bug fix: honour the previously-ignored `name` argument; the default
    # keeps the original scope name so existing graphs are unchanged.
    with tf.variable_scope(name or 'gradient_noise'):
        gn = tf.random_normal(tf.shape(t), stddev=stddev)
        return tf.add(t, gn)
# from https://github.com/domluna/memn2n
def _position_encoding(sentence_size, embedding_size):
"""We could have used RNN for parsing sentence but that tends to overfit.
The simpler choice would be to take sum of embedding but we loose loose positional information.
Position encoding is described in section 4.1 in "End to End Memory Networks" in more detail (http://arxiv.org/pdf/1503.08895v5.pdf)"""
encoding = np.ones((embedding_size, sentence_size), dtype=np.float32)
ls = sentence_size+1
le = embedding_size+1
for i in range(1, le):
for j in range(1, ls):
encoding[i-1, j-1] = (i - (le-1)/2) * (j - (ls-1)/2)
encoding = 1 + 4 * encoding / embedding_size / sentence_size
return np.transpose(encoding)
class DMN_PLUS(object):
    """Dynamic Memory Network Plus for bAbI question answering.

    The whole TF1 static graph is assembled in __init__: question GRU,
    input fusion (bi-directional GRU over position-encoded sentences),
    episodic memory with an attention GRU, and a linear answer module.
    """
    def load_data(self, debug=False):
        """Loads train/valid/test data and sentence encoding"""
        if self.config.train_mode:
            self.train, self.valid, self.word_embedding, self.max_q_len, self.max_sentences, self.max_sen_len, self.vocab_size = babi_input.load_babi(self.config, split_sentences=True)
        else:
            self.test, self.word_embedding, self.max_q_len, self.max_sentences, self.max_sen_len, self.vocab_size = babi_input.load_babi(self.config, split_sentences=True)
        # Constant positional-encoding matrix reused for every sentence.
        self.encoding = _position_encoding(self.max_sen_len, self.config.embed_size)
    def add_placeholders(self):
        """add data placeholder to graph"""
        self.question_placeholder = tf.placeholder(tf.int32, shape=(self.config.batch_size, self.max_q_len))
        self.input_placeholder = tf.placeholder(tf.int32, shape=(self.config.batch_size, self.max_sentences, self.max_sen_len))
        self.question_len_placeholder = tf.placeholder(tf.int32, shape=(self.config.batch_size,))
        self.input_len_placeholder = tf.placeholder(tf.int32, shape=(self.config.batch_size,))
        self.answer_placeholder = tf.placeholder(tf.int64, shape=(self.config.batch_size,))
        # Scalar dropout feed; run_epoch passes 1 in evaluation mode.
        self.dropout_placeholder = tf.placeholder(tf.float32)
    def get_predictions(self, output):
        """Return the argmax class index of the softmaxed logits."""
        preds = tf.nn.softmax(output)
        pred = tf.argmax(preds, 1)
        return pred
    def add_loss_op(self, output):
        """Calculate loss"""
        loss = tf.reduce_sum(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=output, labels=self.answer_placeholder))
        # add l2 regularization for all variables except biases
        for v in tf.trainable_variables():
            if not 'bias' in v.name.lower():
                loss += self.config.l2*tf.nn.l2_loss(v)
        tf.summary.scalar('loss', loss)
        return loss
    def add_training_op(self, loss):
        """Calculate and apply gradients"""
        opt = tf.train.AdamOptimizer(learning_rate=self.config.lr)
        gvs = opt.compute_gradients(loss)
        # optionally cap and noise gradients to regularize
        if self.config.cap_grads:
            gvs = [(tf.clip_by_norm(grad, self.config.max_grad_val), var) for grad, var in gvs]
        if self.config.noisy_grads:
            gvs = [(_add_gradient_noise(grad), var) for grad, var in gvs]
        train_op = opt.apply_gradients(gvs)
        return train_op
    def get_question_representation(self):
        """Get question vectors via embedding and GRU"""
        questions = tf.nn.embedding_lookup(self.embeddings, self.question_placeholder)
        gru_cell = tf.contrib.rnn.GRUCell(self.config.hidden_size)
        # Only the final GRU state is used as the question vector.
        _, q_vec = tf.nn.dynamic_rnn(gru_cell,
                questions,
                dtype=np.float32,
                sequence_length=self.question_len_placeholder
        )
        return q_vec
    def get_input_representation(self):
        """Get fact (sentence) vectors via embedding, positional encoding and bi-directional GRU"""
        # get word vectors from embedding
        inputs = tf.nn.embedding_lookup(self.embeddings, self.input_placeholder)
        # use encoding to get sentence representation
        inputs = tf.reduce_sum(inputs * self.encoding, 2)
        forward_gru_cell = tf.contrib.rnn.GRUCell(self.config.hidden_size)
        backward_gru_cell = tf.contrib.rnn.GRUCell(self.config.hidden_size)
        outputs, _ = tf.nn.bidirectional_dynamic_rnn(
                forward_gru_cell,
                backward_gru_cell,
                inputs,
                dtype=np.float32,
                sequence_length=self.input_len_placeholder
        )
        # sum forward and backward output vectors
        fact_vecs = tf.reduce_sum(tf.stack(outputs), axis=0)
        # apply dropout
        fact_vecs = tf.nn.dropout(fact_vecs, self.dropout_placeholder)
        return fact_vecs
    def get_attention(self, q_vec, prev_memory, fact_vec, reuse):
        """Use question vector and previous memory to create scalar attention for current fact"""
        with tf.variable_scope("attention", reuse=reuse):
            features = [fact_vec*q_vec,
                        fact_vec*prev_memory,
                        tf.abs(fact_vec - q_vec),
                        tf.abs(fact_vec - prev_memory)]
            feature_vec = tf.concat(features, 1)
            attention = tf.contrib.layers.fully_connected(feature_vec,
                            self.config.embed_size,
                            activation_fn=tf.nn.tanh,
                            reuse=reuse, scope="fc1")
            attention = tf.contrib.layers.fully_connected(attention,
                            1,
                            activation_fn=None,
                            reuse=reuse, scope="fc2")
        return attention
    def generate_episode(self, memory, q_vec, fact_vecs, hop_index):
        """Generate episode by applying attention to current fact vectors through a modified GRU"""
        # Reuse attention weights after the very first fact of the first hop.
        attentions = [tf.squeeze(
            self.get_attention(q_vec, memory, fv, bool(hop_index) or bool(i)), axis=1)
            for i, fv in enumerate(tf.unstack(fact_vecs, axis=1))]
        attentions = tf.transpose(tf.stack(attentions))
        self.attentions.append(attentions)
        attentions = tf.nn.softmax(attentions)
        attentions = tf.expand_dims(attentions, axis=-1)
        reuse = True if hop_index > 0 else False
        # concatenate fact vectors and attentions for input into attGRU
        gru_inputs = tf.concat([fact_vecs, attentions], 2)
        with tf.variable_scope('attention_gru', reuse=reuse):
            _, episode = tf.nn.dynamic_rnn(AttentionGRUCell(self.config.hidden_size),
                    gru_inputs,
                    dtype=np.float32,
                    sequence_length=self.input_len_placeholder
            )
        return episode
    def add_answer_module(self, rnn_output, q_vec):
        """Linear softmax answer module"""
        rnn_output = tf.nn.dropout(rnn_output, self.dropout_placeholder)
        output = tf.layers.dense(tf.concat([rnn_output, q_vec], 1),
                self.vocab_size,
                activation=None)
        return output
    def inference(self):
        """Performs inference on the DMN model"""
        # input fusion module
        with tf.variable_scope("question", initializer=tf.contrib.layers.xavier_initializer()):
            print('==> get question representation')
            q_vec = self.get_question_representation()
        with tf.variable_scope("input", initializer=tf.contrib.layers.xavier_initializer()):
            print('==> get input representation')
            fact_vecs = self.get_input_representation()
        # keep track of attentions for possible strong supervision
        self.attentions = []
        # memory module
        with tf.variable_scope("memory", initializer=tf.contrib.layers.xavier_initializer()):
            print('==> build episodic memory')
            # generate n_hops episodes
            prev_memory = q_vec
            for i in range(self.config.num_hops):
                # get a new episode
                print('==> generating episode', i)
                episode = self.generate_episode(prev_memory, q_vec, fact_vecs, i)
                # untied weights for memory update
                with tf.variable_scope("hop_%d" % i):
                    prev_memory = tf.layers.dense(tf.concat([prev_memory, episode, q_vec], 1),
                            self.config.hidden_size,
                            activation=tf.nn.relu)
            output = prev_memory
        # pass memory module output through linear answer module
        with tf.variable_scope("answer", initializer=tf.contrib.layers.xavier_initializer()):
            output = self.add_answer_module(output, q_vec)
        return output
    def run_epoch(self, session, data, num_epoch=0, train_writer=None, train_op=None, verbose=2, train=False):
        """Run one pass over *data*; returns (mean loss, average batch accuracy)."""
        config = self.config
        dp = config.dropout
        if train_op is None:
            # Evaluation mode: dropout disabled. train_op deliberately stays
            # None so it is never added to the session.run fetches below
            # (this is the memory-growth fix).
            # train_op = tf.no_op()
            dp = 1
        total_steps = len(data[0]) // config.batch_size
        total_loss = []
        accuracy = 0
        # shuffle data
        p = np.random.permutation(len(data[0]))
        qp, ip, ql, il, im, a = data
        qp, ip, ql, il, im, a = qp[p], ip[p], ql[p], il[p], im[p], a[p]
        for step in range(total_steps):
            index = range(step*config.batch_size,(step+1)*config.batch_size)
            feed = {self.question_placeholder: qp[index],
                    self.input_placeholder: ip[index],
                    self.question_len_placeholder: ql[index],
                    self.input_len_placeholder: il[index],
                    self.answer_placeholder: a[index],
                    self.dropout_placeholder: dp}
            if train_op is None:
                loss, pred, summary, = session.run(
                    [self.calculate_loss, self.pred, self.merged], feed_dict=feed)
            else:
                loss, pred, summary, _ = session.run(
                    [self.calculate_loss, self.pred, self.merged, train_op], feed_dict=feed)
            if train_writer is not None:
                train_writer.add_summary(summary, num_epoch*total_steps + step)
            answers = a[step*config.batch_size:(step+1)*config.batch_size]
            accuracy += np.sum(pred == answers)/float(len(answers))
            total_loss.append(loss)
            if verbose and step % verbose == 0:
                sys.stdout.write('\r{} / {} : loss = {}'.format(
                    step, total_steps, np.mean(total_loss)))
                sys.stdout.flush()
        if verbose:
            sys.stdout.write('\r')
        return np.mean(total_loss), accuracy/float(total_steps)
    def __init__(self, config):
        self.config = config
        self.variables_to_save = {}
        # Build the entire graph eagerly at construction time.
        self.load_data(debug=False)
        self.add_placeholders()
        # set up embedding
        self.embeddings = tf.Variable(self.word_embedding.astype(np.float32), name="Embedding")
        self.output = self.inference()
        self.pred = self.get_predictions(self.output)
        self.calculate_loss = self.add_loss_op(self.output)
        self.train_step = self.add_training_op(self.calculate_loss)
        self.merged = tf.summary.merge_all()
|
# Copyright (C) 2003-2007 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS Zones."""
from __future__ import generators
import sys
import dns.exception
import dns.name
import dns.node
import dns.rdataclass
import dns.rdatatype
import dns.rdata
import dns.tokenizer
import dns.ttl
class BadZone(dns.exception.DNSException):
    """The zone is malformed."""
class NoSOA(BadZone):
    """The zone has no SOA RR at its origin."""
class NoNS(BadZone):
    """The zone has no NS RRset at its origin."""
class UnknownOrigin(BadZone):
    """The zone's origin is unknown."""
class Zone(object):
    """A DNS zone.
    A Zone is a mapping from names to nodes. The zone object may be
    treated like a Python dictionary, e.g. zone[name] will retrieve
    the node associated with that name. The I{name} may be a
    dns.name.Name object, or it may be a string. In the either case,
    if the name is relative it is treated as relative to the origin of
    the zone.
    @ivar rdclass: The zone's rdata class; the default is class IN.
    @type rdclass: int
    @ivar origin: The origin of the zone.
    @type origin: dns.name.Name object
    @ivar nodes: A dictionary mapping the names of nodes in the zone to the
    nodes themselves.
    @type nodes: dict
    @ivar relativize: should names in the zone be relativized?
    @type relativize: bool
    @cvar node_factory: the factory used to create a new node
    @type node_factory: class or callable
    """
    node_factory = dns.node.Node
    __slots__ = ['rdclass', 'origin', 'nodes', 'relativize']
    def __init__(self, origin, rdclass=dns.rdataclass.IN, relativize=True):
        """Initialize a zone object.
        @param origin: The origin of the zone.
        @type origin: dns.name.Name object
        @param rdclass: The zone's rdata class; the default is class IN.
        @type rdclass: int"""
        self.rdclass = rdclass
        self.origin = origin
        self.nodes = {}
        self.relativize = relativize
    def __eq__(self, other):
        """Two zones are equal if they have the same origin, class, and
        nodes.
        @rtype: bool
        """
        if not isinstance(other, Zone):
            return False
        if self.rdclass != other.rdclass or \
           self.origin != other.origin or \
           self.nodes != other.nodes:
            return False
        return True
    def __ne__(self, other):
        """Are two zones not equal?
        @rtype: bool
        """
        return not self.__eq__(other)
    def _validate_name(self, name):
        # Accept text names for convenience; convert them to
        # dns.name.Name objects.
        if isinstance(name, (str, unicode)):
            name = dns.name.from_text(name, None)
        elif not isinstance(name, dns.name.Name):
            # Call-style raise: valid in both Python 2 and Python 3,
            # unlike the old "raise E, msg" statement form.
            raise KeyError("name parameter must be convertable to a DNS name")
        if name.is_absolute():
            if not name.is_subdomain(self.origin):
                raise KeyError(
                    "name parameter must be a subdomain of the zone origin")
            if self.relativize:
                name = name.relativize(self.origin)
        return name
    def __getitem__(self, key):
        key = self._validate_name(key)
        return self.nodes[key]
    def __setitem__(self, key, value):
        key = self._validate_name(key)
        self.nodes[key] = value
    def __delitem__(self, key):
        key = self._validate_name(key)
        del self.nodes[key]
    def __iter__(self):
        return self.nodes.iterkeys()
    def iterkeys(self):
        return self.nodes.iterkeys()
    def keys(self):
        return self.nodes.keys()
    def itervalues(self):
        return self.nodes.itervalues()
    def values(self):
        return self.nodes.values()
    def iteritems(self):
        return self.nodes.iteritems()
    def items(self):
        return self.nodes.items()
    def get(self, key):
        key = self._validate_name(key)
        return self.nodes.get(key)
    def __contains__(self, other):
        return other in self.nodes
    def find_node(self, name, create=False):
        """Find a node in the zone, possibly creating it.
        @param name: the name of the node to find
        @type name: dns.name.Name object or string
        @param create: should the node be created if it doesn't exist?
        @type create: bool
        @raises KeyError: the name is not known and create was not specified.
        @rtype: dns.node.Node object
        """
        name = self._validate_name(name)
        node = self.nodes.get(name)
        if node is None:
            if not create:
                raise KeyError
            node = self.node_factory()
            self.nodes[name] = node
        return node
    def get_node(self, name, create=False):
        """Get a node in the zone, possibly creating it.
        This method is like L{find_node}, except it returns None instead
        of raising an exception if the node does not exist and creation
        has not been requested.
        @param name: the name of the node to find
        @type name: dns.name.Name object or string
        @param create: should the node be created if it doesn't exist?
        @type create: bool
        @rtype: dns.node.Node object or None
        """
        try:
            node = self.find_node(name, create)
        except KeyError:
            node = None
        return node
    def delete_node(self, name):
        """Delete the specified node if it exists.
        It is not an error if the node does not exist.
        """
        name = self._validate_name(name)
        # 'in' instead of the deprecated dict.has_key().
        if name in self.nodes:
            del self.nodes[name]
    def find_rdataset(self, name, rdtype, covers=dns.rdatatype.NONE,
                      create=False):
        """Look for rdata with the specified name and type in the zone,
        and return an rdataset encapsulating it.
        The I{name}, I{rdtype}, and I{covers} parameters may be
        strings, in which case they will be converted to their proper
        type.
        The rdataset returned is not a copy; changes to it will change
        the zone.
        KeyError is raised if the name or type are not found.
        Use L{get_rdataset} if you want to have None returned instead.
        @param name: the owner name to look for
        @type name: DNS.name.Name object or string
        @param rdtype: the rdata type desired
        @type rdtype: int or string
        @param covers: the covered type (defaults to None)
        @type covers: int or string
        @param create: should the node and rdataset be created if they do not
        exist?
        @type create: bool
        @raises KeyError: the node or rdata could not be found
        @rtype: dns.rrset.RRset object
        """
        name = self._validate_name(name)
        if isinstance(rdtype, str):
            rdtype = dns.rdatatype.from_text(rdtype)
        if isinstance(covers, str):
            covers = dns.rdatatype.from_text(covers)
        node = self.find_node(name, create)
        return node.find_rdataset(self.rdclass, rdtype, covers, create)
    def get_rdataset(self, name, rdtype, covers=dns.rdatatype.NONE,
                     create=False):
        """Look for rdata with the specified name and type in the zone,
        and return an rdataset encapsulating it.
        The I{name}, I{rdtype}, and I{covers} parameters may be
        strings, in which case they will be converted to their proper
        type.
        The rdataset returned is not a copy; changes to it will change
        the zone.
        None is returned if the name or type are not found.
        Use L{find_rdataset} if you want to have KeyError raised instead.
        @param name: the owner name to look for
        @type name: DNS.name.Name object or string
        @param rdtype: the rdata type desired
        @type rdtype: int or string
        @param covers: the covered type (defaults to None)
        @type covers: int or string
        @param create: should the node and rdataset be created if they do not
        exist?
        @type create: bool
        @rtype: dns.rrset.RRset object
        """
        try:
            rdataset = self.find_rdataset(name, rdtype, covers, create)
        except KeyError:
            rdataset = None
        return rdataset
    def delete_rdataset(self, name, rdtype, covers=dns.rdatatype.NONE):
        """Delete the rdataset matching I{rdtype} and I{covers}, if it
        exists at the node specified by I{name}.
        The I{name}, I{rdtype}, and I{covers} parameters may be
        strings, in which case they will be converted to their proper
        type.
        It is not an error if the node does not exist, or if there is no
        matching rdataset at the node.
        If the node has no rdatasets after the deletion, it will itself
        be deleted.
        @param name: the owner name to look for
        @type name: DNS.name.Name object or string
        @param rdtype: the rdata type desired
        @type rdtype: int or string
        @param covers: the covered type (defaults to None)
        @type covers: int or string
        """
        name = self._validate_name(name)
        if isinstance(rdtype, str):
            rdtype = dns.rdatatype.from_text(rdtype)
        if isinstance(covers, str):
            covers = dns.rdatatype.from_text(covers)
        node = self.get_node(name)
        if node is not None:
            node.delete_rdataset(self.rdclass, rdtype, covers)
            # Remove the node entirely once it has no rdatasets left.
            if len(node) == 0:
                self.delete_node(name)
    def replace_rdataset(self, name, replacement):
        """Replace an rdataset at name.
        It is not an error if there is no rdataset matching I{replacement}.
        Ownership of the I{replacement} object is transferred to the zone;
        in other words, this method does not store a copy of I{replacement}
        at the node, it stores I{replacement} itself.
        If the I{name} node does not exist, it is created.
        @param name: the owner name
        @type name: DNS.name.Name object or string
        @param replacement: the replacement rdataset
        @type replacement: dns.rdataset.Rdataset
        """
        if replacement.rdclass != self.rdclass:
            raise ValueError('replacement.rdclass != zone.rdclass')
        node = self.find_node(name, True)
        node.replace_rdataset(replacement)
    def find_rrset(self, name, rdtype, covers=dns.rdatatype.NONE):
        """Look for rdata with the specified name and type in the zone,
        and return an RRset encapsulating it.
        The I{name}, I{rdtype}, and I{covers} parameters may be
        strings, in which case they will be converted to their proper
        type.
        This method is less efficient than the similar
        L{find_rdataset} because it creates an RRset instead of
        returning the matching rdataset. It may be more convenient
        for some uses since it returns an object which binds the owner
        name to the rdata.
        This method may not be used to create new nodes or rdatasets;
        use L{find_rdataset} instead.
        KeyError is raised if the name or type are not found.
        Use L{get_rrset} if you want to have None returned instead.
        @param name: the owner name to look for
        @type name: DNS.name.Name object or string
        @param rdtype: the rdata type desired
        @type rdtype: int or string
        @param covers: the covered type (defaults to None)
        @type covers: int or string
        @raises KeyError: the node or rdata could not be found
        @rtype: dns.rrset.RRset object
        """
        name = self._validate_name(name)
        if isinstance(rdtype, str):
            rdtype = dns.rdatatype.from_text(rdtype)
        if isinstance(covers, str):
            covers = dns.rdatatype.from_text(covers)
        rdataset = self.nodes[name].find_rdataset(self.rdclass, rdtype, covers)
        rrset = dns.rrset.RRset(name, self.rdclass, rdtype, covers)
        rrset.update(rdataset)
        return rrset
    def get_rrset(self, name, rdtype, covers=dns.rdatatype.NONE):
        """Look for rdata with the specified name and type in the zone,
        and return an RRset encapsulating it.
        The I{name}, I{rdtype}, and I{covers} parameters may be
        strings, in which case they will be converted to their proper
        type.
        This method is less efficient than the similar L{get_rdataset}
        because it creates an RRset instead of returning the matching
        rdataset. It may be more convenient for some uses since it
        returns an object which binds the owner name to the rdata.
        This method may not be used to create new nodes or rdatasets;
        use L{find_rdataset} instead.
        None is returned if the name or type are not found.
        Use L{find_rrset} if you want to have KeyError raised instead.
        @param name: the owner name to look for
        @type name: DNS.name.Name object or string
        @param rdtype: the rdata type desired
        @type rdtype: int or string
        @param covers: the covered type (defaults to None)
        @type covers: int or string
        @rtype: dns.rrset.RRset object
        """
        try:
            rrset = self.find_rrset(name, rdtype, covers)
        except KeyError:
            rrset = None
        return rrset
    def iterate_rdatasets(self, rdtype=dns.rdatatype.ANY,
                          covers=dns.rdatatype.NONE):
        """Return a generator which yields (name, rdataset) tuples for
        all rdatasets in the zone which have the specified I{rdtype}
        and I{covers}. If I{rdtype} is dns.rdatatype.ANY, the default,
        then all rdatasets will be matched.
        @param rdtype: int or string
        @type rdtype: int or string
        @param covers: the covered type (defaults to None)
        @type covers: int or string
        """
        if isinstance(rdtype, str):
            rdtype = dns.rdatatype.from_text(rdtype)
        if isinstance(covers, str):
            covers = dns.rdatatype.from_text(covers)
        for (name, node) in self.iteritems():
            for rds in node:
                if rdtype == dns.rdatatype.ANY or \
                   (rds.rdtype == rdtype and rds.covers == covers):
                    yield (name, rds)
    def iterate_rdatas(self, rdtype=dns.rdatatype.ANY,
                       covers=dns.rdatatype.NONE):
        """Return a generator which yields (name, ttl, rdata) tuples for
        all rdatas in the zone which have the specified I{rdtype}
        and I{covers}. If I{rdtype} is dns.rdatatype.ANY, the default,
        then all rdatas will be matched.
        @param rdtype: int or string
        @type rdtype: int or string
        @param covers: the covered type (defaults to None)
        @type covers: int or string
        """
        if isinstance(rdtype, str):
            rdtype = dns.rdatatype.from_text(rdtype)
        if isinstance(covers, str):
            covers = dns.rdatatype.from_text(covers)
        for (name, node) in self.iteritems():
            for rds in node:
                if rdtype == dns.rdatatype.ANY or \
                   (rds.rdtype == rdtype and rds.covers == covers):
                    for rdata in rds:
                        yield (name, rds.ttl, rdata)
    def to_file(self, f, sorted=True, relativize=True, nl=None):
        """Write a zone to a file.
        @param f: file or string. If I{f} is a string, it is treated
        as the name of a file to open.
        @param sorted: if True, the file will be written with the
        names sorted in DNSSEC order from least to greatest. Otherwise
        the names will be written in whatever order they happen to have
        in the zone's dictionary.
        @param relativize: if True, domain names in the output will be
        relativized to the zone's origin (if possible).
        @type relativize: bool
        @param nl: The end of line string. If not specified, the
        output will use the platform's native end-of-line marker (i.e.
        LF on POSIX, CRLF on Windows, CR on Macintosh).
        @type nl: string or None
        """
        if sys.hexversion >= 0x02030000:
            # allow Unicode filenames
            str_type = basestring
        else:
            str_type = str
        if nl is None:
            # Text mode so the platform's native newline is emitted.
            opts = 'w'
        else:
            opts = 'wb'
        if isinstance(f, str_type):
            # open() instead of the file() builtin; equivalent in
            # Python 2 and file() no longer exists in Python 3.
            f = open(f, opts)
            want_close = True
        else:
            want_close = False
        try:
            if sorted:
                names = self.keys()
                names.sort()
            else:
                names = self.iterkeys()
            for n in names:
                l = self[n].to_text(n, origin=self.origin,
                                    relativize=relativize)
                if nl is None:
                    print >> f, l
                else:
                    f.write(l)
                    f.write(nl)
        finally:
            # Only close files we opened ourselves.
            if want_close:
                f.close()
    def check_origin(self):
        """Do some simple checking of the zone's origin.
        @raises dns.zone.NoSOA: there is no SOA RR
        @raises dns.zone.NoNS: there is no NS RRset
        @raises KeyError: there is no origin node
        """
        if self.relativize:
            name = dns.name.empty
        else:
            name = self.origin
        if self.get_rdataset(name, dns.rdatatype.SOA) is None:
            raise NoSOA
        if self.get_rdataset(name, dns.rdatatype.NS) is None:
            raise NoNS
class _MasterReader(object):
    """Read a DNS master file
    @ivar tok: The tokenizer
    @type tok: dns.tokenizer.Tokenizer object
    @ivar ttl: The default TTL
    @type ttl: int
    @ivar last_name: The last name read
    @type last_name: dns.name.Name object
    @ivar current_origin: The current origin
    @type current_origin: dns.name.Name object
    @ivar relativize: should names in the zone be relativized?
    @type relativize: bool
    @ivar zone: the zone
    @type zone: dns.zone.Zone object
    @ivar saved_state: saved reader state (used when processing $INCLUDE)
    @type saved_state: list of (tokenizer, current_origin, last_name, file)
    tuples.
    @ivar current_file: the file object of the $INCLUDed file being parsed
    (None if no $INCLUDE is active).
    @ivar allow_include: is $INCLUDE allowed?
    @type allow_include: bool
    @ivar check_origin: should sanity checks of the origin node be done?
    The default is True.
    @type check_origin: bool
    """
    def __init__(self, tok, origin, rdclass, relativize, zone_factory=Zone,
                 allow_include=False, check_origin=True):
        # Accept a textual origin for convenience.
        if isinstance(origin, (str, unicode)):
            origin = dns.name.from_text(origin)
        self.tok = tok
        self.current_origin = origin
        self.relativize = relativize
        self.ttl = 0
        self.last_name = None
        self.zone = zone_factory(origin, rdclass, relativize=relativize)
        self.saved_state = []
        self.current_file = None
        self.allow_include = allow_include
        self.check_origin = check_origin
    def _eat_line(self):
        """Consume and discard tokens up to and including the next EOL
        (or EOF)."""
        while 1:
            (ttype, t) = self.tok.get()
            if ttype == dns.tokenizer.EOL or ttype == dns.tokenizer.EOF:
                break
    def _rr_line(self):
        """Process one line from a DNS master file."""
        # Name
        if self.current_origin is None:
            raise UnknownOrigin
        token = self.tok.get(want_leading = True)
        if token[0] != dns.tokenizer.WHITESPACE:
            self.last_name = dns.name.from_text(token[1], self.current_origin)
        else:
            # Leading whitespace means "same owner name as the previous RR".
            token = self.tok.get()
            if token[0] == dns.tokenizer.EOL or \
               token[0] == dns.tokenizer.EOF:
                # treat leading WS followed by EOL/EOF as if they were EOL/EOF.
                return
            self.tok.unget(token)
        name = self.last_name
        if not name.is_subdomain(self.zone.origin):
            # Silently skip out-of-zone data.
            self._eat_line()
            return
        if self.relativize:
            name = name.relativize(self.zone.origin)
        token = self.tok.get()
        if token[0] != dns.tokenizer.IDENTIFIER:
            raise dns.exception.SyntaxError
        # TTL
        try:
            ttl = dns.ttl.from_text(token[1])
            token = self.tok.get()
            if token[0] != dns.tokenizer.IDENTIFIER:
                raise dns.exception.SyntaxError
        except dns.ttl.BadTTL:
            # No explicit TTL on this line; use the zone default ($TTL).
            ttl = self.ttl
        # Class
        try:
            rdclass = dns.rdataclass.from_text(token[1])
            token = self.tok.get()
            if token[0] != dns.tokenizer.IDENTIFIER:
                raise dns.exception.SyntaxError
        except dns.exception.SyntaxError:
            raise dns.exception.SyntaxError
        except:
            # The token was not a class, so the class is implicitly the
            # zone's class; the token is re-examined as the type below.
            rdclass = self.zone.rdclass
        if rdclass != self.zone.rdclass:
            raise dns.exception.SyntaxError, "RR class is not zone's class"
        # Type
        try:
            rdtype = dns.rdatatype.from_text(token[1])
        except:
            raise dns.exception.SyntaxError, \
                  "unknown rdatatype '%s'" % token[1]
        n = self.zone.nodes.get(name)
        if n is None:
            n = self.zone.node_factory()
            self.zone.nodes[name] = n
        try:
            rd = dns.rdata.from_text(rdclass, rdtype, self.tok,
                                     self.current_origin, False)
        except dns.exception.SyntaxError:
            # Catch and reraise.
            (ty, va) = sys.exc_info()[:2]
            raise ty, va
        except:
            # All exceptions that occur in the processing of rdata
            # are treated as syntax errors. This is not strictly
            # correct, but it is correct almost all of the time.
            # We convert them to syntax errors so that we can emit
            # helpful filename:line info.
            (ty, va) = sys.exc_info()[:2]
            raise dns.exception.SyntaxError, \
                  "caught exception %s: %s" % (str(ty), str(va))
        rd.choose_relativity(self.zone.origin, self.relativize)
        covers = rd.covers()
        rds = n.find_rdataset(rdclass, rdtype, covers, True)
        rds.add(rd, ttl)
    def read(self):
        """Read a DNS master file and build a zone object.
        @raises dns.zone.NoSOA: No SOA RR was found at the zone origin
        @raises dns.zone.NoNS: No NS RRset was found at the zone origin
        """
        try:
            while 1:
                token = self.tok.get(True, True)
                if token[0] == dns.tokenizer.EOF:
                    if not self.current_file is None:
                        self.current_file.close()
                    if len(self.saved_state) > 0:
                        # EOF of an $INCLUDEd file: restore the state of
                        # the including file and keep reading from it.
                        (self.tok,
                         self.current_origin,
                         self.last_name,
                         self.current_file,
                         self.ttl) = self.saved_state.pop(-1)
                        continue
                    break
                elif token[0] == dns.tokenizer.EOL:
                    continue
                elif token[0] == dns.tokenizer.COMMENT:
                    self.tok.get_eol()
                    continue
                elif token[1][0] == '$':
                    # Master file directive.
                    u = token[1].upper()
                    if u == '$TTL':
                        token = self.tok.get()
                        if token[0] != dns.tokenizer.IDENTIFIER:
                            raise dns.exception.SyntaxError, "bad $TTL"
                        self.ttl = dns.ttl.from_text(token[1])
                        self.tok.get_eol()
                    elif u == '$ORIGIN':
                        self.current_origin = self.tok.get_name()
                        self.tok.get_eol()
                        # A zone created with origin=None adopts the
                        # first $ORIGIN seen.
                        if self.zone.origin is None:
                            self.zone.origin = self.current_origin
                    elif u == '$INCLUDE' and self.allow_include:
                        token = self.tok.get()
                        if token[0] != dns.tokenizer.QUOTED_STRING:
                            raise dns.exception.SyntaxError, \
                                  "bad filename in $INCLUDE"
                        filename = token[1]
                        token = self.tok.get()
                        if token[0] == dns.tokenizer.IDENTIFIER:
                            # Optional new origin for the included file.
                            new_origin = dns.name.from_text(token[1], \
                                                            self.current_origin)
                            self.tok.get_eol()
                        elif token[0] != dns.tokenizer.EOL and \
                             token[0] != dns.tokenizer.EOF:
                            raise dns.exception.SyntaxError, \
                                  "bad origin in $INCLUDE"
                        else:
                            new_origin = self.current_origin
                        # Push the current reader state; it is restored
                        # when the included file reaches EOF.
                        self.saved_state.append((self.tok,
                                                 self.current_origin,
                                                 self.last_name,
                                                 self.current_file,
                                                 self.ttl))
                        self.current_file = file(filename, 'r')
                        self.tok = dns.tokenizer.Tokenizer(self.current_file,
                                                           filename)
                        self.current_origin = new_origin
                    else:
                        raise dns.exception.SyntaxError, \
                              "Unknown master file directive '" + u + "'"
                    continue
                self.tok.unget(token)
                self._rr_line()
        except dns.exception.SyntaxError, detail:
            # Re-raise with filename:line context for the user.
            (filename, line_number) = self.tok.where()
            if detail is None:
                detail = "syntax error"
            raise dns.exception.SyntaxError, \
                  "%s:%d: %s" % (filename, line_number, detail)
        # Now that we're done reading, do some basic checking of the zone.
        if self.check_origin:
            self.zone.check_origin()
def from_text(text, origin = None, rdclass = dns.rdataclass.IN,
              relativize = True, zone_factory=Zone, filename=None,
              allow_include=False, check_origin=True):
    """Build a zone object from a master file format string.
    @param text: the master file format input
    @type text: string.
    @param origin: The origin of the zone; if not specified, the first
    $ORIGIN statement in the master file will determine the origin of the
    zone.
    @type origin: dns.name.Name object or string
    @param rdclass: The zone's rdata class; the default is class IN.
    @type rdclass: int
    @param relativize: should names be relativized? The default is True
    @type relativize: bool
    @param zone_factory: The zone factory to use
    @type zone_factory: function returning a Zone
    @param filename: The filename to emit when describing where an error
    occurred; the default is '<string>'.
    @type filename: string
    @param allow_include: is $INCLUDE allowed?
    @type allow_include: bool
    @param check_origin: should sanity checks of the origin node be done?
    The default is True.
    @type check_origin: bool
    @raises dns.zone.NoSOA: No SOA RR was found at the zone origin
    @raises dns.zone.NoNS: No NS RRset was found at the zone origin
    @rtype: dns.zone.Zone object
    """
    # Note: 'text' may also be a file object; that is an implementation
    # detail relied upon by from_file(), not part of the public API.
    if filename is None:
        filename = '<string>'
    tokenizer = dns.tokenizer.Tokenizer(text, filename)
    master = _MasterReader(tokenizer, origin, rdclass, relativize,
                           zone_factory, allow_include=allow_include,
                           check_origin=check_origin)
    master.read()
    return master.zone
def from_file(f, origin = None, rdclass = dns.rdataclass.IN,
              relativize = True, zone_factory=Zone, filename=None,
              allow_include=True, check_origin=True):
    """Read a master file and build a zone object.
    @param f: file or string. If I{f} is a string, it is treated
    as the name of a file to open.
    @param origin: The origin of the zone; if not specified, the first
    $ORIGIN statement in the master file will determine the origin of the
    zone.
    @type origin: dns.name.Name object or string
    @param rdclass: The zone's rdata class; the default is class IN.
    @type rdclass: int
    @param relativize: should names be relativized? The default is True
    @type relativize: bool
    @param zone_factory: The zone factory to use
    @type zone_factory: function returning a Zone
    @param filename: The filename to emit when describing where an error
    occurred; the default is '<file>', or the value of I{f} if I{f} is a
    string.
    @type filename: string
    @param allow_include: is $INCLUDE allowed?
    @type allow_include: bool
    @param check_origin: should sanity checks of the origin node be done?
    The default is True.
    @type check_origin: bool
    @raises dns.zone.NoSOA: No SOA RR was found at the zone origin
    @raises dns.zone.NoNS: No NS RRset was found at the zone origin
    @rtype: dns.zone.Zone object
    """
    if sys.hexversion >= 0x02030000:
        # allow Unicode filenames; turn on universal newline support
        str_type = basestring
        opts = 'rU'
    else:
        str_type = str
        opts = 'r'
    if isinstance(f, str_type):
        if filename is None:
            filename = f
        # open() instead of the file() builtin; equivalent in Python 2
        # and file() no longer exists in Python 3.
        f = open(f, opts)
        want_close = True
    else:
        if filename is None:
            filename = '<file>'
        want_close = False
    try:
        z = from_text(f, origin, rdclass, relativize, zone_factory,
                      filename, allow_include, check_origin)
    finally:
        # Only close files we opened ourselves.
        if want_close:
            f.close()
    return z
def from_xfr(xfr, zone_factory=Zone, relativize=True):
    """Convert the output of a zone transfer generator into a zone object.
    @param xfr: The xfr generator
    @type xfr: generator of dns.message.Message objects
    @param relativize: should names be relativized? The default is True.
    It is essential that the relativize setting matches the one specified
    to dns.query.xfr().
    @type relativize: bool
    @raises dns.zone.NoSOA: No SOA RR was found at the zone origin
    @raises dns.zone.NoNS: No NS RRset was found at the zone origin
    @rtype: dns.zone.Zone object
    """
    zone = None
    for message in xfr:
        if zone is None:
            # The first answer RR of the first message fixes the zone's
            # origin and class.
            if relativize:
                origin = message.origin
            else:
                origin = message.answer[0].name
            rdclass = message.answer[0].rdclass
            zone = zone_factory(origin, rdclass, relativize=relativize)
        for rrset in message.answer:
            node = zone.nodes.get(rrset.name)
            if not node:
                node = zone.node_factory()
                zone.nodes[rrset.name] = node
            rdataset = node.find_rdataset(rrset.rdclass, rrset.rdtype,
                                          rrset.covers, True)
            rdataset.update_ttl(rrset.ttl)
            for rdata in rrset:
                rdata.choose_relativity(zone.origin, relativize)
                rdataset.add(rdata)
    zone.check_origin()
    return zone
# import dns.rrset, since we use it (in Zone.find_rrset / get_rrset).
# The original line had the comment text fused into the statement,
# which is a syntax error; presumably the import lives at the bottom
# of the module to avoid a circular import -- TODO confirm.
import dns.rrset
# Copyright (C) 2003-2007 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS Zones."""
from __future__ import generators
import sys
import dns.exception
import dns.name
import dns.node
import dns.rdataclass
import dns.rdatatype
import dns.rdata
import dns.rrset
import dns.tokenizer
import dns.ttl
class BadZone(dns.exception.DNSException):
    """The zone is malformed."""
    # Base class for zone-integrity errors (NoSOA, NoNS).
    # NOTE(review): some dnspython versions use an exception class's
    # docstring as its default message, so the docstring text is
    # user-visible — do not reword it casually.
    pass
class NoSOA(BadZone):
    """The zone has no SOA RR at its origin."""
    # Raised by Zone.check_origin() when no SOA rdataset exists at the
    # origin node.
    pass
class NoNS(BadZone):
    """The zone has no NS RRset at its origin."""
    # Raised by Zone.check_origin() when no NS rdataset exists at the
    # origin node.
    pass
class UnknownOrigin(BadZone):
    """The zone's origin is unknown."""
    # Raised while parsing a master file if an RR line is seen before
    # any origin has been established (no origin argument and no
    # $ORIGIN directive yet).
    pass
class Zone(object):
"""A DNS zone.
A Zone is a mapping from names to nodes. The zone object may be
treated like a Python dictionary, e.g. zone[name] will retrieve
the node associated with that name. The I{name} may be a
dns.name.Name object, or it may be a string. In the either case,
if the name is relative it is treated as relative to the origin of
the zone.
@ivar rdclass: The zone's rdata class; the default is class IN.
@type rdclass: int
@ivar origin: The origin of the zone.
@type origin: dns.name.Name object
@ivar nodes: A dictionary mapping the names of nodes in the zone to the
nodes themselves.
@type nodes: dict
@ivar relativize: should names in the zone be relativized?
@type relativize: bool
@cvar node_factory: the factory used to create a new node
@type node_factory: class or callable
"""
node_factory = dns.node.Node
__slots__ = ['rdclass', 'origin', 'nodes', 'relativize']
def __init__(self, origin, rdclass=dns.rdataclass.IN, relativize=True):
"""Initialize a zone object.
@param origin: The origin of the zone.
@type origin: dns.name.Name object
@param rdclass: The zone's rdata class; the default is class IN.
@type rdclass: int"""
self.rdclass = rdclass
self.origin = origin
self.nodes = {}
self.relativize = relativize
def __eq__(self, other):
"""Two zones are equal if they have the same origin, class, and
nodes.
@rtype: bool
"""
if not isinstance(other, Zone):
return False
if self.rdclass != other.rdclass or \
self.origin != other.origin or \
self.nodes != other.nodes:
return False
return True
def __ne__(self, other):
"""Are two zones not equal?
@rtype: bool
"""
return not self.__eq__(other)
def _validate_name(self, name):
if isinstance(name, (str, unicode)):
name = dns.name.from_text(name, None)
elif not isinstance(name, dns.name.Name):
raise KeyError, \
"name parameter must be convertable to a DNS name"
if name.is_absolute():
if not name.is_subdomain(self.origin):
raise KeyError, \
"name parameter must be a subdomain of the zone origin"
if self.relativize:
name = name.relativize(self.origin)
return name
def __getitem__(self, key):
key = self._validate_name(key)
return self.nodes[key]
def __setitem__(self, key, value):
key = self._validate_name(key)
self.nodes[key] = value
def __delitem__(self, key):
key = self._validate_name(key)
del self.nodes[key]
def __iter__(self):
return self.nodes.iterkeys()
def iterkeys(self):
return self.nodes.iterkeys()
def keys(self):
return self.nodes.keys()
def itervalues(self):
return self.nodes.itervalues()
def values(self):
return self.nodes.values()
def iteritems(self):
return self.nodes.iteritems()
def items(self):
return self.nodes.items()
def get(self, key):
key = self._validate_name(key)
return self.nodes.get(key)
def __contains__(self, other):
return other in self.nodes
def find_node(self, name, create=False):
"""Find a node in the zone, possibly creating it.
@param name: the name of the node to find
@type name: dns.name.Name object or string
@param create: should the node be created if it doesn't exist?
@type create: bool
@raises KeyError: the name is not known and create was not specified.
@rtype: dns.node.Node object
"""
name = self._validate_name(name)
node = self.nodes.get(name)
if node is None:
if not create:
raise KeyError
node = self.node_factory()
self.nodes[name] = node
return node
def get_node(self, name, create=False):
"""Get a node in the zone, possibly creating it.
This method is like L{find_node}, except it returns None instead
of raising an exception if the node does not exist and creation
has not been requested.
@param name: the name of the node to find
@type name: dns.name.Name object or string
@param create: should the node be created if it doesn't exist?
@type create: bool
@rtype: dns.node.Node object or None
"""
try:
node = self.find_node(name, create)
except KeyError:
node = None
return node
def delete_node(self, name):
"""Delete the specified node if it exists.
It is not an error if the node does not exist.
"""
name = self._validate_name(name)
if self.nodes.has_key(name):
del self.nodes[name]
def find_rdataset(self, name, rdtype, covers=dns.rdatatype.NONE,
create=False):
"""Look for rdata with the specified name and type in the zone,
and return an rdataset encapsulating it.
The I{name}, I{rdtype}, and I{covers} parameters may be
strings, in which case they will be converted to their proper
type.
The rdataset returned is not a copy; changes to it will change
the zone.
KeyError is raised if the name or type are not found.
Use L{get_rdataset} if you want to have None returned instead.
@param name: the owner name to look for
@type name: DNS.name.Name object or string
@param rdtype: the rdata type desired
@type rdtype: int or string
@param covers: the covered type (defaults to None)
@type covers: int or string
@param create: should the node and rdataset be created if they do not
exist?
@type create: bool
@raises KeyError: the node or rdata could not be found
@rtype: dns.rrset.RRset object
"""
name = self._validate_name(name)
if isinstance(rdtype, str):
rdtype = dns.rdatatype.from_text(rdtype)
if isinstance(covers, str):
covers = dns.rdatatype.from_text(covers)
node = self.find_node(name, create)
return node.find_rdataset(self.rdclass, rdtype, covers, create)
def get_rdataset(self, name, rdtype, covers=dns.rdatatype.NONE,
create=False):
"""Look for rdata with the specified name and type in the zone,
and return an rdataset encapsulating it.
The I{name}, I{rdtype}, and I{covers} parameters may be
strings, in which case they will be converted to their proper
type.
The rdataset returned is not a copy; changes to it will change
the zone.
None is returned if the name or type are not found.
Use L{find_rdataset} if you want to have KeyError raised instead.
@param name: the owner name to look for
@type name: DNS.name.Name object or string
@param rdtype: the rdata type desired
@type rdtype: int or string
@param covers: the covered type (defaults to None)
@type covers: int or string
@param create: should the node and rdataset be created if they do not
exist?
@type create: bool
@rtype: dns.rrset.RRset object
"""
try:
rdataset = self.find_rdataset(name, rdtype, covers, create)
except KeyError:
rdataset = None
return rdataset
def delete_rdataset(self, name, rdtype, covers=dns.rdatatype.NONE):
"""Delete the rdataset matching I{rdtype} and I{covers}, if it
exists at the node specified by I{name}.
The I{name}, I{rdtype}, and I{covers} parameters may be
strings, in which case they will be converted to their proper
type.
It is not an error if the node does not exist, or if there is no
matching rdataset at the node.
If the node has no rdatasets after the deletion, it will itself
be deleted.
@param name: the owner name to look for
@type name: DNS.name.Name object or string
@param rdtype: the rdata type desired
@type rdtype: int or string
@param covers: the covered type (defaults to None)
@type covers: int or string
"""
name = self._validate_name(name)
if isinstance(rdtype, str):
rdtype = dns.rdatatype.from_text(rdtype)
if isinstance(covers, str):
covers = dns.rdatatype.from_text(covers)
node = self.get_node(name)
if not node is None:
node.delete_rdataset(self.rdclass, rdtype, covers)
if len(node) == 0:
self.delete_node(name)
def replace_rdataset(self, name, replacement):
"""Replace an rdataset at name.
It is not an error if there is no rdataset matching I{replacement}.
Ownership of the I{replacement} object is transferred to the zone;
in other words, this method does not store a copy of I{replacement}
at the node, it stores I{replacement} itself.
If the I{name} node does not exist, it is created.
@param name: the owner name
@type name: DNS.name.Name object or string
@param replacement: the replacement rdataset
@type replacement: dns.rdataset.Rdataset
"""
if replacement.rdclass != self.rdclass:
raise ValueError, 'replacement.rdclass != zone.rdclass'
node = self.find_node(name, True)
node.replace_rdataset(replacement)
def find_rrset(self, name, rdtype, covers=dns.rdatatype.NONE):
"""Look for rdata with the specified name and type in the zone,
and return an RRset encapsulating it.
The I{name}, I{rdtype}, and I{covers} parameters may be
strings, in which case they will be converted to their proper
type.
This method is less efficient than the similar
L{find_rdataset} because it creates an RRset instead of
returning the matching rdataset. It may be more convenient
for some uses since it returns an object which binds the owner
name to the rdata.
This method may not be used to create new nodes or rdatasets;
use L{find_rdataset} instead.
KeyError is raised if the name or type are not found.
Use L{get_rrset} if you want to have None returned instead.
@param name: the owner name to look for
@type name: DNS.name.Name object or string
@param rdtype: the rdata type desired
@type rdtype: int or string
@param covers: the covered type (defaults to None)
@type covers: int or string
@raises KeyError: the node or rdata could not be found
@rtype: dns.rrset.RRset object
"""
name = self._validate_name(name)
if isinstance(rdtype, str):
rdtype = dns.rdatatype.from_text(rdtype)
if isinstance(covers, str):
covers = dns.rdatatype.from_text(covers)
rdataset = self.nodes[name].find_rdataset(self.rdclass, rdtype, covers)
rrset = dns.rrset.RRset(name, self.rdclass, rdtype, covers)
rrset.update(rdataset)
return rrset
def get_rrset(self, name, rdtype, covers=dns.rdatatype.NONE):
"""Look for rdata with the specified name and type in the zone,
and return an RRset encapsulating it.
The I{name}, I{rdtype}, and I{covers} parameters may be
strings, in which case they will be converted to their proper
type.
This method is less efficient than the similar L{get_rdataset}
because it creates an RRset instead of returning the matching
rdataset. It may be more convenient for some uses since it
returns an object which binds the owner name to the rdata.
This method may not be used to create new nodes or rdatasets;
use L{find_rdataset} instead.
None is returned if the name or type are not found.
Use L{find_rrset} if you want to have KeyError raised instead.
@param name: the owner name to look for
@type name: DNS.name.Name object or string
@param rdtype: the rdata type desired
@type rdtype: int or string
@param covers: the covered type (defaults to None)
@type covers: int or string
@rtype: dns.rrset.RRset object
"""
try:
rrset = self.find_rrset(name, rdtype, covers)
except KeyError:
rrset = None
return rrset
def iterate_rdatasets(self, rdtype=dns.rdatatype.ANY,
covers=dns.rdatatype.NONE):
"""Return a generator which yields (name, rdataset) tuples for
all rdatasets in the zone which have the specified I{rdtype}
and I{covers}. If I{rdtype} is dns.rdatatype.ANY, the default,
then all rdatasets will be matched.
@param rdtype: int or string
@type rdtype: int or string
@param covers: the covered type (defaults to None)
@type covers: int or string
"""
if isinstance(rdtype, str):
rdtype = dns.rdatatype.from_text(rdtype)
if isinstance(covers, str):
covers = dns.rdatatype.from_text(covers)
for (name, node) in self.iteritems():
for rds in node:
if rdtype == dns.rdatatype.ANY or \
(rds.rdtype == rdtype and rds.covers == covers):
yield (name, rds)
def iterate_rdatas(self, rdtype=dns.rdatatype.ANY,
covers=dns.rdatatype.NONE):
"""Return a generator which yields (name, ttl, rdata) tuples for
all rdatas in the zone which have the specified I{rdtype}
and I{covers}. If I{rdtype} is dns.rdatatype.ANY, the default,
then all rdatas will be matched.
@param rdtype: int or string
@type rdtype: int or string
@param covers: the covered type (defaults to None)
@type covers: int or string
"""
if isinstance(rdtype, str):
rdtype = dns.rdatatype.from_text(rdtype)
if isinstance(covers, str):
covers = dns.rdatatype.from_text(covers)
for (name, node) in self.iteritems():
for rds in node:
if rdtype == dns.rdatatype.ANY or \
(rds.rdtype == rdtype and rds.covers == covers):
for rdata in rds:
yield (name, rds.ttl, rdata)
    def to_file(self, f, sorted=True, relativize=True, nl=None):
        """Write a zone to a file.
        @param f: file or string. If I{f} is a string, it is treated
        as the name of a file to open.
        @param sorted: if True, the file will be written with the
        names sorted in DNSSEC order from least to greatest. Otherwise
        the names will be written in whatever order they happen to have
        in the zone's dictionary.
        @param relativize: if True, domain names in the output will be
        relativized to the zone's origin (if possible).
        @type relativize: bool
        @param nl: The end of line string. If not specified, the
        output will use the platform's native end-of-line marker (i.e.
        LF on POSIX, CRLF on Windows, CR on Macintosh).
        @type nl: string or None
        """
        # Python 2.3+ can open Unicode filenames; older versions only str.
        if sys.hexversion >= 0x02030000:
            # allow Unicode filenames
            str_type = basestring
        else:
            str_type = str
        # Text mode lets the platform supply native line endings; binary
        # mode is used when the caller provides an explicit terminator.
        if nl is None:
            opts = 'w'
        else:
            opts = 'wb'
        if isinstance(f, str_type):
            f = file(f, opts)
            want_close = True
        else:
            want_close = False
        try:
            if sorted:
                # keys() gives a list here (Python 2), sorted in place.
                names = self.keys()
                names.sort()
            else:
                names = self.iterkeys()
            for n in names:
                l = self[n].to_text(n, origin=self.origin,
                                    relativize=relativize)
                if nl is None:
                    print >> f, l
                else:
                    f.write(l)
                    f.write(nl)
        finally:
            # Only close files this method opened itself.
            if want_close:
                f.close()
def check_origin(self):
"""Do some simple checking of the zone's origin.
@raises dns.zone.NoSOA: there is no SOA RR
@raises dns.zone.NoNS: there is no NS RRset
@raises KeyError: there is no origin node
"""
if self.relativize:
name = dns.name.empty
else:
name = self.origin
if self.get_rdataset(name, dns.rdatatype.SOA) is None:
raise NoSOA
if self.get_rdataset(name, dns.rdatatype.NS) is None:
raise NoNS
class _MasterReader(object):
    """Read a DNS master file
    @ivar tok: The tokenizer
    @type tok: dns.tokenizer.Tokenizer object
    @ivar ttl: The default TTL
    @type ttl: int
    @ivar last_name: The last name read
    @type last_name: dns.name.Name object
    @ivar current_origin: The current origin
    @type current_origin: dns.name.Name object
    @ivar relativize: should names in the zone be relativized?
    @type relativize: bool
    @ivar zone: the zone
    @type zone: dns.zone.Zone object
    @ivar saved_state: saved reader state (used when processing $INCLUDE)
    @type saved_state: list of (tokenizer, current_origin, last_name, file)
    tuples.
    @ivar current_file: the file object of the $INCLUDed file being parsed
    (None if no $INCLUDE is active).
    @ivar allow_include: is $INCLUDE allowed?
    @type allow_include: bool
    @ivar check_origin: should sanity checks of the origin node be done?
    The default is True.
    @type check_origin: bool
    """

    def __init__(self, tok, origin, rdclass, relativize, zone_factory=Zone,
                 allow_include=False, check_origin=True):
        if isinstance(origin, (str, unicode)):
            origin = dns.name.from_text(origin)
        self.tok = tok
        self.current_origin = origin
        self.relativize = relativize
        self.ttl = 0
        self.last_name = None
        self.zone = zone_factory(origin, rdclass, relativize=relativize)
        self.saved_state = []
        self.current_file = None
        self.allow_include = allow_include
        self.check_origin = check_origin

    def _eat_line(self):
        # Discard tokens until the end of the current line (or end of file).
        while 1:
            (ttype, t) = self.tok.get()
            if ttype == dns.tokenizer.EOL or ttype == dns.tokenizer.EOF:
                break

    def _rr_line(self):
        """Process one line from a DNS master file."""
        # Name
        if self.current_origin is None:
            raise UnknownOrigin
        token = self.tok.get(want_leading = True)
        if token[0] != dns.tokenizer.WHITESPACE:
            # An explicit owner name starts the line.
            self.last_name = dns.name.from_text(token[1], self.current_origin)
        else:
            # Leading whitespace means "same owner as the previous record".
            token = self.tok.get()
            if token[0] == dns.tokenizer.EOL or \
               token[0] == dns.tokenizer.EOF:
                # treat leading WS followed by EOL/EOF as if they were EOL/EOF.
                return
            self.tok.unget(token)
        name = self.last_name
        if not name.is_subdomain(self.zone.origin):
            # Out-of-zone data is silently skipped.
            self._eat_line()
            return
        if self.relativize:
            name = name.relativize(self.zone.origin)
        token = self.tok.get()
        if token[0] != dns.tokenizer.IDENTIFIER:
            raise dns.exception.SyntaxError
        # TTL
        try:
            ttl = dns.ttl.from_text(token[1])
            token = self.tok.get()
            if token[0] != dns.tokenizer.IDENTIFIER:
                raise dns.exception.SyntaxError
        except dns.ttl.BadTTL:
            # No explicit TTL on this line; use the current default.
            ttl = self.ttl
        # Class
        try:
            rdclass = dns.rdataclass.from_text(token[1])
            token = self.tok.get()
            if token[0] != dns.tokenizer.IDENTIFIER:
                raise dns.exception.SyntaxError
        except dns.exception.SyntaxError:
            raise dns.exception.SyntaxError
        except:
            # Token was not a class at all; assume the zone's class.
            rdclass = self.zone.rdclass
        if rdclass != self.zone.rdclass:
            raise dns.exception.SyntaxError, "RR class is not zone's class"
        # Type
        try:
            rdtype = dns.rdatatype.from_text(token[1])
        except:
            raise dns.exception.SyntaxError, \
                "unknown rdatatype '%s'" % token[1]
        n = self.zone.nodes.get(name)
        if n is None:
            n = self.zone.node_factory()
            self.zone.nodes[name] = n
        try:
            rd = dns.rdata.from_text(rdclass, rdtype, self.tok,
                                     self.current_origin, False)
        except dns.exception.SyntaxError:
            # Catch and reraise.
            (ty, va) = sys.exc_info()[:2]
            raise ty, va
        except:
            # All exceptions that occur in the processing of rdata
            # are treated as syntax errors. This is not strictly
            # correct, but it is correct almost all of the time.
            # We convert them to syntax errors so that we can emit
            # helpful filename:line info.
            (ty, va) = sys.exc_info()[:2]
            raise dns.exception.SyntaxError, \
                "caught exception %s: %s" % (str(ty), str(va))
        rd.choose_relativity(self.zone.origin, self.relativize)
        covers = rd.covers()
        rds = n.find_rdataset(rdclass, rdtype, covers, True)
        rds.add(rd, ttl)

    def read(self):
        """Read a DNS master file and build a zone object.
        @raises dns.zone.NoSOA: No SOA RR was found at the zone origin
        @raises dns.zone.NoNS: No NS RRset was found at the zone origin
        """
        try:
            while 1:
                token = self.tok.get(True, True)
                if token[0] == dns.tokenizer.EOF:
                    if not self.current_file is None:
                        self.current_file.close()
                    if len(self.saved_state) > 0:
                        # End of an $INCLUDEd file: restore the includer's
                        # saved reader state and continue where it left off.
                        (self.tok,
                         self.current_origin,
                         self.last_name,
                         self.current_file,
                         self.ttl) = self.saved_state.pop(-1)
                        continue
                    break
                elif token[0] == dns.tokenizer.EOL:
                    continue
                elif token[0] == dns.tokenizer.COMMENT:
                    self.tok.get_eol()
                    continue
                elif token[1][0] == '$':
                    # Master file directive ($TTL, $ORIGIN, $INCLUDE).
                    u = token[1].upper()
                    if u == '$TTL':
                        token = self.tok.get()
                        if token[0] != dns.tokenizer.IDENTIFIER:
                            raise dns.exception.SyntaxError, "bad $TTL"
                        self.ttl = dns.ttl.from_text(token[1])
                        self.tok.get_eol()
                    elif u == '$ORIGIN':
                        self.current_origin = self.tok.get_name()
                        self.tok.get_eol()
                        if self.zone.origin is None:
                            self.zone.origin = self.current_origin
                    elif u == '$INCLUDE' and self.allow_include:
                        token = self.tok.get()
                        if token[0] != dns.tokenizer.QUOTED_STRING:
                            raise dns.exception.SyntaxError, \
                                "bad filename in $INCLUDE"
                        filename = token[1]
                        token = self.tok.get()
                        if token[0] == dns.tokenizer.IDENTIFIER:
                            new_origin = dns.name.from_text(token[1], \
                                                            self.current_origin)
                            self.tok.get_eol()
                        elif token[0] != dns.tokenizer.EOL and \
                             token[0] != dns.tokenizer.EOF:
                            raise dns.exception.SyntaxError, \
                                "bad origin in $INCLUDE"
                        else:
                            new_origin = self.current_origin
                        # Push the current reader state and switch to the
                        # included file's tokenizer.
                        self.saved_state.append((self.tok,
                                                 self.current_origin,
                                                 self.last_name,
                                                 self.current_file,
                                                 self.ttl))
                        self.current_file = file(filename, 'r')
                        self.tok = dns.tokenizer.Tokenizer(self.current_file,
                                                           filename)
                        self.current_origin = new_origin
                    else:
                        raise dns.exception.SyntaxError, \
                            "Unknown master file directive '" + u + "'"
                    continue
                self.tok.unget(token)
                self._rr_line()
        except dns.exception.SyntaxError, detail:
            # Re-raise with filename:line context from the tokenizer.
            (filename, line_number) = self.tok.where()
            if detail is None:
                detail = "syntax error"
            raise dns.exception.SyntaxError, \
                "%s:%d: %s" % (filename, line_number, detail)
        # Now that we're done reading, do some basic checking of the zone.
        if self.check_origin:
            self.zone.check_origin()
def from_text(text, origin = None, rdclass = dns.rdataclass.IN,
              relativize = True, zone_factory=Zone, filename=None,
              allow_include=False, check_origin=True):
    """Build a zone object from a master file format string.
    @param text: the master file format input
    @type text: string.
    @param origin: The origin of the zone; if not specified, the first
    $ORIGIN statement in the master file will determine the origin of the
    zone.
    @type origin: dns.name.Name object or string
    @param rdclass: The zone's rdata class; the default is class IN.
    @type rdclass: int
    @param relativize: should names be relativized? The default is True
    @type relativize: bool
    @param zone_factory: The zone factory to use
    @type zone_factory: function returning a Zone
    @param filename: The filename to emit when describing where an error
    occurred; the default is '<string>'.
    @type filename: string
    @param allow_include: is $INCLUDE allowed?
    @type allow_include: bool
    @param check_origin: should sanity checks of the origin node be done?
    The default is True.
    @type check_origin: bool
    @raises dns.zone.NoSOA: No SOA RR was found at the zone origin
    @raises dns.zone.NoNS: No NS RRset was found at the zone origin
    @rtype: dns.zone.Zone object
    """
    # 'text' can also be a file, but we don't publish that fact
    # since it's an implementation detail. The official file
    # interface is from_file().
    filename = '<string>' if filename is None else filename
    tokenizer = dns.tokenizer.Tokenizer(text, filename)
    reader = _MasterReader(tokenizer, origin, rdclass, relativize,
                           zone_factory, allow_include=allow_include,
                           check_origin=check_origin)
    reader.read()
    return reader.zone
def from_file(f, origin = None, rdclass = dns.rdataclass.IN,
              relativize = True, zone_factory=Zone, filename=None,
              allow_include=True, check_origin=True):
    """Read a master file and build a zone object.
    @param f: file or string. If I{f} is a string, it is treated
    as the name of a file to open.
    @param origin: The origin of the zone; if not specified, the first
    $ORIGIN statement in the master file will determine the origin of the
    zone.
    @type origin: dns.name.Name object or string
    @param rdclass: The zone's rdata class; the default is class IN.
    @type rdclass: int
    @param relativize: should names be relativized? The default is True
    @type relativize: bool
    @param zone_factory: The zone factory to use
    @type zone_factory: function returning a Zone
    @param filename: The filename to emit when describing where an error
    occurred; the default is '<file>', or the value of I{f} if I{f} is a
    string.
    @type filename: string
    @param allow_include: is $INCLUDE allowed?
    @type allow_include: bool
    @param check_origin: should sanity checks of the origin node be done?
    The default is True.
    @type check_origin: bool
    @raises dns.zone.NoSOA: No SOA RR was found at the zone origin
    @raises dns.zone.NoNS: No NS RRset was found at the zone origin
    @rtype: dns.zone.Zone object
    """
    # Python 2.3+ can open Unicode filenames and supports universal newlines.
    if sys.hexversion >= 0x02030000:
        # allow Unicode filenames; turn on universal newline support
        str_type = basestring
        opts = 'rU'
    else:
        str_type = str
        opts = 'r'
    if isinstance(f, str_type):
        if filename is None:
            filename = f
        f = file(f, opts)
        want_close = True
    else:
        if filename is None:
            filename = '<file>'
        want_close = False
    try:
        # from_text() also accepts an open file (implementation detail).
        z = from_text(f, origin, rdclass, relativize, zone_factory,
                      filename, allow_include, check_origin)
    finally:
        # Only close files this function opened itself.
        if want_close:
            f.close()
    return z
def from_xfr(xfr, zone_factory=Zone, relativize=True):
    """Convert the output of a zone transfer generator into a zone object.
    @param xfr: The xfr generator
    @type xfr: generator of dns.message.Message objects
    @param relativize: should names be relativized? The default is True.
    It is essential that the relativize setting matches the one specified
    to dns.query.xfr().
    @type relativize: bool
    @raises dns.zone.NoSOA: No SOA RR was found at the zone origin
    @raises dns.zone.NoNS: No NS RRset was found at the zone origin
    @rtype: dns.zone.Zone object
    """
    zone = None
    for message in xfr:
        if zone is None:
            # Lazily create the zone from the first message's answer.
            origin = message.origin if relativize else message.answer[0].name
            rdclass = message.answer[0].rdclass
            zone = zone_factory(origin, rdclass, relativize=relativize)
        for rrset in message.answer:
            znode = zone.nodes.get(rrset.name)
            if not znode:
                znode = zone.node_factory()
                zone.nodes[rrset.name] = znode
            zrds = znode.find_rdataset(rrset.rdclass, rrset.rdtype,
                                       rrset.covers, True)
            zrds.update_ttl(rrset.ttl)
            for rd in rrset:
                rd.choose_relativity(zone.origin, relativize)
                zrds.add(rd)
    zone.check_origin()
    return zone
|
import random
from django.db import models
from ..config import DELETED_VISIBLE_BY_FIELD
from ..managers import SafeDeleteManager
from ..models import SafeDeleteMixin
from .testcase import SafeDeleteTestCase
class OtherModel(models.Model):
    """Plain (non-safedelete) model used as a ForeignKey target in these tests."""
    pass
class FieldManager(SafeDeleteManager):
    """Manager with DELETED_VISIBLE_BY_FIELD visibility; per the tests below,
    soft-deleted rows are hidden from bulk queries but can still be fetched
    by a direct get() on the pk (see test_get_field)."""
    _safedelete_visibility = DELETED_VISIBLE_BY_FIELD
class QuerySetModel(SafeDeleteMixin):
    """Soft-deletable model under test, managed by FieldManager."""
    # Related object, exercised by select_related()/FK-filter tests.
    other = models.ForeignKey(
        OtherModel,
        on_delete=models.CASCADE
    )
    # Timestamp used by the latest()/earliest() tests.
    creation_date = models.DateTimeField('Created', auto_now_add=True)
    objects = FieldManager()
class QuerySetTestCase(SafeDeleteTestCase):
    """Queryset behaviour of QuerySetModel under FieldManager.
    Throughout these tests ``objects`` excludes the soft-deleted instance
    from bulk queries (count/iterator/filter/aggregate/...), while
    ``all_objects`` still returns it; a direct get() by pk also still
    succeeds (test_get_field).
    """
    def setUp(self):
        # One instance, soft-deleted; every test starts from this state.
        self.other = OtherModel.objects.create()
        self.instance = QuerySetModel.objects.create(
            other=self.other
        )
        self.instance.delete()
    def test_select_related(self):
        # The related object must arrive in the same (single) query.
        with self.assertNumQueries(1):
            model = QuerySetModel.objects.select_related(
                'other',
            ).get(
                pk=self.instance.pk
            )
            str(model.other)
    def test_get_field(self):
        # A direct get() by pk still reaches the soft-deleted instance.
        QuerySetModel.objects.get(
            pk=self.instance.pk
        )
    def test_filter_get(self):
        # get() after a non-matching filter() must not bypass that filter.
        self.assertRaises(
            QuerySetModel.DoesNotExist,
            QuerySetModel.objects.filter(
                pk=self.instance.pk + 1,
            ).get,
            pk=self.instance.pk
        )
    def test_filter_filter(self):
        # Chained filters with contradictory pks must match nothing.
        self.assertEqual(
            QuerySetModel.objects.filter(
                pk=self.instance.pk + 1,
            ).filter(
                pk=self.instance.pk
            ).count(),
            0
        )
    def test_count(self):
        # Bulk count: hidden from objects, visible to all_objects.
        self.assertEqual(
            QuerySetModel.objects.count(),
            0
        )
        self.assertEqual(
            QuerySetModel.all_objects.count(),
            1
        )
    def test_iterator(self):
        self.assertEqual(
            len(list(QuerySetModel.objects.iterator())),
            0
        )
        self.assertEqual(
            len(list(QuerySetModel.all_objects.iterator())),
            1
        )
    def test_exists(self):
        # Filtering on a non-pk field must hide the deleted row.
        self.assertFalse(
            QuerySetModel.objects.filter(
                other_id=self.other.id
            ).exists()
        )
        self.assertTrue(
            QuerySetModel.all_objects.filter(
                other_id=self.other.id
            ).exists()
        )
    def test_aggregate(self):
        self.assertEqual(
            QuerySetModel.objects.aggregate(
                max_id=models.Max('id')
            ),
            {
                'max_id': None
            }
        )
        self.assertEqual(
            QuerySetModel.all_objects.aggregate(
                max_id=models.Max('id')
            ),
            {
                'max_id': self.instance.id
            }
        )
    def test_first(self):
        self.assertEqual(
            QuerySetModel.objects.filter(id=self.instance.pk).first(),
            None)
        self.assertEqual(
            QuerySetModel.all_objects.filter(id=self.instance.pk).first(),
            self.instance)
    def test_last(self):
        self.assertEqual(
            QuerySetModel.objects.filter(id=self.instance.pk).last(),
            None)
        self.assertEqual(
            QuerySetModel.all_objects.filter(id=self.instance.pk).last(),
            self.instance)
    def test_latest(self):
        self.assertRaises(
            QuerySetModel.DoesNotExist,
            QuerySetModel.objects.filter(id=self.instance.pk).latest,
            'creation_date')
        self.assertEqual(
            QuerySetModel.all_objects.filter(id=self.instance.pk).latest('creation_date'),
            self.instance)
    def test_earliest(self):
        self.assertRaises(
            QuerySetModel.DoesNotExist,
            QuerySetModel.objects.filter(id=self.instance.pk).earliest,
            'creation_date')
        self.assertEqual(
            QuerySetModel.all_objects.filter(id=self.instance.pk).earliest('creation_date'),
            self.instance)
    def test_all(self):
        # A random number of extra soft-deleted instances.
        amount = random.randint(1, 4)
        # Create an other object for more testing
        [QuerySetModel.objects.create(other=self.other).delete()
         for x in range(amount)]
        self.assertEqual(
            len(QuerySetModel.objects.all()),
            0)
        self.assertEqual(
            len(QuerySetModel.all_objects.all()),
            amount + 1)  # Count for the already created instance
    def test_all_slicing(self):
        amount = random.randint(1, 4)
        # Create an other object for more testing
        [QuerySetModel.objects.create(other=self.other).delete()
         for x in range(amount)]
        self.assertEqual(
            len(QuerySetModel.objects.all()[:amount]),
            0)
        self.assertEqual(
            len(QuerySetModel.all_objects.all()[1:amount]),
            amount - 1)
Add test for reproducing TypeError: _clone() takes exactly 1 argument in django 1.8.* (#92)
import random
from django.db import models
from ..config import DELETED_VISIBLE_BY_FIELD
from ..managers import SafeDeleteManager
from ..models import SafeDeleteMixin
from .testcase import SafeDeleteTestCase
class OtherModel(models.Model):
    """Plain (non-safedelete) model used as a ForeignKey target in these tests."""
    pass
class FieldManager(SafeDeleteManager):
    """Manager with DELETED_VISIBLE_BY_FIELD visibility; per the tests below,
    soft-deleted rows are hidden from bulk queries but can still be fetched
    by a direct get() on the pk (see test_get_field)."""
    _safedelete_visibility = DELETED_VISIBLE_BY_FIELD
class QuerySetModel(SafeDeleteMixin):
    """Soft-deletable model under test, managed by FieldManager."""
    # Related object, exercised by select_related()/FK-filter tests.
    other = models.ForeignKey(
        OtherModel,
        on_delete=models.CASCADE
    )
    # Timestamp used by the latest()/earliest() tests.
    creation_date = models.DateTimeField('Created', auto_now_add=True)
    objects = FieldManager()
class QuerySetTestCase(SafeDeleteTestCase):
    """Queryset behaviour of QuerySetModel under FieldManager.
    Throughout these tests ``objects`` excludes the soft-deleted instance
    from bulk queries (count/iterator/filter/aggregate/...), while
    ``all_objects`` still returns it; a direct get() by pk also still
    succeeds (test_get_field).
    """
    def setUp(self):
        # One instance, soft-deleted; every test starts from this state.
        self.other = OtherModel.objects.create()
        self.instance = QuerySetModel.objects.create(
            other=self.other
        )
        self.instance.delete()
    def test_select_related(self):
        # The related object must arrive in the same (single) query.
        with self.assertNumQueries(1):
            model = QuerySetModel.objects.select_related(
                'other',
            ).get(
                pk=self.instance.pk
            )
            str(model.other)
    def test_filter_get(self):
        # get() after a non-matching filter() must not bypass that filter.
        self.assertRaises(
            QuerySetModel.DoesNotExist,
            QuerySetModel.objects.filter(
                pk=self.instance.pk + 1,
            ).get,
            pk=self.instance.pk
        )
    def test_filter_filter(self):
        # Chained filters with contradictory pks must match nothing.
        self.assertEqual(
            QuerySetModel.objects.filter(
                pk=self.instance.pk + 1,
            ).filter(
                pk=self.instance.pk
            ).count(),
            0
        )
    def test_get_field(self):
        # A direct get() by pk still reaches the soft-deleted instance.
        QuerySetModel.objects.get(
            pk=self.instance.pk
        )
    def test_count(self):
        # Bulk count: hidden from objects, visible to all_objects.
        self.assertEqual(
            QuerySetModel.objects.count(),
            0
        )
        self.assertEqual(
            QuerySetModel.all_objects.count(),
            1
        )
    def test_iterator(self):
        self.assertEqual(
            len(list(QuerySetModel.objects.iterator())),
            0
        )
        self.assertEqual(
            len(list(QuerySetModel.all_objects.iterator())),
            1
        )
    def test_exists(self):
        # Filtering on a non-pk field must hide the deleted row.
        self.assertFalse(
            QuerySetModel.objects.filter(
                other_id=self.other.id
            ).exists()
        )
        self.assertTrue(
            QuerySetModel.all_objects.filter(
                other_id=self.other.id
            ).exists()
        )
    def test_aggregate(self):
        self.assertEqual(
            QuerySetModel.objects.aggregate(
                max_id=models.Max('id')
            ),
            {
                'max_id': None
            }
        )
        self.assertEqual(
            QuerySetModel.all_objects.aggregate(
                max_id=models.Max('id')
            ),
            {
                'max_id': self.instance.id
            }
        )
    def test_first(self):
        self.assertEqual(
            QuerySetModel.objects.filter(id=self.instance.pk).first(),
            None)
        self.assertEqual(
            QuerySetModel.all_objects.filter(id=self.instance.pk).first(),
            self.instance)
    def test_last(self):
        self.assertEqual(
            QuerySetModel.objects.filter(id=self.instance.pk).last(),
            None)
        self.assertEqual(
            QuerySetModel.all_objects.filter(id=self.instance.pk).last(),
            self.instance)
    def test_latest(self):
        self.assertRaises(
            QuerySetModel.DoesNotExist,
            QuerySetModel.objects.filter(id=self.instance.pk).latest,
            'creation_date')
        self.assertEqual(
            QuerySetModel.all_objects.filter(id=self.instance.pk).latest('creation_date'),
            self.instance)
    def test_earliest(self):
        self.assertRaises(
            QuerySetModel.DoesNotExist,
            QuerySetModel.objects.filter(id=self.instance.pk).earliest,
            'creation_date')
        self.assertEqual(
            QuerySetModel.all_objects.filter(id=self.instance.pk).earliest('creation_date'),
            self.instance)
    def test_all(self):
        # A random number of extra soft-deleted instances.
        amount = random.randint(1, 4)
        # Create an other object for more testing
        [QuerySetModel.objects.create(other=self.other).delete()
         for x in range(amount)]
        self.assertEqual(
            len(QuerySetModel.objects.all()),
            0)
        self.assertEqual(
            len(QuerySetModel.all_objects.all()),
            amount + 1)  # Count for the already created instance
    def test_all_slicing(self):
        amount = random.randint(1, 4)
        # Create an other object for more testing
        [QuerySetModel.objects.create(other=self.other).delete()
         for x in range(amount)]
        self.assertEqual(
            len(QuerySetModel.objects.all()[:amount]),
            0)
        self.assertEqual(
            len(QuerySetModel.all_objects.all()[1:amount]),
            amount - 1)
    def test_values_list(self):
        # Regression test for "TypeError: _clone() takes exactly 1 argument"
        # on Django 1.8.* when indexing a values_list() queryset (#92).
        instance = QuerySetModel.objects.create(
            other=self.other
        )
        self.assertEqual(
            1,
            len(QuerySetModel.objects
                .filter(id=instance.id)
                .values_list('pk', flat=True))
        )
        # BUGFIX: the element at [0] is the pk itself (an int); the previous
        # len(...) around it always raised TypeError. Compare the value
        # directly against instance.id.
        self.assertEqual(
            instance.id,
            QuerySetModel.objects
            .filter(id=instance.id)
            .values_list('pk', flat=True)[0]
        )
|
fixes typos in docstring
|
# pylint: disable=missing-docstring,no-self-use,no-value-for-parameter,too-many-lines
import logging
import os
import sys
import click
from pkg_resources import parse_version
from ooinstall import openshift_ansible, utils
from ooinstall.oo_config import Host, OOConfig, OOConfigInvalidHostError, Role
from ooinstall.variants import find_variant, get_variant_version_combos
# Installer logging: the console logger is effectively silenced (CRITICAL),
# while everything at DEBUG and above is written to /tmp/installer.txt as the
# detailed record of the run.
INSTALLER_LOG = logging.getLogger('installer')
INSTALLER_LOG.setLevel(logging.CRITICAL)
INSTALLER_FILE_HANDLER = logging.FileHandler('/tmp/installer.txt')
INSTALLER_FILE_HANDLER.setFormatter(
    logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
# Example output:
# 2016-08-23 07:34:58,480 - installer - DEBUG - Going to 'load_system_facts'
INSTALLER_FILE_HANDLER.setLevel(logging.DEBUG)
INSTALLER_LOG.addHandler(INSTALLER_FILE_HANDLER)
# Ansible configuration files shipped with atomic-openshift-utils; the quiet
# variant suppresses Ansible's own console output.
DEFAULT_ANSIBLE_CONFIG = '/usr/share/atomic-openshift-utils/ansible.cfg'
QUIET_ANSIBLE_CONFIG = '/usr/share/atomic-openshift-utils/ansible-quiet.cfg'
DEFAULT_PLAYBOOK_DIR = '/usr/share/ansible/openshift-ansible/'
# Maps an installed minor version to the playbook for an in-place minor
# upgrade ('minor_playbook') and, where available, to the next major release
# ('major_playbook'/'major_version').
UPGRADE_MAPPINGS = {
    '3.6': {
        'minor_version': '3.6',
        'minor_playbook': 'v3_6/upgrade.yml',
        'major_playbook': 'v3_7/upgrade.yml',
        'major_version': '3.7',
    },
    '3.7': {
        'minor_version': '3.7',
        'minor_playbook': 'v3_7/upgrade.yml',
    },
}
def validate_ansible_dir(path):
    """Validate the configured Ansible playbook directory value.
    :param path: candidate playbook directory path
    :returns: ``path`` unchanged when it is non-empty
    :raises click.BadParameter: when ``path`` is empty or None
    """
    # NOTE: only a non-empty value is required here; whether the directory
    # actually exists is deliberately not checked.
    if not path:
        raise click.BadParameter('An Ansible path must be provided')
    return path
def validate_prompt_hostname(hostname):
    """Return *hostname* when it is blank or passes the hostname check;
    otherwise raise click.BadParameter so the prompt re-asks."""
    if hostname != '' and not utils.is_valid_hostname(hostname):
        raise click.BadParameter('Invalid hostname. Please double-check this value and re-enter it.')
    return hostname
def get_ansible_ssh_user():
    """Prompt the operator for the ssh user used to reach remote hosts.
    Returns the entered user name; defaults to 'root'.
    """
    click.clear()
    message = """
This installation process involves connecting to remote hosts via ssh. Any
account may be used. However, if a non-root account is used, then it must have
passwordless sudo access.
"""
    click.echo(message)
    return click.prompt('User for ssh access', default='root')
def get_routingconfig_subdomain():
    """Prompt for an override of the default subdomain for exposed routes.
    Returns '' when the operator presses ENTER to keep the default.
    """
    click.clear()
    message = """
You might want to override the default subdomain used for exposed routes. If you don't know what this is, use the default value.
"""
    click.echo(message)
    return click.prompt('New default subdomain (ENTER for none)', default='')
def collect_hosts(oo_cfg, existing_env=False, masters_set=False, print_summary=True):
    """
    Collect host information from user. This will later be filled in using
    Ansible.
    Returns: a list of host information collected from the user
    """
    click.clear()
    click.echo('*** Host Configuration ***')
    message = """
You must now specify the hosts that will compose your OpenShift cluster.
Please enter an IP address or hostname to connect to for each system in the
cluster. You will then be prompted to identify what role you want this system to
serve in the cluster.
OpenShift masters serve the API and web console and coordinate the jobs to run
across the environment. Optionally, you can specify multiple master systems for
a high-availability (HA) deployment. If you choose an HA deployment, then you
are prompted to identify a *separate* system to act as the load balancer for
your cluster once you define all masters and nodes.
Any masters configured as part of this installation process are also
configured as nodes. This enables the master to proxy to pods
from the API. By default, this node is unschedulable, but this can be changed
after installation with the 'oadm manage-node' command.
OpenShift nodes provide the runtime environments for containers. They host the
required services to be managed by the master.
http://docs.openshift.com/enterprise/latest/architecture/infrastructure_components/kubernetes_infrastructure.html#master
http://docs.openshift.com/enterprise/latest/architecture/infrastructure_components/kubernetes_infrastructure.html#node
"""
    click.echo(message)
    hosts = []
    roles = set(['master', 'node', 'storage', 'etcd'])
    more_hosts = True
    num_masters = 0
    while more_hosts:
        host_props = {}
        host_props['roles'] = []
        host_props['connect_to'] = click.prompt('Enter hostname or IP address',
                                                value_proc=validate_prompt_hostname)
        if not masters_set:
            if click.confirm('Will this host be an OpenShift master?'):
                # Masters are also etcd members in this flow.
                host_props['roles'].append('master')
                host_props['roles'].append('etcd')
                num_masters += 1
                # Variant 3.0 supports a single master only; stop asking.
                if oo_cfg.settings['variant_version'] == '3.0':
                    masters_set = True
        # Every collected host (masters included) is configured as a node.
        host_props['roles'].append('node')
        host_props['containerized'] = False
        if oo_cfg.settings['variant_version'] != '3.0':
            rpm_or_container = \
                click.prompt('Will this host be RPM or Container based (rpm/container)?',
                             type=click.Choice(['rpm', 'container']),
                             default='rpm')
            if rpm_or_container == 'container':
                host_props['containerized'] = True
        host_props['new_host'] = existing_env
        host = Host(**host_props)
        hosts.append(host)
        if print_summary:
            print_installation_summary(hosts, oo_cfg.settings['variant_version'])
        # If we have one master, this is enough for an all-in-one deployment,
        # thus we can start asking if you want to proceed. Otherwise we assume
        # you must.
        if masters_set or num_masters != 2:
            more_hosts = click.confirm('Do you want to add additional hosts?')
    if num_masters > 2:
        # HA deployment: a separate load balancer host may be collected.
        master_lb = collect_master_lb(hosts)
        if master_lb:
            hosts.append(master_lb)
            roles.add('master_lb')
    else:
        set_cluster_hostname(oo_cfg)
    if not existing_env:
        collect_storage_host(hosts)
    return hosts, roles
# pylint: disable=too-many-branches
def print_installation_summary(hosts, version=None, verbose=True):
    """
    Displays a summary of all hosts configured thus far, and what role each
    will play.
    Shows total nodes/masters, hints for performing/modifying the deployment
    with additional setup, warnings for invalid or sub-optimal configurations.
    """
    click.clear()
    click.echo('*** Installation Summary ***\n')
    click.echo('Hosts:')
    for host in hosts:
        print_host_summary(hosts, host)
    masters = [host for host in hosts if host.is_master()]
    nodes = [host for host in hosts if host.is_node()]
    # Nodes that are not also masters.
    dedicated_nodes = [host for host in hosts if host.is_node() and not host.is_master()]
    click.echo('')
    click.echo('Total OpenShift masters: %s' % len(masters))
    click.echo('Total OpenShift nodes: %s' % len(nodes))
    if verbose:
        # No HA hint for version 3.0 (single-master only elsewhere in this flow).
        if len(masters) == 1 and version != '3.0':
            ha_hint_message = """
NOTE: Add a total of 3 or more masters to perform an HA installation."""
            click.echo(ha_hint_message)
        elif len(masters) == 2:
            min_masters_message = """
WARNING: A minimum of 3 masters are required to perform an HA installation.
Please add one more to proceed."""
            click.echo(min_masters_message)
        elif len(masters) >= 3:
            ha_message = """
NOTE: Multiple masters specified, this will be an HA deployment with a separate
etcd cluster. You will be prompted to provide the FQDN of a load balancer and
a host for storage once finished entering hosts.
"""
            click.echo(ha_message)
            dedicated_nodes_message = """
WARNING: Dedicated nodes are recommended for an HA deployment. If no dedicated
nodes are specified, each configured master will be marked as a schedulable
node."""
            min_ha_nodes_message = """
WARNING: A minimum of 3 dedicated nodes are recommended for an HA
deployment."""
            if len(dedicated_nodes) == 0:
                click.echo(dedicated_nodes_message)
            elif len(dedicated_nodes) < 3:
                click.echo(min_ha_nodes_message)
    click.echo('')
def print_host_summary(all_hosts, host):
    """Echo a short summary for one host: its address and one line per role."""
    summary = ["- %s" % host.connect_to]
    if host.is_master():
        summary.append(" - OpenShift master")
    if host.is_node():
        if host.is_dedicated_node():
            summary.append(" - OpenShift node (Dedicated)")
        elif host.is_schedulable_node(all_hosts):
            summary.append(" - OpenShift node")
        else:
            summary.append(" - OpenShift node (Unscheduled)")
    if host.is_master_lb():
        lb_label = (" - Load Balancer (Preconfigured)"
                    if host.preconfigured
                    else " - Load Balancer (HAProxy)")
        summary.append(lb_label)
    if host.is_etcd():
        summary.append(" - Etcd")
    if host.is_storage():
        summary.append(" - Storage")
    if host.new_host:
        summary.append(" - NEW")
    for line in summary:
        click.echo(line)
def collect_master_lb(hosts):
    """
    Get a valid load balancer from the user and append it to the list of
    hosts.
    Ensure user does not specify a system already used as a master/node as
    this is an invalid configuration.
    Returns a Host with role 'master_lb', or None if the prompt was left blank.
    """
    message = """
Setting up high-availability masters requires a load balancing solution.
Please provide the FQDN of a host that will be configured as a proxy. This
can be either an existing load balancer configured to balance all masters on
port 8443 or a new host that will have HAProxy installed on it.
If the host provided is not yet configured, a reference HAProxy load
balancer will be installed. It's important to note that while the rest of the
environment will be fault-tolerant, this reference load balancer will not be.
It can be replaced post-installation with a load balancer with the same
hostname.
"""
    click.echo(message)
    host_props = {}
    # Using an embedded function here so we have access to the hosts list:
    def validate_prompt_lb(hostname):
        # Run the standard hostname check first:
        hostname = validate_prompt_hostname(hostname)
        # Make sure this host wasn't already specified:
        for host in hosts:
            if host.connect_to == hostname and (host.is_master() or host.is_node()):
                raise click.BadParameter('Cannot re-use "%s" as a load balancer, '
                                         'please specify a separate host' % hostname)
        return hostname
    lb_hostname = click.prompt('Enter hostname or IP address',
                               value_proc=validate_prompt_lb)
    if lb_hostname:
        host_props['connect_to'] = lb_hostname
        install_haproxy = \
            click.confirm('Should the reference HAProxy load balancer be installed on this host?')
        # Declining the HAProxy install means the LB is treated as
        # preconfigured by the operator.
        host_props['preconfigured'] = not install_haproxy
        host_props['roles'] = ['master_lb']
        return Host(**host_props)
    else:
        # Blank answer: no load balancer host is added.
        return None
def set_cluster_hostname(oo_cfg):
    """Prompt for an optional cluster hostname override (single-master case)
    and record it in both the internal and public cluster hostname variables."""
    # First master, or None when the deployment has no master host.
    first_master = next((host for host in oo_cfg.deployment.hosts if host.is_master()), None)
    message = """
You have chosen to install a single master cluster (non-HA).
In a single master cluster, the cluster host name (Ansible variable openshift_master_cluster_public_hostname) is set by default to the host name of the single master. In a multiple master (HA) cluster, the FQDN of a host must be provided that will be configured as a proxy. This could be either an existing load balancer configured to balance all masters on
port 8443 or a new host that would have HAProxy installed on it.
(Optional)
If you want to override the cluster host name now to something other than the default (the host name of the single master), or if you think you might add masters later to become an HA cluster and want to future proof your cluster host name choice, please provide a FQDN. Otherwise, press ENTER to continue and accept the default.
"""
    click.echo(message)
    # NOTE(review): the default relies on Host.__str__ rendering the hostname;
    # if no master exists, str(None) makes the default the literal 'None' —
    # confirm this path is unreachable when this function is called.
    cluster_hostname = click.prompt('Enter hostname or IP address',
                                    default=str(first_master))
    oo_cfg.deployment.variables['openshift_master_cluster_hostname'] = cluster_hostname
    oo_cfg.deployment.variables['openshift_master_cluster_public_hostname'] = cluster_hostname
def collect_storage_host(hosts):
    """
    Get a valid host for storage from the user and append it to the list of
    hosts.

    If the chosen host is already a node, the 'storage' role is added to it
    in place; otherwise a new Host is appended to *hosts*.
    """
    message = """
Setting up high-availability masters requires a storage host. Please provide a
host that will be configured as a Registry Storage.
Note: Containerized storage hosts are not currently supported.
"""
    click.echo(message)
    host_props = {}

    # Default the prompt to the first master's address.
    first_master = next(host for host in hosts if host.is_master())
    hostname_or_ip = click.prompt('Enter hostname or IP address',
                                  value_proc=validate_prompt_hostname,
                                  default=first_master.connect_to)
    existing, existing_host = is_host_already_node_or_master(hostname_or_ip, hosts)
    if existing and existing_host.is_node():
        # Reuse the existing node rather than creating a duplicate entry.
        existing_host.roles.append('storage')
    else:
        host_props['connect_to'] = hostname_or_ip
        host_props['preconfigured'] = False
        host_props['roles'] = ['storage']
        storage = Host(**host_props)
        hosts.append(storage)
def is_host_already_node_or_master(hostname, hosts):
    """Return (found, host): whether *hostname* already names a master or node
    in *hosts*. If several entries match, the last match wins (full scan,
    mirroring the original no-break loop)."""
    match = None
    for candidate in hosts:
        if candidate.connect_to == hostname and (candidate.is_master() or candidate.is_node()):
            match = candidate
    return match is not None, match
def confirm_hosts_facts(oo_cfg, callback_facts):
    """Show the user the facts gathered from each host and ask for
    confirmation. Mutates each host's ip/public_ip/hostname/public_hostname
    attributes from *callback_facts*. Exits the process if the user rejects
    the facts (after saving the config so they can edit it)."""
    hosts = oo_cfg.deployment.hosts
    click.clear()
    message = """
The following is a list of the facts gathered from the provided hosts. The
hostname for a system inside the cluster is often different from the hostname
that is resolveable from command-line or web clients, therefore these settings
cannot be validated automatically.
For some cloud providers, the installer is able to gather metadata exposed in
the instance, so reasonable defaults will be provided.
Please confirm that they are correct before moving forward.
"""
    notes = """
Format:
connect_to,IP,public IP,hostname,public hostname
Notes:
 * The installation host is the hostname from the installer's perspective.
 * The IP of the host should be the internal IP of the instance.
 * The public IP should be the externally accessible IP associated with the instance
 * The hostname should resolve to the internal IP from the instances
   themselves.
 * The public hostname should resolve to the external IP from hosts outside of
   the cloud.
"""

    # For testing purposes we need to click.echo only once, so build up
    # the message:
    output = message

    default_facts_lines = []
    # NOTE(review): the per-host dicts placed in default_facts are never
    # populated below — callers appear to use only the keys; confirm.
    default_facts = {}
    for host in hosts:
        if host.preconfigured:
            # Preconfigured hosts (e.g. user-managed LBs) have no gathered facts.
            continue
        try:
            default_facts[host.connect_to] = {}
            # Copy the gathered facts onto the Host object in place.
            host.ip = callback_facts[host.connect_to]["common"]["ip"]
            host.public_ip = callback_facts[host.connect_to]["common"]["public_ip"]
            host.hostname = callback_facts[host.connect_to]["common"]["hostname"]
            host.public_hostname = callback_facts[host.connect_to]["common"]["public_hostname"]
        except KeyError:
            # Missing facts for this host: report it and keep going.
            click.echo("Problem fetching facts from {}".format(host.connect_to))
            continue

        default_facts_lines.append(",".join([host.connect_to,
                                             host.ip,
                                             host.public_ip,
                                             host.hostname,
                                             host.public_hostname]))
        output = "%s\n%s" % (output, ",".join([host.connect_to,
                                               host.ip,
                                               host.public_ip,
                                               host.hostname,
                                               host.public_hostname]))

    output = "%s\n%s" % (output, notes)
    click.echo(output)
    facts_confirmed = click.confirm("Do the above facts look correct?")
    if not facts_confirmed:
        message = """
Edit %s with the desired values and run `atomic-openshift-installer --unattended install` to restart the install.
""" % oo_cfg.config_path
        click.echo(message)
        # Make sure we actually write out the config file.
        oo_cfg.save_to_disk()
        sys.exit(0)
    return default_facts
def check_hosts_config(oo_cfg, unattended):
    """Validate the host layout (HA master count, load balancer, dedicated
    nodes). Exits the process on an invalid configuration; warns (or asks
    for confirmation in interactive mode) on sub-optimal ones."""
    click.clear()
    masters = [host for host in oo_cfg.deployment.hosts if host.is_master()]

    # Exactly two masters is never valid: HA needs 3+, non-HA needs 1.
    if len(masters) == 2:
        click.echo("A minimum of 3 masters are required for HA deployments.")
        sys.exit(1)

    if len(masters) > 1:
        master_lb = [host for host in oo_cfg.deployment.hosts if host.is_master_lb()]
        if len(master_lb) > 1:
            click.echo('ERROR: More than one master load balancer specified. Only one is allowed.')
            sys.exit(1)
        elif len(master_lb) == 1:
            # The LB must be a separate system, not a master/node.
            if master_lb[0].is_master() or master_lb[0].is_node():
                click.echo('ERROR: The master load balancer is configured as a master or node. '
                           'Please correct this.')
                sys.exit(1)
        else:
            message = """
ERROR: No master load balancer specified in config. You must provide the FQDN
of a load balancer to balance the API (port 8443) on all master hosts.
https://docs.openshift.org/latest/install_config/install/advanced_install.html#multiple-masters
"""
            click.echo(message)
            sys.exit(1)

    dedicated_nodes = [host for host in oo_cfg.deployment.hosts
                       if host.is_node() and not host.is_master()]
    if len(dedicated_nodes) == 0:
        message = """
WARNING: No dedicated nodes specified. By default, colocated masters have
their nodes set to unschedulable.  If you proceed all nodes will be labelled
as schedulable.
"""
        # Unattended runs can't prompt, so only print the warning.
        if unattended:
            click.echo(message)
        else:
            confirm_continue(message)

    return
def get_variant_and_version(multi_master=False):
    """Present the installable variants as a numbered menu and return the
    (variant, version) pair the user selects (defaults to the first)."""
    combos = get_variant_version_combos()

    menu = "\nWhich variant would you like to install?\n\n"
    for index, (variant, _) in enumerate(combos, start=1):
        menu = "%s\n(%s) %s" % (menu, index, variant.description)
    menu = "%s\n" % menu

    click.echo(menu)
    if multi_master:
        click.echo('NOTE: 3.0 installations are not')
    # Menu is 1-based; the combos list is 0-based.
    choice = click.prompt("Choose a variant from above: ", default=1)
    return combos[choice - 1]
def confirm_continue(message):
    """Optionally echo *message*, then abort the program unless the user
    confirms they are ready to continue."""
    if message:
        click.echo(message)
    # abort=True raises click.Abort (terminating the run) on a "no" answer.
    click.confirm("Are you ready to continue?", default=False, abort=True)
    return
def error_if_missing_info(oo_cfg):
    """Validate that an unattended-install config carries everything needed
    (hosts, ssh user, installable variant, role definitions); exits with
    status 1 on the first missing piece."""
    missing_info = False
    if not oo_cfg.deployment.hosts:
        missing_info = True
        click.echo('For unattended installs, hosts must be specified on the '
                   'command line or in the config file: %s' % oo_cfg.config_path)
        sys.exit(1)

    if 'ansible_ssh_user' not in oo_cfg.deployment.variables:
        click.echo("Must specify ansible_ssh_user in configuration file.")
        sys.exit(1)

    # Lookup a variant based on the key we were given:
    if not oo_cfg.settings['variant']:
        click.echo("No variant specified in configuration file.")
        sys.exit(1)
    ver = None
    if 'variant_version' in oo_cfg.settings:
        ver = oo_cfg.settings['variant_version']
    variant, version = find_variant(oo_cfg.settings['variant'], version=ver)
    if variant is None or version is None:
        err_variant_name = oo_cfg.settings['variant']
        if ver:
            err_variant_name = "%s %s" % (err_variant_name, ver)
        click.echo("%s is not an installable variant." % err_variant_name)
        sys.exit(1)
    # Normalize to the canonical version name resolved by find_variant.
    oo_cfg.settings['variant_version'] = version.name

    # check that all listed host roles are included
    listed_roles = oo_cfg.get_host_roles_set()
    configured_roles = set([role for role in oo_cfg.deployment.roles])
    if listed_roles != configured_roles:
        missing_info = True
        click.echo('Any roles assigned to hosts must be defined.')

    if missing_info:
        sys.exit(1)
def get_proxy_hosts_excludes():
    """Prompt for HTTP/HTTPS proxy hostnames and extra NO_PROXY entries.

    Returns a (http_proxy, https_proxy, proxy_excludes) tuple of strings;
    empty strings mean "no proxy" / "no extra excludes".
    """
    message = """
If a proxy is needed to reach HTTP and HTTPS traffic, please enter the
name below. This proxy will be configured by default for all processes
that need to reach systems outside the cluster. An example proxy value
would be:
    http://proxy.example.com:8080/
More advanced configuration is possible if using Ansible directly:
https://docs.openshift.com/enterprise/latest/install_config/http_proxies.html
"""
    click.echo(message)

    message = "Specify your http proxy ? (ENTER for none)"
    http_proxy_hostname = click.prompt(message, default='')

    # TODO: Fix this prompt message and behavior. 'ENTER' will default
    # to the http_proxy_hostname if one was provided
    message = "Specify your https proxy ? (ENTER for none)"
    https_proxy_hostname = click.prompt(message, default=http_proxy_hostname)

    if http_proxy_hostname or https_proxy_hostname:
        message = """
All hosts in your OpenShift inventory will automatically be added to the NO_PROXY value.
Please provide any additional hosts to be added to NO_PROXY. (ENTER for none)
"""
        proxy_excludes = click.prompt(message, default='')
    else:
        # No proxies configured: excludes are irrelevant.
        proxy_excludes = ''

    return http_proxy_hostname, https_proxy_hostname, proxy_excludes
def get_missing_info_from_user(oo_cfg):
    """ Prompts the user for any information missing from the given configuration.

    Fills in (only when absent): ansible_ssh_user, variant/version, hosts and
    roles, the default routing subdomain, and — on variant versions >= 3.2 —
    the proxy settings. Returns the updated oo_cfg.

    Fix: the saved-proxy check used `is not 'UNSET'`, an identity comparison
    against a string literal that only works by CPython string-interning
    accident (and is a SyntaxWarning on Python 3.8+); replaced with `!=`.
    """
    click.clear()
    message = """
Welcome to the OpenShift Enterprise 3 installation.
Please confirm that following prerequisites have been met:
* All systems where OpenShift will be installed are running Red Hat Enterprise
Linux 7.
* All systems are properly subscribed to the required OpenShift Enterprise 3
repositories.
* All systems have run docker-storage-setup (part of the Red Hat docker RPM).
* All systems have working DNS that resolves not only from the perspective of
the installer, but also from within the cluster.
When the process completes you will have a default configuration for masters
and nodes. For ongoing environment maintenance it's recommended that the
official Ansible playbooks be used.
For more information on installation prerequisites please see:
https://docs.openshift.com/enterprise/latest/admin_guide/install/prerequisites.html
"""
    confirm_continue(message)
    click.clear()

    if not oo_cfg.deployment.variables.get('ansible_ssh_user', False):
        oo_cfg.deployment.variables['ansible_ssh_user'] = get_ansible_ssh_user()
        click.clear()

    if not oo_cfg.settings.get('variant', ''):
        variant, version = get_variant_and_version()
        oo_cfg.settings['variant'] = variant.name
        oo_cfg.settings['variant_version'] = version.name
        oo_cfg.settings['variant_subtype'] = version.subtype
        click.clear()

    if not oo_cfg.deployment.hosts:
        oo_cfg.deployment.hosts, roles = collect_hosts(oo_cfg)
        set_infra_nodes(oo_cfg.deployment.hosts)

        for role in roles:
            oo_cfg.deployment.roles[role] = Role(name=role, variables={})
        click.clear()

    if 'master_routingconfig_subdomain' not in oo_cfg.deployment.variables:
        oo_cfg.deployment.variables['master_routingconfig_subdomain'] = \
            get_routingconfig_subdomain()
        click.clear()

    # Are any proxy vars already persisted?
    proxy_vars = ['proxy_exclude_hosts', 'proxy_https', 'proxy_http']
    # Empty list if NO proxy vars were persisted
    saved_proxy_vars = [pv for pv in proxy_vars
                        if oo_cfg.deployment.variables.get(pv, 'UNSET') != 'UNSET']

    INSTALLER_LOG.debug("Evaluated proxy settings, found %s presisted values",
                        len(saved_proxy_vars))
    current_version = parse_version(
        oo_cfg.settings.get('variant_version', '0.0'))
    min_version = parse_version('3.2')

    # No proxy vars were saved and we are running a version which
    # recognizes proxy parameters. We must prompt the user for values
    # if this conditional is true.
    if not saved_proxy_vars and current_version >= min_version:
        INSTALLER_LOG.debug("Prompting user to enter proxy values")
        http_proxy, https_proxy, proxy_excludes = get_proxy_hosts_excludes()
        oo_cfg.deployment.variables['proxy_http'] = http_proxy
        oo_cfg.deployment.variables['proxy_https'] = https_proxy
        oo_cfg.deployment.variables['proxy_exclude_hosts'] = proxy_excludes
        click.clear()

    return oo_cfg
def collect_new_nodes(oo_cfg):
    """Walk the user through entering additional node hosts for a scaleup
    run and return the list of newly collected hosts."""
    click.clear()
    click.echo('*** New Node Configuration ***')
    click.echo("""
Add new nodes here
""")
    # masters_set=True skips the master prompt; existing_env=True marks the
    # collected hosts as new additions to a running environment.
    added_hosts, _roles = collect_hosts(
        oo_cfg, existing_env=True, masters_set=True, print_summary=False)
    return added_hosts
def get_installed_hosts(hosts, callback_facts):
    """Partition the masters/nodes in *hosts* into (installed, uninstalled)
    lists based on the gathered facts. Hosts with no facts at all are placed
    in neither list."""
    installed, uninstalled = [], []
    candidates = (h for h in hosts if h.is_master() or h.is_node())
    for candidate in candidates:
        if candidate.connect_to not in callback_facts.keys():
            continue
        if is_installed_host(candidate, callback_facts):
            INSTALLER_LOG.debug("%s is already installed", str(candidate))
            installed.append(candidate)
        else:
            INSTALLER_LOG.debug("%s is not installed", str(candidate))
            uninstalled.append(candidate)
    return installed, uninstalled
def is_installed_host(host, callback_facts):
    """Return True when the gathered facts report an OpenShift version on *host*.

    A host counts as installed when its facts contain a 'common' section whose
    'version' value is a non-empty string other than the literal 'None'.

    Fix: the original returned the raw short-circuit value of an `and` chain
    (e.g. the empty string when the version fact was empty) instead of a
    boolean. Normalized to return a real bool; truthiness is unchanged for
    all callers.
    """
    facts = callback_facts[host.connect_to]
    if 'common' not in facts:
        return False
    version = facts['common'].get('version', '')
    return bool(version) and version != 'None'
def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force):
    """
    We get here once there are hosts in oo_cfg and we need to find out what
    state they are in. There are several different cases that might occur:
    1. All hosts in oo_cfg are uninstalled. In this case, we should proceed
       with a normal installation.
    2. All hosts in oo_cfg are installed. In this case, ask the user if they
       want to force reinstall or exit. We can also hint in this case about
       the scaleup workflow.
    3. Some hosts are installed and some are uninstalled. In this case, prompt
       the user if they want to force (re)install all hosts specified or direct
       them to the scaleup workflow and exit.

    Returns (hosts_to_run_on, callback_facts); exits with status 1 when a
    (re)install is declined / not forced in cases 2 and 3.
    """
    hosts_to_run_on = []
    # Check if master or nodes already have something installed
    installed_hosts, uninstalled_hosts = get_installed_hosts(oo_cfg.deployment.hosts,
                                                             callback_facts)
    nodes = [host for host in oo_cfg.deployment.hosts if host.is_node()]
    masters_and_nodes = [host for host in oo_cfg.deployment.hosts if host.is_master() or host.is_node()]

    # Debug-log the four host groupings for post-mortem analysis.
    in_hosts = [str(h) for h in installed_hosts]
    un_hosts = [str(h) for h in uninstalled_hosts]
    all_hosts = [str(h) for h in oo_cfg.deployment.hosts]
    m_and_n = [str(h) for h in masters_and_nodes]

    INSTALLER_LOG.debug("installed hosts: %s", ", ".join(in_hosts))
    INSTALLER_LOG.debug("uninstalled hosts: %s", ", ".join(un_hosts))
    INSTALLER_LOG.debug("deployment hosts: %s", ", ".join(all_hosts))
    INSTALLER_LOG.debug("masters and nodes: %s", ", ".join(m_and_n))

    # Case (1): All uninstalled hosts
    # NOTE(review): this compares the uninstalled count (which may include
    # masters) against the node count rather than masters_and_nodes —
    # confirm that is the intended "everything uninstalled" test.
    if len(uninstalled_hosts) == len(nodes):
        click.echo('All hosts in config are uninstalled. Proceeding with installation...')
        hosts_to_run_on = list(oo_cfg.deployment.hosts)
    else:
        # Case (2): All installed hosts
        if len(installed_hosts) == len(masters_and_nodes):
            message = """
All specified hosts in specified environment are installed.
"""
        # Case (3): Some installed, some uninstalled
        else:
            message = """
A mix of installed and uninstalled hosts have been detected in your environment.
Please make sure your environment was installed successfully before adding new nodes.
"""

        # Still inside the case 2/3 else condition
        mixed_msg = """
\tInstalled hosts:
\t\t{inst_hosts}
\tUninstalled hosts:
\t\t{uninst_hosts}""".format(inst_hosts=", ".join(in_hosts), uninst_hosts=", ".join(un_hosts))
        click.echo(mixed_msg)

        # Out of the case 2/3 if/else
        click.echo(message)

        if not unattended:
            # Interactive: offer a full (re)install of every host.
            response = click.confirm('Do you want to (re)install the environment?\n\n'
                                     'Note: This will potentially erase any custom changes.')
            if response:
                hosts_to_run_on = list(oo_cfg.deployment.hosts)
                force = True
        elif unattended and force:
            hosts_to_run_on = list(oo_cfg.deployment.hosts)
        if not force:
            # Neither confirmed nor forced: point at --force / scaleup and bail.
            message = """
If you want to force reinstall of your environment, run:
`atomic-openshift-installer install --force`
If you want to add new nodes to this environment, run:
`atomic-openshift-installer scaleup`
"""
            click.echo(message)
            sys.exit(1)

    return hosts_to_run_on, callback_facts
def set_infra_nodes(hosts):
    """Label infra nodes: the first two schedulable nodes, or every host
    when the deployment consists solely of masters."""
    if all(h.is_master() for h in hosts):
        # All-master deployment: every host doubles as an infra node.
        infra_hosts = hosts
    else:
        schedulable = [h for h in hosts if h.is_schedulable_node(hosts)]
        infra_hosts = schedulable[:2]
    for infra_host in infra_hosts:
        infra_host.node_labels = "{'region': 'infra'}"
def run_config_playbook(oo_cfg, hosts_to_run_on, unattended, verbose, gen_inventory):
    """Generate the Ansible inventory and run the main install playbook.

    Exits 0 immediately after writing the inventory when *gen_inventory* is
    set; exits 1 if the playbook reports an error.
    """
    # Write Ansible inventory file to disk:
    inventory_file = openshift_ansible.generate_inventory(hosts_to_run_on)

    click.echo()
    click.echo('Wrote atomic-openshift-installer config: %s' % oo_cfg.config_path)
    click.echo("Wrote Ansible inventory: %s" % inventory_file)
    click.echo()

    if gen_inventory:
        # Inventory-only mode: stop before touching any host.
        sys.exit(0)

    click.echo('Ready to run installation process.')
    message = """
If changes are needed please edit the installer.cfg.yml config file above and re-run.
"""
    if not unattended:
        confirm_continue(message)

    error = openshift_ansible.run_main_playbook(inventory_file, oo_cfg.deployment.hosts,
                                                hosts_to_run_on, verbose)

    if error:
        # The bootstrap script will print out the log location.
        message = """
An error was detected. After resolving the problem please relaunch the
installation process.
"""
        click.echo(message)
        sys.exit(1)
    else:
        message = """
The installation was successful!
If this is your first time installing please take a look at the Administrator
Guide for advanced options related to routing, storage, authentication, and
more:
http://docs.openshift.com/enterprise/latest/admin_guide/overview.html
"""
        click.echo(message)
@click.group()
@click.pass_context
@click.option('--unattended', '-u', is_flag=True, default=False)
@click.option('--configuration', '-c',
              type=click.Path(file_okay=True,
                              dir_okay=False,
                              writable=True,
                              readable=True),
              default=None)
@click.option('--ansible-playbook-directory',
              '-a',
              type=click.Path(exists=True,
                              file_okay=False,
                              dir_okay=True,
                              readable=True),
              # callback=validate_ansible_dir,
              default=DEFAULT_PLAYBOOK_DIR,
              envvar='OO_ANSIBLE_PLAYBOOK_DIRECTORY')
@click.option('--ansible-log-path',
              type=click.Path(file_okay=True,
                              dir_okay=False,
                              writable=True,
                              readable=True),
              default="/tmp/ansible.log")
@click.option('-v', '--verbose',
              is_flag=True, default=False)
# NOTE(review): this help text says /tmp/installer.log, but the module-level
# file handler writes /tmp/installer.txt — confirm which is intended.
@click.option('-d', '--debug',
              help="Enable installer debugging (/tmp/installer.log)",
              is_flag=True, default=False)
@click.help_option('--help', '-h')
# pylint: disable=too-many-arguments
# pylint: disable=line-too-long
# Main CLI entrypoint, not much we can do about too many arguments.
def cli(ctx, unattended, configuration, ansible_playbook_directory, ansible_log_path, verbose, debug):
    """
    atomic-openshift-installer makes the process for installing OSE or AEP
    easier by interactively gathering the data needed to run on each host.
    It can also be run in unattended mode if provided with a configuration file.
    Further reading: https://docs.openshift.com/enterprise/latest/install_config/install/quick_install.html
    """
    if debug:
        # DEFAULT log level threshold is set to CRITICAL (the
        # highest), anything below that (we only use debug/warning
        # presently) is not logged. If '-d' is given though, we'll
        # lower the threshold to debug (almost everything gets through)
        INSTALLER_LOG.setLevel(logging.DEBUG)
        INSTALLER_LOG.debug("Quick Installer debugging initialized")

    # Shared state handed to every subcommand via click's context object.
    ctx.obj = {}
    ctx.obj['unattended'] = unattended
    ctx.obj['configuration'] = configuration
    ctx.obj['ansible_log_path'] = ansible_log_path
    ctx.obj['verbose'] = verbose

    try:
        oo_cfg = OOConfig(ctx.obj['configuration'])
    except OOConfigInvalidHostError as err:
        click.echo(err)
        sys.exit(1)

    # Playbook directory resolution order: CLI flag, saved config, default path.
    # If no playbook dir on the CLI, check the config:
    if not ansible_playbook_directory:
        ansible_playbook_directory = oo_cfg.settings.get('ansible_playbook_directory', '')
    # If still no playbook dir, check for the default location:
    if not ansible_playbook_directory and os.path.exists(DEFAULT_PLAYBOOK_DIR):
        ansible_playbook_directory = DEFAULT_PLAYBOOK_DIR
    validate_ansible_dir(ansible_playbook_directory)
    oo_cfg.settings['ansible_playbook_directory'] = ansible_playbook_directory
    oo_cfg.ansible_playbook_directory = ansible_playbook_directory
    ctx.obj['ansible_playbook_directory'] = ansible_playbook_directory

    if os.path.exists(DEFAULT_ANSIBLE_CONFIG):
        # If we're installed by RPM this file should exist and we can use it as our default:
        oo_cfg.settings['ansible_config'] = DEFAULT_ANSIBLE_CONFIG

    # Quiet ansible config suppresses task output unless -v was given.
    if not verbose and os.path.exists(QUIET_ANSIBLE_CONFIG):
        oo_cfg.settings['ansible_quiet_config'] = QUIET_ANSIBLE_CONFIG

    oo_cfg.settings['ansible_log_path'] = ctx.obj['ansible_log_path']
    ctx.obj['oo_cfg'] = oo_cfg
    openshift_ansible.set_config(oo_cfg)
@click.command()
@click.pass_context
def uninstall(ctx):
    """Run the uninstall playbook against every host in the configuration."""
    oo_cfg = ctx.obj['oo_cfg']
    verbose = ctx.obj['verbose']

    # Older configs keep hosts at the top level; newer ones nest them under
    # a deployment section. Accept both layouts.
    if hasattr(oo_cfg, 'deployment'):
        hosts = oo_cfg.deployment.hosts
    elif hasattr(oo_cfg, 'hosts'):
        hosts = oo_cfg.hosts
    else:
        click.echo("No hosts defined in: %s" % oo_cfg.config_path)
        sys.exit(1)

    click.echo("OpenShift will be uninstalled from the following hosts:\n")
    if not ctx.obj['unattended']:
        # Prompt interactively to confirm:
        for target in hosts:
            click.echo(" * %s" % target.connect_to)
        if not click.confirm("\nDo you want to proceed?"):
            click.echo("Uninstall cancelled.")
            sys.exit(0)

    openshift_ansible.run_uninstall_playbook(hosts, verbose)
@click.command()
@click.option('--latest-minor', '-l', is_flag=True, default=False)
@click.option('--next-major', '-n', is_flag=True, default=False)
@click.pass_context
# pylint: disable=too-many-statements,too-many-branches
def upgrade(ctx, latest_minor, next_major):
    """Stub command: in-place upgrades were removed from the quick installer;
    point users at the manual upgrade documentation and exit non-zero."""
    notice = (
        "Upgrades are no longer supported by this version of installer",
        "Please see the documentation for manual upgrade:",
        "https://docs.openshift.com/container-platform/latest/install_config/upgrading/automated_upgrades.html",
    )
    for line in notice:
        click.echo(line)
    sys.exit(1)
@click.command()
@click.option('--force', '-f', is_flag=True, default=False)
@click.option('--gen-inventory', is_flag=True, default=False,
              help="Generate an Ansible inventory file and exit.")
@click.pass_context
def install(ctx, force, gen_inventory):
    """Main install workflow: validate/collect config, gather facts from the
    hosts, decide which hosts to run on, and launch the config playbook."""
    oo_cfg = ctx.obj['oo_cfg']
    verbose = ctx.obj['verbose']
    unattended = ctx.obj['unattended']

    # Unattended runs must already have a complete config; interactive runs
    # prompt for whatever is missing.
    if unattended:
        error_if_missing_info(oo_cfg)
    else:
        oo_cfg = get_missing_info_from_user(oo_cfg)

    check_hosts_config(oo_cfg, unattended)

    print_installation_summary(oo_cfg.deployment.hosts,
                               oo_cfg.settings.get('variant_version', None))
    click.echo('Gathering information from hosts...')
    callback_facts, error = openshift_ansible.default_facts(oo_cfg.deployment.hosts,
                                                            verbose)

    if error or callback_facts is None:
        click.echo("There was a problem fetching the required information. "
                   "Please see {} for details.".format(oo_cfg.settings['ansible_log_path']))
        sys.exit(1)

    hosts_to_run_on, callback_facts = get_hosts_to_run_on(oo_cfg,
                                                          callback_facts,
                                                          unattended,
                                                          force)

    # We already verified this is not the case for unattended installs, so this can
    # only trigger for live CLI users:
    if not ctx.obj['unattended'] and len(oo_cfg.calc_missing_facts()) > 0:
        confirm_hosts_facts(oo_cfg, callback_facts)

    # Write quick installer config file to disk:
    oo_cfg.save_to_disk()

    run_config_playbook(oo_cfg, hosts_to_run_on, unattended, verbose, gen_inventory)
@click.command()
@click.option('--gen-inventory', is_flag=True, default=False,
              help="Generate an Ansible inventory file and exit.")
@click.pass_context
def scaleup(ctx, gen_inventory):
    """Add new nodes to a previously installed OpenShift environment.

    Fix: the facts-gathering step (the 'Gathering information from hosts...'
    echo, openshift_ansible.default_facts call, and its error check) appeared
    twice back-to-back, running the fact-collection playbook against every
    host a second time for no benefit. The duplicate has been removed; the
    single remaining gather preserves the original behavior.
    """
    oo_cfg = ctx.obj['oo_cfg']
    verbose = ctx.obj['verbose']
    unattended = ctx.obj['unattended']

    installed_hosts = list(oo_cfg.deployment.hosts)

    if len(installed_hosts) == 0:
        click.echo('No hosts specified.')
        sys.exit(1)

    click.echo('Welcome to the OpenShift Enterprise 3 Scaleup utility.')

    # Scaleup requires manual data entry. Therefore, we do not support
    # unattended operations.
    if unattended:
        msg = """
---
The 'scaleup' operation does not support unattended
functionality. Re-run the installer without the '-u' or '--unattended'
option to continue.
"""
        click.echo(msg)
        sys.exit(1)

    # Resume normal scaleup workflow
    print_installation_summary(installed_hosts,
                               oo_cfg.settings['variant_version'],
                               verbose=False,)
    message = """
---
We have detected this previously installed OpenShift environment.
This tool will guide you through the process of adding additional
nodes to your cluster.
"""
    confirm_continue(message)

    error_if_missing_info(oo_cfg)
    check_hosts_config(oo_cfg, True)

    installed_masters = [host for host in installed_hosts if host.is_master()]
    new_nodes = collect_new_nodes(oo_cfg)

    oo_cfg.deployment.hosts.extend(new_nodes)
    hosts_to_run_on = installed_masters + new_nodes

    openshift_ansible.set_config(oo_cfg)

    print_installation_summary(oo_cfg.deployment.hosts,
                               oo_cfg.settings.get('variant_version', None))
    click.echo('Gathering information from hosts...')
    callback_facts, error = openshift_ansible.default_facts(oo_cfg.deployment.hosts,
                                                            verbose)
    if error or callback_facts is None:
        click.echo("There was a problem fetching the required information. "
                   "Please see {} for details.".format(oo_cfg.settings['ansible_log_path']))
        sys.exit(1)

    # We already verified this is not the case for unattended installs, so this can
    # only trigger for live CLI users:
    if not ctx.obj['unattended'] and len(oo_cfg.calc_missing_facts()) > 0:
        confirm_hosts_facts(oo_cfg, callback_facts)

    # Write quick installer config file to disk:
    oo_cfg.save_to_disk()

    run_config_playbook(oo_cfg, hosts_to_run_on, unattended, verbose, gen_inventory)
# Register the subcommands on the top-level click group.
cli.add_command(install)
cli.add_command(scaleup)
cli.add_command(upgrade)
cli.add_command(uninstall)

if __name__ == '__main__':
    # This is expected behaviour for context passing with click library:
    # pylint: disable=unexpected-keyword-arg
    cli(obj={})
# NOTE(review): stray commit-subject line left over from a merge/concatenation;
# kept as a comment so the module stays syntactically valid:
# installer: increase content width for commands, which may output URLs
# pylint: disable=missing-docstring,no-self-use,no-value-for-parameter,too-many-lines
import logging
import os
import sys
import click
from pkg_resources import parse_version
from ooinstall import openshift_ansible, utils
from ooinstall.oo_config import Host, OOConfig, OOConfigInvalidHostError, Role
from ooinstall.variants import find_variant, get_variant_version_combos
# Installer-wide logger. The default threshold is CRITICAL so that nothing is
# emitted unless debugging is enabled via the CLI's -d/--debug flag (which
# lowers it to DEBUG at runtime).
INSTALLER_LOG = logging.getLogger('installer')
INSTALLER_LOG.setLevel(logging.CRITICAL)
# NOTE(review): the handler writes /tmp/installer.txt while the -d option's
# help text advertises /tmp/installer.log — confirm which name is intended.
INSTALLER_FILE_HANDLER = logging.FileHandler('/tmp/installer.txt')
INSTALLER_FILE_HANDLER.setFormatter(
    logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
# Example output:
# 2016-08-23 07:34:58,480 - installer - DEBUG - Going to 'load_system_facts'
INSTALLER_FILE_HANDLER.setLevel(logging.DEBUG)
INSTALLER_LOG.addHandler(INSTALLER_FILE_HANDLER)

# Default file locations installed by the atomic-openshift-utils RPM.
DEFAULT_ANSIBLE_CONFIG = '/usr/share/atomic-openshift-utils/ansible.cfg'
QUIET_ANSIBLE_CONFIG = '/usr/share/atomic-openshift-utils/ansible-quiet.cfg'
DEFAULT_PLAYBOOK_DIR = '/usr/share/ansible/openshift-ansible/'

# Maps an installed minor version to the playbooks/targets used to upgrade it
# to the latest minor release ('minor_*') or the next major release ('major_*').
UPGRADE_MAPPINGS = {
    '3.6': {
        'minor_version': '3.6',
        'minor_playbook': 'v3_6/upgrade.yml',
        'major_playbook': 'v3_7/upgrade.yml',
        'major_version': '3.7',
    },
    '3.7': {
        'minor_version': '3.7',
        'minor_playbook': 'v3_7/upgrade.yml',
    },
}
def validate_ansible_dir(path):
    """Return *path* unchanged, or raise click.BadParameter when it is empty.

    Existence of the directory is deliberately not verified here.
    """
    if path:
        return path
    raise click.BadParameter('An Ansible path must be provided')
def validate_prompt_hostname(hostname):
    """Accept the empty string or a syntactically valid hostname; raise
    click.BadParameter (re-prompting the user) for anything else."""
    if hostname != '' and not utils.is_valid_hostname(hostname):
        raise click.BadParameter('Invalid hostname. Please double-check this value and re-enter it.')
    return hostname
def get_ansible_ssh_user():
    """Ask which account the installer should use for ssh (default: root)."""
    click.clear()
    click.echo("""
This installation process involves connecting to remote hosts via ssh. Any
account may be used. However, if a non-root account is used, then it must have
passwordless sudo access.
""")
    return click.prompt('User for ssh access', default='root')
def get_routingconfig_subdomain():
    """Prompt for an optional override of the default subdomain used for
    exposed routes; an empty answer keeps the default."""
    click.clear()
    click.echo("""
You might want to override the default subdomain used for exposed routes. If you don't know what this is, use the default value.
""")
    return click.prompt('New default subdomain (ENTER for none)', default='')
def collect_hosts(oo_cfg, existing_env=False, masters_set=False, print_summary=True):
    """
    Collect host information from user. This will later be filled in using
    Ansible.
    Returns: a list of host information collected from the user

    existing_env:  mark collected hosts as new additions (scaleup flow).
    masters_set:   skip the "will this be a master?" prompt entirely.
    print_summary: echo a running installation summary after each host.
    """
    click.clear()
    click.echo('*** Host Configuration ***')
    message = """
You must now specify the hosts that will compose your OpenShift cluster.
Please enter an IP address or hostname to connect to for each system in the
cluster. You will then be prompted to identify what role you want this system to
serve in the cluster.
OpenShift masters serve the API and web console and coordinate the jobs to run
across the environment. Optionally, you can specify multiple master systems for
a high-availability (HA) deployment. If you choose an HA deployment, then you
are prompted to identify a *separate* system to act as the load balancer for
your cluster once you define all masters and nodes.
Any masters configured as part of this installation process are also
configured as nodes. This enables the master to proxy to pods
from the API. By default, this node is unschedulable, but this can be changed
after installation with the 'oadm manage-node' command.
OpenShift nodes provide the runtime environments for containers. They host the
required services to be managed by the master.
http://docs.openshift.com/enterprise/latest/architecture/infrastructure_components/kubernetes_infrastructure.html#master
http://docs.openshift.com/enterprise/latest/architecture/infrastructure_components/kubernetes_infrastructure.html#node
"""
    click.echo(message)

    hosts = []
    roles = set(['master', 'node', 'storage', 'etcd'])
    more_hosts = True
    num_masters = 0
    while more_hosts:
        host_props = {}
        host_props['roles'] = []
        host_props['connect_to'] = click.prompt('Enter hostname or IP address',
                                                value_proc=validate_prompt_hostname)

        if not masters_set:
            if click.confirm('Will this host be an OpenShift master?'):
                host_props['roles'].append('master')
                host_props['roles'].append('etcd')
                num_masters += 1

                # 3.0 supports only a single master; stop asking once one is set.
                if oo_cfg.settings['variant_version'] == '3.0':
                    masters_set = True
        # Every host (masters included) is also configured as a node.
        host_props['roles'].append('node')

        host_props['containerized'] = False
        if oo_cfg.settings['variant_version'] != '3.0':
            rpm_or_container = \
                click.prompt('Will this host be RPM or Container based (rpm/container)?',
                             type=click.Choice(['rpm', 'container']),
                             default='rpm')
            if rpm_or_container == 'container':
                host_props['containerized'] = True

        host_props['new_host'] = existing_env

        host = Host(**host_props)
        hosts.append(host)

        if print_summary:
            print_installation_summary(hosts, oo_cfg.settings['variant_version'])

        # If we have one master, this is enough for an all-in-one deployment,
        # thus we can start asking if you want to proceed. Otherwise we assume
        # you must.
        # (With exactly 2 masters entered we keep looping: 2 masters is invalid.)
        if masters_set or num_masters != 2:
            more_hosts = click.confirm('Do you want to add additional hosts?')

    if num_masters > 2:
        # HA deployment: needs a load balancer (and, below, a storage host).
        master_lb = collect_master_lb(hosts)
        if master_lb:
            hosts.append(master_lb)
            roles.add('master_lb')
    else:
        set_cluster_hostname(oo_cfg)

    if not existing_env:
        collect_storage_host(hosts)

    return hosts, roles
# pylint: disable=too-many-branches
def print_installation_summary(hosts, version=None, verbose=True):
    """
    Displays a summary of all hosts configured thus far, and what role each
    will play.
    Shows total nodes/masters, hints for performing/modifying the deployment
    with additional setup, warnings for invalid or sub-optimal configurations.
    """
    click.clear()
    click.echo('*** Installation Summary ***\n')
    click.echo('Hosts:')
    for host in hosts:
        print_host_summary(hosts, host)

    masters = [host for host in hosts if host.is_master()]
    nodes = [host for host in hosts if host.is_node()]
    dedicated_nodes = [host for host in hosts if host.is_node() and not host.is_master()]
    click.echo('')
    click.echo('Total OpenShift masters: %s' % len(masters))
    click.echo('Total OpenShift nodes: %s' % len(nodes))

    # Hints/warnings are suppressed when verbose=False (e.g. scaleup recap).
    if verbose:
        if len(masters) == 1 and version != '3.0':
            ha_hint_message = """
NOTE: Add a total of 3 or more masters to perform an HA installation."""
            click.echo(ha_hint_message)
        elif len(masters) == 2:
            # 2 masters is an invalid configuration; warn the user to add one.
            min_masters_message = """
WARNING: A minimum of 3 masters are required to perform an HA installation.
Please add one more to proceed."""
            click.echo(min_masters_message)
        elif len(masters) >= 3:
            ha_message = """
NOTE: Multiple masters specified, this will be an HA deployment with a separate
etcd cluster. You will be prompted to provide the FQDN of a load balancer and
a host for storage once finished entering hosts.
"""
            click.echo(ha_message)

            dedicated_nodes_message = """
WARNING: Dedicated nodes are recommended for an HA deployment. If no dedicated
nodes are specified, each configured master will be marked as a schedulable
node."""

            min_ha_nodes_message = """
WARNING: A minimum of 3 dedicated nodes are recommended for an HA
deployment."""
            if len(dedicated_nodes) == 0:
                click.echo(dedicated_nodes_message)
            elif len(dedicated_nodes) < 3:
                click.echo(min_ha_nodes_message)

    click.echo('')
def print_host_summary(all_hosts, host):
    """Echo one host's connect_to line plus a bullet for each role it holds."""
    click.echo("- %s" % host.connect_to)
    if host.is_master():
        click.echo(" - OpenShift master")
    if host.is_node():
        # Scheduling status depends on the whole host list.
        if host.is_dedicated_node():
            node_line = " - OpenShift node (Dedicated)"
        elif host.is_schedulable_node(all_hosts):
            node_line = " - OpenShift node"
        else:
            node_line = " - OpenShift node (Unscheduled)"
        click.echo(node_line)
    if host.is_master_lb():
        if host.preconfigured:
            click.echo(" - Load Balancer (Preconfigured)")
        else:
            click.echo(" - Load Balancer (HAProxy)")
    if host.is_etcd():
        click.echo(" - Etcd")
    if host.is_storage():
        click.echo(" - Storage")
    if host.new_host:
        click.echo(" - NEW")
def collect_master_lb(hosts):
    """
    Prompt for a load-balancer host and return it as a Host, or None when
    the prompt yields an empty hostname.

    The prompt rejects any hostname already used as a master or node in
    *hosts*, since re-using one as the load balancer is an invalid
    configuration. The user is also asked whether the reference HAProxy
    should be installed; declining marks the host as preconfigured.
    """
    message = """
Setting up high-availability masters requires a load balancing solution.
Please provide the FQDN of a host that will be configured as a proxy. This
can be either an existing load balancer configured to balance all masters on
port 8443 or a new host that will have HAProxy installed on it.
If the host provided is not yet configured, a reference HAProxy load
balancer will be installed. It's important to note that while the rest of the
environment will be fault-tolerant, this reference load balancer will not be.
It can be replaced post-installation with a load balancer with the same
hostname.
"""
    click.echo(message)
    host_props = {}

    # Using an embedded function here so we have access to the hosts list:
    def validate_prompt_lb(hostname):
        # Run the standard hostname check first:
        hostname = validate_prompt_hostname(hostname)

        # Make sure this host wasn't already specified:
        for host in hosts:
            if host.connect_to == hostname and (host.is_master() or host.is_node()):
                raise click.BadParameter('Cannot re-use "%s" as a load balancer, '
                                         'please specify a separate host' % hostname)
        return hostname

    lb_hostname = click.prompt('Enter hostname or IP address',
                               value_proc=validate_prompt_lb)
    if lb_hostname:
        host_props['connect_to'] = lb_hostname
        install_haproxy = \
            click.confirm('Should the reference HAProxy load balancer be installed on this host?')
        host_props['preconfigured'] = not install_haproxy
        host_props['roles'] = ['master_lb']
        return Host(**host_props)
    else:
        return None
def set_cluster_hostname(oo_cfg):
    """
    Prompt for an optional cluster hostname override (single-master case)
    and store the answer in both cluster hostname deployment variables.

    The prompt defaults to the string form of the first master host.
    """
    default_master = next((h for h in oo_cfg.deployment.hosts if h.is_master()), None)
    message = """
You have chosen to install a single master cluster (non-HA).
In a single master cluster, the cluster host name (Ansible variable openshift_master_cluster_public_hostname) is set by default to the host name of the single master. In a multiple master (HA) cluster, the FQDN of a host must be provided that will be configured as a proxy. This could be either an existing load balancer configured to balance all masters on
port 8443 or a new host that would have HAProxy installed on it.
(Optional)
If you want to override the cluster host name now to something other than the default (the host name of the single master), or if you think you might add masters later to become an HA cluster and want to future proof your cluster host name choice, please provide a FQDN. Otherwise, press ENTER to continue and accept the default.
"""
    click.echo(message)
    chosen_hostname = click.prompt('Enter hostname or IP address',
                                   default=str(default_master))
    # Both the internal and public cluster hostnames get the same value.
    for variable in ('openshift_master_cluster_hostname',
                     'openshift_master_cluster_public_hostname'):
        oo_cfg.deployment.variables[variable] = chosen_hostname
def collect_storage_host(hosts):
    """
    Prompt for a registry-storage host and record it in *hosts*.

    If the chosen hostname already belongs to an existing node, the
    'storage' role is simply added to that host; otherwise a new Host
    carrying only the 'storage' role is appended.
    """
    message = """
Setting up high-availability masters requires a storage host. Please provide a
host that will be configured as a Registry Storage.
Note: Containerized storage hosts are not currently supported.
"""
    click.echo(message)

    first_master = next(host for host in hosts if host.is_master())
    selection = click.prompt('Enter hostname or IP address',
                             value_proc=validate_prompt_hostname,
                             default=first_master.connect_to)
    existing, existing_host = is_host_already_node_or_master(selection, hosts)
    if existing and existing_host.is_node():
        existing_host.roles.append('storage')
        return

    hosts.append(Host(connect_to=selection,
                      preconfigured=False,
                      roles=['storage']))
def is_host_already_node_or_master(hostname, hosts):
    """
    Return (found, host): whether *hostname* matches a host in *hosts*
    that is already a master or node, plus the last such match (or None).
    """
    match = None
    for candidate in hosts:
        already_used = candidate.is_master() or candidate.is_node()
        if candidate.connect_to == hostname and already_used:
            match = candidate
    return match is not None, match
def confirm_hosts_facts(oo_cfg, callback_facts):
    """
    Display the facts gathered for each deployment host (IPs/hostnames)
    and prompt the user to confirm them.

    Preconfigured hosts are skipped. Side effect: each processed host's
    ip/public_ip/hostname/public_hostname attributes are set from
    callback_facts. If the user rejects the facts, the config is saved to
    disk and the process exits(0) so the file can be edited by hand.

    Returns default_facts: a dict with one (currently empty) entry per
    host that was attempted.
    """
    hosts = oo_cfg.deployment.hosts
    click.clear()
    message = """
The following is a list of the facts gathered from the provided hosts. The
hostname for a system inside the cluster is often different from the hostname
that is resolveable from command-line or web clients, therefore these settings
cannot be validated automatically.
For some cloud providers, the installer is able to gather metadata exposed in
the instance, so reasonable defaults will be provided.
Please confirm that they are correct before moving forward.
"""
    notes = """
Format:
connect_to,IP,public IP,hostname,public hostname
Notes:
* The installation host is the hostname from the installer's perspective.
* The IP of the host should be the internal IP of the instance.
* The public IP should be the externally accessible IP associated with the instance
* The hostname should resolve to the internal IP from the instances
themselves.
* The public hostname should resolve to the external IP from hosts outside of
the cloud.
"""

    # For testing purposes we need to click.echo only once, so build up
    # the message:
    output = message

    default_facts_lines = []
    default_facts = {}
    for host in hosts:
        if host.preconfigured:
            continue
        try:
            default_facts[host.connect_to] = {}
            # Copy the gathered facts onto the host object itself.
            host.ip = callback_facts[host.connect_to]["common"]["ip"]
            host.public_ip = callback_facts[host.connect_to]["common"]["public_ip"]
            host.hostname = callback_facts[host.connect_to]["common"]["hostname"]
            host.public_hostname = callback_facts[host.connect_to]["common"]["public_hostname"]
        except KeyError:
            # Missing facts for this host: report and move on.
            click.echo("Problem fetching facts from {}".format(host.connect_to))
            continue

        default_facts_lines.append(",".join([host.connect_to,
                                             host.ip,
                                             host.public_ip,
                                             host.hostname,
                                             host.public_hostname]))
        output = "%s\n%s" % (output, ",".join([host.connect_to,
                                               host.ip,
                                               host.public_ip,
                                               host.hostname,
                                               host.public_hostname]))

    output = "%s\n%s" % (output, notes)
    click.echo(output)
    facts_confirmed = click.confirm("Do the above facts look correct?")
    if not facts_confirmed:
        message = """
Edit %s with the desired values and run `atomic-openshift-installer --unattended install` to restart the install.
""" % oo_cfg.config_path
        click.echo(message)
        # Make sure we actually write out the config file.
        oo_cfg.save_to_disk()
        sys.exit(0)
    return default_facts
def check_hosts_config(oo_cfg, unattended):
    """
    Validate the host layout in oo_cfg before installing.

    Exits(1) on invalid configurations: exactly two masters, more than one
    master load balancer, a load balancer that doubles as a master/node,
    or a multi-master setup with no load balancer at all. Warns (and, when
    interactive, asks for confirmation) when no dedicated nodes exist.
    """
    click.clear()
    masters = [host for host in oo_cfg.deployment.hosts if host.is_master()]

    # Exactly two masters cannot form a proper HA quorum.
    if len(masters) == 2:
        click.echo("A minimum of 3 masters are required for HA deployments.")
        sys.exit(1)

    if len(masters) > 1:
        master_lb = [host for host in oo_cfg.deployment.hosts if host.is_master_lb()]
        if len(master_lb) > 1:
            click.echo('ERROR: More than one master load balancer specified. Only one is allowed.')
            sys.exit(1)
        elif len(master_lb) == 1:
            if master_lb[0].is_master() or master_lb[0].is_node():
                click.echo('ERROR: The master load balancer is configured as a master or node. '
                           'Please correct this.')
                sys.exit(1)
        else:
            message = """
ERROR: No master load balancer specified in config. You must provide the FQDN
of a load balancer to balance the API (port 8443) on all master hosts.
https://docs.openshift.org/latest/install_config/install/advanced_install.html#multiple-masters
"""
            click.echo(message)
            sys.exit(1)

    dedicated_nodes = [host for host in oo_cfg.deployment.hosts
                       if host.is_node() and not host.is_master()]
    if len(dedicated_nodes) == 0:
        message = """
WARNING: No dedicated nodes specified. By default, colocated masters have
their nodes set to unschedulable. If you proceed all nodes will be labelled
as schedulable.
"""
        # Unattended runs just get the warning; interactive runs must confirm.
        if unattended:
            click.echo(message)
        else:
            confirm_continue(message)

    return
def get_variant_and_version(multi_master=False):
    """
    Prompt the user to pick a product variant and return the chosen
    (variant, version) pair from get_variant_version_combos().

    NOTE(review): the response is used as a 1-based index with no range
    check, so an out-of-range answer raises IndexError. The 3.0 notice
    echoed for multi_master also looks truncated — confirm intended text.
    """
    message = "\nWhich variant would you like to install?\n\n"
    i = 1
    combos = get_variant_version_combos()
    # Build a numbered menu, one line per variant.
    for (variant, _) in combos:
        message = "%s\n(%s) %s" % (message, i, variant.description)
        i = i + 1
    message = "%s\n" % message

    click.echo(message)
    if multi_master:
        click.echo('NOTE: 3.0 installations are not')

    response = click.prompt("Choose a variant from above: ", default=1)
    # Menu is 1-based; the combos list is 0-based.
    product, version = combos[response - 1]

    return product, version
def confirm_continue(message):
    """Optionally echo *message*, then abort the program unless the user confirms."""
    if message:
        click.echo(message)
    # abort=True raises click.Abort (exits) on a negative answer.
    click.confirm("Are you ready to continue?", default=False, abort=True)
def error_if_missing_info(oo_cfg):
    """
    Validate that the config has everything an unattended run requires:
    hosts, ansible_ssh_user, an installable variant/version, and a role
    definition for every role assigned to a host.

    Exits(1) immediately on the first missing piece (or, for role
    mismatches, after the check). Side effect: pins the resolved version
    name back into oo_cfg.settings['variant_version'].
    """
    missing_info = False
    if not oo_cfg.deployment.hosts:
        missing_info = True
        click.echo('For unattended installs, hosts must be specified on the '
                   'command line or in the config file: %s' % oo_cfg.config_path)
        sys.exit(1)

    if 'ansible_ssh_user' not in oo_cfg.deployment.variables:
        click.echo("Must specify ansible_ssh_user in configuration file.")
        sys.exit(1)

    # Lookup a variant based on the key we were given:
    if not oo_cfg.settings['variant']:
        click.echo("No variant specified in configuration file.")
        sys.exit(1)
    ver = None
    if 'variant_version' in oo_cfg.settings:
        ver = oo_cfg.settings['variant_version']
    variant, version = find_variant(oo_cfg.settings['variant'], version=ver)
    if variant is None or version is None:
        err_variant_name = oo_cfg.settings['variant']
        if ver:
            err_variant_name = "%s %s" % (err_variant_name, ver)
        click.echo("%s is not an installable variant." % err_variant_name)
        sys.exit(1)
    oo_cfg.settings['variant_version'] = version.name

    # check that all listed host roles are included
    listed_roles = oo_cfg.get_host_roles_set()
    configured_roles = set([role for role in oo_cfg.deployment.roles])
    if listed_roles != configured_roles:
        missing_info = True
        click.echo('Any roles assigned to hosts must be defined.')

    if missing_info:
        sys.exit(1)
def get_proxy_hosts_excludes():
    """
    Prompt for HTTP/HTTPS proxy hostnames and, if either is given, for
    additional NO_PROXY entries.

    Returns a (http_proxy, https_proxy, excludes) tuple of strings;
    unanswered prompts yield empty strings.
    """
    intro = """
If a proxy is needed to reach HTTP and HTTPS traffic, please enter the
name below. This proxy will be configured by default for all processes
that need to reach systems outside the cluster. An example proxy value
would be:
http://proxy.example.com:8080/
More advanced configuration is possible if using Ansible directly:
https://docs.openshift.com/enterprise/latest/install_config/http_proxies.html
"""
    click.echo(intro)

    http_proxy_hostname = click.prompt("Specify your http proxy ? (ENTER for none)",
                                       default='')
    # TODO: Fix this prompt message and behavior. 'ENTER' will default
    # to the http_proxy_hostname if one was provided
    https_proxy_hostname = click.prompt("Specify your https proxy ? (ENTER for none)",
                                        default=http_proxy_hostname)

    proxy_excludes = ''
    if http_proxy_hostname or https_proxy_hostname:
        exclude_prompt = """
All hosts in your OpenShift inventory will automatically be added to the NO_PROXY value.
Please provide any additional hosts to be added to NO_PROXY. (ENTER for none)
"""
        proxy_excludes = click.prompt(exclude_prompt, default='')

    return http_proxy_hostname, https_proxy_hostname, proxy_excludes
def get_missing_info_from_user(oo_cfg):
    """
    Prompt the user for any information missing from the given
    configuration and return the updated oo_cfg.

    Prompts, in order and only when absent: ansible_ssh_user, product
    variant/version, the host list (plus roles and infra-node labels),
    the router subdomain, and finally the proxy settings (for variant
    versions >= 3.2 with no proxy variables already persisted).
    """
    click.clear()
    message = """
Welcome to the OpenShift Enterprise 3 installation.
Please confirm that following prerequisites have been met:
* All systems where OpenShift will be installed are running Red Hat Enterprise
Linux 7.
* All systems are properly subscribed to the required OpenShift Enterprise 3
repositories.
* All systems have run docker-storage-setup (part of the Red Hat docker RPM).
* All systems have working DNS that resolves not only from the perspective of
the installer, but also from within the cluster.
When the process completes you will have a default configuration for masters
and nodes. For ongoing environment maintenance it's recommended that the
official Ansible playbooks be used.
For more information on installation prerequisites please see:
https://docs.openshift.com/enterprise/latest/admin_guide/install/prerequisites.html
"""
    confirm_continue(message)
    click.clear()

    if not oo_cfg.deployment.variables.get('ansible_ssh_user', False):
        oo_cfg.deployment.variables['ansible_ssh_user'] = get_ansible_ssh_user()
        click.clear()

    if not oo_cfg.settings.get('variant', ''):
        variant, version = get_variant_and_version()
        oo_cfg.settings['variant'] = variant.name
        oo_cfg.settings['variant_version'] = version.name
        oo_cfg.settings['variant_subtype'] = version.subtype
        click.clear()

    if not oo_cfg.deployment.hosts:
        oo_cfg.deployment.hosts, roles = collect_hosts(oo_cfg)
        set_infra_nodes(oo_cfg.deployment.hosts)
        # Every role collected from the hosts gets an (empty) Role entry.
        for role in roles:
            oo_cfg.deployment.roles[role] = Role(name=role, variables={})
        click.clear()

    if 'master_routingconfig_subdomain' not in oo_cfg.deployment.variables:
        oo_cfg.deployment.variables['master_routingconfig_subdomain'] = \
            get_routingconfig_subdomain()
        click.clear()

    # Are any proxy vars already persisted?
    proxy_vars = ['proxy_exclude_hosts', 'proxy_https', 'proxy_http']
    # Empty list if NO proxy vars were persisted.
    # BUGFIX: compare by value with != rather than `is not`, which tested
    # object identity against the 'UNSET' literal and only worked by
    # accident of CPython string interning.
    saved_proxy_vars = [pv for pv in proxy_vars
                        if oo_cfg.deployment.variables.get(pv, 'UNSET') != 'UNSET']
    INSTALLER_LOG.debug("Evaluated proxy settings, found %s persisted values",
                        len(saved_proxy_vars))
    current_version = parse_version(
        oo_cfg.settings.get('variant_version', '0.0'))
    min_version = parse_version('3.2')

    # No proxy vars were saved and we are running a version which
    # recognizes proxy parameters. We must prompt the user for values
    # if this conditional is true.
    if not saved_proxy_vars and current_version >= min_version:
        INSTALLER_LOG.debug("Prompting user to enter proxy values")
        http_proxy, https_proxy, proxy_excludes = get_proxy_hosts_excludes()
        oo_cfg.deployment.variables['proxy_http'] = http_proxy
        oo_cfg.deployment.variables['proxy_https'] = https_proxy
        oo_cfg.deployment.variables['proxy_exclude_hosts'] = proxy_excludes
        click.clear()

    return oo_cfg
def collect_new_nodes(oo_cfg):
    """Interactively gather the node hosts to add during a scaleup run."""
    click.clear()
    click.echo('*** New Node Configuration ***')
    click.echo("""
Add new nodes here
""")
    # masters_set=True: scaleup never adds masters; existing_env marks new hosts.
    added_nodes, _ = collect_hosts(oo_cfg, existing_env=True, masters_set=True, print_summary=False)
    return added_nodes
def get_installed_hosts(hosts, callback_facts):
    """
    Split the masters/nodes in *hosts* into (installed, uninstalled)
    lists based on the version facts gathered for each host.

    Hosts with no entry in callback_facts appear in neither list.
    """
    installed = []
    uninstalled = []
    candidates = [h for h in hosts if h.is_master() or h.is_node()]
    for host in candidates:
        if host.connect_to not in callback_facts:
            continue
        if is_installed_host(host, callback_facts):
            INSTALLER_LOG.debug("%s is already installed", str(host))
            installed.append(host)
        else:
            INSTALLER_LOG.debug("%s is not installed", str(host))
            uninstalled.append(host)
    return installed, uninstalled
def is_installed_host(host, callback_facts):
    """
    Return a truthy value when the gathered facts for *host* carry a real
    version string (present, non-empty, and not the literal 'None').
    """
    facts = callback_facts[host.connect_to]
    version_found = 'common' in facts and \
        facts['common'].get('version', '') and \
        facts['common'].get('version', '') != 'None'
    return version_found
def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force):
    """
    We get here once there are hosts in oo_cfg and we need to find out what
    state they are in. There are several different cases that might occur:

    1. All hosts in oo_cfg are uninstalled. In this case, we should proceed
       with a normal installation.
    2. All hosts in oo_cfg are installed. In this case, ask the user if they
       want to force reinstall or exit. We can also hint in this case about
       the scaleup workflow.
    3. Some hosts are installed and some are uninstalled. In this case, prompt
       the user if they want to force (re)install all hosts specified or direct
       them to the scaleup workflow and exit.

    Returns (hosts_to_run_on, callback_facts); exits(1) when the user
    declines to force a (re)install.
    """
    hosts_to_run_on = []
    # Check if master or nodes already have something installed
    installed_hosts, uninstalled_hosts = get_installed_hosts(oo_cfg.deployment.hosts,
                                                             callback_facts)
    nodes = [host for host in oo_cfg.deployment.hosts if host.is_node()]
    masters_and_nodes = [host for host in oo_cfg.deployment.hosts if host.is_master() or host.is_node()]

    in_hosts = [str(h) for h in installed_hosts]
    un_hosts = [str(h) for h in uninstalled_hosts]
    all_hosts = [str(h) for h in oo_cfg.deployment.hosts]
    m_and_n = [str(h) for h in masters_and_nodes]

    INSTALLER_LOG.debug("installed hosts: %s", ", ".join(in_hosts))
    INSTALLER_LOG.debug("uninstalled hosts: %s", ", ".join(un_hosts))
    INSTALLER_LOG.debug("deployment hosts: %s", ", ".join(all_hosts))
    INSTALLER_LOG.debug("masters and nodes: %s", ", ".join(m_and_n))

    # Case (1): All uninstalled hosts
    # NOTE(review): this compares against len(nodes), while case (2) below
    # compares against len(masters_and_nodes) — confirm this asymmetry is
    # intentional before changing it.
    if len(uninstalled_hosts) == len(nodes):
        click.echo('All hosts in config are uninstalled. Proceeding with installation...')
        hosts_to_run_on = list(oo_cfg.deployment.hosts)
    else:
        # Case (2): All installed hosts
        if len(installed_hosts) == len(masters_and_nodes):
            message = """
All specified hosts in specified environment are installed.
"""
        # Case (3): Some installed, some uninstalled
        else:
            message = """
A mix of installed and uninstalled hosts have been detected in your environment.
Please make sure your environment was installed successfully before adding new nodes.
"""

        # Still inside the case 2/3 else condition
        mixed_msg = """
\tInstalled hosts:
\t\t{inst_hosts}
\tUninstalled hosts:
\t\t{uninst_hosts}""".format(inst_hosts=", ".join(in_hosts), uninst_hosts=", ".join(un_hosts))
        click.echo(mixed_msg)

        # Out of the case 2/3 if/else
        click.echo(message)

        # Interactive users may force a reinstall here; unattended runs
        # require the --force flag to have been given up front.
        if not unattended:
            response = click.confirm('Do you want to (re)install the environment?\n\n'
                                     'Note: This will potentially erase any custom changes.')
            if response:
                hosts_to_run_on = list(oo_cfg.deployment.hosts)
                force = True
        elif unattended and force:
            hosts_to_run_on = list(oo_cfg.deployment.hosts)
        if not force:
            message = """
If you want to force reinstall of your environment, run:
`atomic-openshift-installer install --force`
If you want to add new nodes to this environment, run:
`atomic-openshift-installer scaleup`
"""
            click.echo(message)
            sys.exit(1)

    return hosts_to_run_on, callback_facts
def set_infra_nodes(hosts):
    """
    Mark infra nodes by setting node_labels to the infra region.

    When every host is a master (all-in-one layout) all hosts are
    labelled; otherwise only the first two schedulable nodes are.
    """
    if all(host.is_master() for host in hosts):
        candidates = hosts
    else:
        candidates = [h for h in hosts if h.is_schedulable_node(hosts)][:2]
    for host in candidates:
        host.node_labels = "{'region': 'infra'}"
def run_config_playbook(oo_cfg, hosts_to_run_on, unattended, verbose, gen_inventory):
    """
    Generate the Ansible inventory and run the main install playbook.

    Exits(0) right after writing the inventory when gen_inventory is set;
    exits(1) if the playbook run reports an error. When interactive, asks
    for confirmation before launching the install.
    """
    # Write Ansible inventory file to disk:
    inventory_file = openshift_ansible.generate_inventory(hosts_to_run_on)

    click.echo()
    click.echo('Wrote atomic-openshift-installer config: %s' % oo_cfg.config_path)
    click.echo("Wrote Ansible inventory: %s" % inventory_file)
    click.echo()

    if gen_inventory:
        sys.exit(0)

    click.echo('Ready to run installation process.')
    message = """
If changes are needed please edit the installer.cfg.yml config file above and re-run.
"""
    if not unattended:
        confirm_continue(message)

    error = openshift_ansible.run_main_playbook(inventory_file, oo_cfg.deployment.hosts,
                                                hosts_to_run_on, verbose)

    if error:
        # The bootstrap script will print out the log location.
        message = """
An error was detected. After resolving the problem please relaunch the
installation process.
"""
        click.echo(message)
        sys.exit(1)
    else:
        message = """
The installation was successful!
If this is your first time installing please take a look at the Administrator
Guide for advanced options related to routing, storage, authentication, and
more:
http://docs.openshift.com/enterprise/latest/admin_guide/overview.html
"""
        click.echo(message)
@click.group(context_settings=dict(max_content_width=120))
@click.pass_context
@click.option('--unattended', '-u', is_flag=True, default=False)
@click.option('--configuration', '-c',
              type=click.Path(file_okay=True,
                              dir_okay=False,
                              writable=True,
                              readable=True),
              default=None)
@click.option('--ansible-playbook-directory',
              '-a',
              type=click.Path(exists=True,
                              file_okay=False,
                              dir_okay=True,
                              readable=True),
              # callback=validate_ansible_dir,
              default=DEFAULT_PLAYBOOK_DIR,
              envvar='OO_ANSIBLE_PLAYBOOK_DIRECTORY')
@click.option('--ansible-log-path',
              type=click.Path(file_okay=True,
                              dir_okay=False,
                              writable=True,
                              readable=True),
              default="/tmp/ansible.log")
@click.option('-v', '--verbose',
              is_flag=True, default=False)
@click.option('-d', '--debug',
              help="Enable installer debugging (/tmp/installer.log)",
              is_flag=True, default=False)
@click.help_option('--help', '-h')
# pylint: disable=too-many-arguments
# pylint: disable=line-too-long
# Main CLI entrypoint, not much we can do about too many arguments.
def cli(ctx, unattended, configuration, ansible_playbook_directory, ansible_log_path, verbose, debug):
    """
    atomic-openshift-installer makes the process for installing OSE or AEP
    easier by interactively gathering the data needed to run on each host.
    It can also be run in unattended mode if provided with a configuration file.
    Further reading: https://docs.openshift.com/enterprise/latest/install_config/install/quick_install.html
    """
    if debug:
        # DEFAULT log level threshold is set to CRITICAL (the
        # highest), anything below that (we only use debug/warning
        # presently) is not logged. If '-d' is given though, we'll
        # lower the threshold to debug (almost everything gets through)
        INSTALLER_LOG.setLevel(logging.DEBUG)
        INSTALLER_LOG.debug("Quick Installer debugging initialized")

    # Stash the shared options on the click context for the subcommands.
    ctx.obj = {}
    ctx.obj['unattended'] = unattended
    ctx.obj['configuration'] = configuration
    ctx.obj['ansible_log_path'] = ansible_log_path
    ctx.obj['verbose'] = verbose

    try:
        oo_cfg = OOConfig(ctx.obj['configuration'])
    except OOConfigInvalidHostError as err:
        click.echo(err)
        sys.exit(1)

    # If no playbook dir on the CLI, check the config:
    if not ansible_playbook_directory:
        ansible_playbook_directory = oo_cfg.settings.get('ansible_playbook_directory', '')
    # If still no playbook dir, check for the default location:
    if not ansible_playbook_directory and os.path.exists(DEFAULT_PLAYBOOK_DIR):
        ansible_playbook_directory = DEFAULT_PLAYBOOK_DIR
    validate_ansible_dir(ansible_playbook_directory)
    oo_cfg.settings['ansible_playbook_directory'] = ansible_playbook_directory
    oo_cfg.ansible_playbook_directory = ansible_playbook_directory
    ctx.obj['ansible_playbook_directory'] = ansible_playbook_directory

    if os.path.exists(DEFAULT_ANSIBLE_CONFIG):
        # If we're installed by RPM this file should exist and we can use it as our default:
        oo_cfg.settings['ansible_config'] = DEFAULT_ANSIBLE_CONFIG

    # Without -v, use the quiet ansible config when it is available.
    if not verbose and os.path.exists(QUIET_ANSIBLE_CONFIG):
        oo_cfg.settings['ansible_quiet_config'] = QUIET_ANSIBLE_CONFIG

    oo_cfg.settings['ansible_log_path'] = ctx.obj['ansible_log_path']

    ctx.obj['oo_cfg'] = oo_cfg
    openshift_ansible.set_config(oo_cfg)
@click.command()
@click.pass_context
def uninstall(ctx):
    """
    Uninstall OpenShift from the configured hosts, confirming the host
    list interactively first unless running unattended.
    """
    oo_cfg = ctx.obj['oo_cfg']
    verbose = ctx.obj['verbose']

    # Support both the newer (deployment.hosts) and older (hosts) config
    # layouts.
    if hasattr(oo_cfg, 'deployment'):
        hosts = oo_cfg.deployment.hosts
    elif hasattr(oo_cfg, 'hosts'):
        hosts = oo_cfg.hosts
    else:
        click.echo("No hosts defined in: %s" % oo_cfg.config_path)
        sys.exit(1)

    click.echo("OpenShift will be uninstalled from the following hosts:\n")
    if not ctx.obj['unattended']:
        # Prompt interactively to confirm:
        for host in hosts:
            click.echo(" * %s" % host.connect_to)
        proceed = click.confirm("\nDo you want to proceed?")
        if not proceed:
            click.echo("Uninstall cancelled.")
            sys.exit(0)

    openshift_ansible.run_uninstall_playbook(hosts, verbose)
@click.command(context_settings=dict(max_content_width=120))
@click.option('--latest-minor', '-l', is_flag=True, default=False)
@click.option('--next-major', '-n', is_flag=True, default=False)
@click.pass_context
# pylint: disable=too-many-statements,too-many-branches
def upgrade(ctx, latest_minor, next_major):
    """Retired upgrade workflow: print guidance and exit non-zero."""
    for line in ("Upgrades are no longer supported by this version of installer",
                 "Please see the documentation for manual upgrade:",
                 "https://docs.openshift.com/container-platform/latest/install_config/upgrading/automated_upgrades.html"):
        click.echo(line)
    sys.exit(1)
@click.command()
@click.option('--force', '-f', is_flag=True, default=False)
@click.option('--gen-inventory', is_flag=True, default=False,
              help="Generate an Ansible inventory file and exit.")
@click.pass_context
def install(ctx, force, gen_inventory):
    """
    Main install workflow: gather any missing config (interactively when
    not unattended), validate it, collect facts from the hosts, decide
    which hosts to run on, then run the config playbook.
    """
    oo_cfg = ctx.obj['oo_cfg']
    verbose = ctx.obj['verbose']
    unattended = ctx.obj['unattended']

    if unattended:
        error_if_missing_info(oo_cfg)
    else:
        oo_cfg = get_missing_info_from_user(oo_cfg)

    check_hosts_config(oo_cfg, unattended)

    print_installation_summary(oo_cfg.deployment.hosts,
                               oo_cfg.settings.get('variant_version', None))
    click.echo('Gathering information from hosts...')
    callback_facts, error = openshift_ansible.default_facts(oo_cfg.deployment.hosts,
                                                            verbose)

    if error or callback_facts is None:
        click.echo("There was a problem fetching the required information. "
                   "Please see {} for details.".format(oo_cfg.settings['ansible_log_path']))
        sys.exit(1)

    hosts_to_run_on, callback_facts = get_hosts_to_run_on(oo_cfg,
                                                          callback_facts,
                                                          unattended,
                                                          force)

    # We already verified this is not the case for unattended installs, so this can
    # only trigger for live CLI users:
    if not ctx.obj['unattended'] and len(oo_cfg.calc_missing_facts()) > 0:
        confirm_hosts_facts(oo_cfg, callback_facts)

    # Write quick installer config file to disk:
    oo_cfg.save_to_disk()
    run_config_playbook(oo_cfg, hosts_to_run_on, unattended, verbose, gen_inventory)
@click.command()
@click.option('--gen-inventory', is_flag=True, default=False,
              help="Generate an Ansible inventory file and exit.")
@click.pass_context
def scaleup(ctx, gen_inventory):
    """
    Add new nodes to a previously installed environment: collect the new
    hosts interactively, gather facts, then run the config playbook on
    the installed masters plus the new nodes. Unattended mode is not
    supported and exits(1).
    """
    oo_cfg = ctx.obj['oo_cfg']
    verbose = ctx.obj['verbose']
    unattended = ctx.obj['unattended']

    installed_hosts = list(oo_cfg.deployment.hosts)

    if len(installed_hosts) == 0:
        click.echo('No hosts specified.')
        sys.exit(1)

    click.echo('Welcome to the OpenShift Enterprise 3 Scaleup utility.')

    # Scaleup requires manual data entry. Therefore, we do not support
    # unattended operations.
    if unattended:
        msg = """
---
The 'scaleup' operation does not support unattended
functionality. Re-run the installer without the '-u' or '--unattended'
option to continue.
"""
        click.echo(msg)
        sys.exit(1)

    # Resume normal scaleup workflow
    print_installation_summary(installed_hosts,
                               oo_cfg.settings['variant_version'],
                               verbose=False,)
    message = """
---
We have detected this previously installed OpenShift environment.
This tool will guide you through the process of adding additional
nodes to your cluster.
"""
    confirm_continue(message)

    error_if_missing_info(oo_cfg)
    check_hosts_config(oo_cfg, True)

    installed_masters = [host for host in installed_hosts if host.is_master()]
    new_nodes = collect_new_nodes(oo_cfg)
    oo_cfg.deployment.hosts.extend(new_nodes)
    hosts_to_run_on = installed_masters + new_nodes
    openshift_ansible.set_config(oo_cfg)
    click.echo('Gathering information from hosts...')
    callback_facts, error = openshift_ansible.default_facts(oo_cfg.deployment.hosts, verbose)
    if error or callback_facts is None:
        click.echo("There was a problem fetching the required information. See "
                   "{} for details.".format(oo_cfg.settings['ansible_log_path']))
        sys.exit(1)

    print_installation_summary(oo_cfg.deployment.hosts,
                               oo_cfg.settings.get('variant_version', None))

    # NOTE(review): facts were already gathered just above — this second
    # default_facts() run looks redundant; confirm before removing it.
    click.echo('Gathering information from hosts...')
    callback_facts, error = openshift_ansible.default_facts(oo_cfg.deployment.hosts,
                                                            verbose)

    if error or callback_facts is None:
        click.echo("There was a problem fetching the required information. "
                   "Please see {} for details.".format(oo_cfg.settings['ansible_log_path']))
        sys.exit(1)

    # We already verified this is not the case for unattended installs, so this can
    # only trigger for live CLI users:
    if not ctx.obj['unattended'] and len(oo_cfg.calc_missing_facts()) > 0:
        confirm_hosts_facts(oo_cfg, callback_facts)

    # Write quick installer config file to disk:
    oo_cfg.save_to_disk()
    run_config_playbook(oo_cfg, hosts_to_run_on, unattended, verbose, gen_inventory)
# Register the subcommands on the top-level click group.
cli.add_command(install)
cli.add_command(scaleup)
cli.add_command(upgrade)
cli.add_command(uninstall)

if __name__ == '__main__':
    # This is expected behaviour for context passing with click library:
    # pylint: disable=unexpected-keyword-arg
    cli(obj={})
|
#!/usr/bin/env python
# encoding: utf-8
""" Condor job adaptor implementation
"""
__author__ = "Ole Weidner"
__copyright__ = "Copyright 2013, The SAGA Project"
__license__ = "MIT"
import saga.utils.which
import saga.utils.pty_shell
import saga.adaptors.cpi.base
import saga.adaptors.cpi.job
from saga.job.constants import *
from transferdirectives import TransferDirectives
import re
import os
import time
from copy import deepcopy
from cgi import parse_qs
SYNC_CALL = saga.adaptors.cpi.decorators.SYNC_CALL
ASYNC_CALL = saga.adaptors.cpi.decorators.ASYNC_CALL
# --------------------------------------------------------------------
#
def log_error_and_raise(message, exception, logger):
    """Record *message* through logger.error, then raise it as *exception*."""
    logger.error(message)
    raise exception(message)
# --------------------------------------------------------------------
#
def _condor_to_saga_jobstate(condorjs):
    """ translates a condor one-letter state to saga
    """
    # From: http://pages.cs.wisc.edu/~adesmet/status.html
    #
    # JobStatus in job ClassAds
    #
    # 0 Unexpanded     U
    # 1 Idle           I
    # 2 Running        R
    # 3 Removed        X
    # 4 Completed      C
    # 5 Held           H
    # 6 Submission_err E
    state_map = {
        0: saga.job.PENDING,   # Unexpanded
        1: saga.job.PENDING,   # Idle
        2: saga.job.RUNNING,   # Running
        3: saga.job.CANCELED,  # Removed
        4: saga.job.DONE,      # Completed
        5: saga.job.PENDING,   # Held
        6: saga.job.FAILED,    # Submission_err
    }
    return state_map.get(int(condorjs), saga.job.UNKNOWN)
# --------------------------------------------------------------------
#
def _condorscript_generator(url, logger, jd, option_dict=None):
    """ generates a Condor submit description from a SAGA job description

        :param url:         job service URL (currently unused in the body)
        :param logger:      adaptor logger (currently unused in the body)
        :param jd:          saga.job.Description to translate
        :param option_dict: extra submit-file options, typically parsed from
                            the job service URL query string (may be None)
        :returns: the complete submit description as a single string
    """
    condor_file = str()

    ##### OPTIONS PASSED VIA JOB SERVICE URL #####
    ##
    if option_dict is not None:
        condor_file += "\n##### DEFAULT OPTIONS PASSED VIA JOB SERVICE URL #####\n##"

        # special treatment for universe - defaults to 'vanilla'
        if 'universe' not in option_dict:
            condor_file += "\nuniverse = vanilla"

        for (key, value) in option_dict.iteritems():
            condor_file += "\n%s = %s" % (key, value)

    ##### OPTIONS PASSED VIA JOB DESCRIPTION #####
    ##
    # NOTE(review): the emitted header below says 'JOB SERVICE URL' although
    # this section handles job-description options -- looks like a copy/paste
    # slip in the generated comment text. Harmless to Condor, but confusing.
    condor_file += "\n\n##### OPTIONS PASSED VIA JOB SERVICE URL #####\n##"
    requirements = "requirements = "

    # executable -> executable
    if jd.executable is not None:
        condor_file += "\nexecutable = %s" % jd.executable

    # arguments -> arguments
    arguments = "arguments = "
    if jd.arguments is not None:
        for arg in jd.arguments:
            # Condor HATES double quotes in the arguments. It'll return
            # some crap like: "Found illegal unescaped double-quote: ...
            # That's why we escape them.
            arguments += "%s " % (arg.replace('"', '\\"'))
        condor_file += "\n%s" % arguments

    # file_transfer -> transfer_input_files / transfer_output_files
    if jd.file_transfer is not None:
        td = TransferDirectives(jd.file_transfer)
        # Condor only supports plain overwrite transfers; reject the
        # append-style directives outright.
        if len(td.in_append_dict) > 0:
            raise Exception('FileTransfer append syntax (>>) not supported by Condor: %s' % td.in_append_dict)
        if len(td.out_append_dict) > 0:
            raise Exception('FileTransfer append syntax (<<) not supported by Condor: %s' % td.out_append_dict)

        if len(td.in_overwrite_dict) > 0:
            transfer_input_files = "transfer_input_files = "
            for (source, target) in td.in_overwrite_dict.iteritems():
                # make sure source is a file and not a dir
                (s_path, s_entry) = os.path.split(source)
                if len(s_entry) < 1:
                    raise Exception('Condor accepts only files (not directories) as FileTransfer sources: %s' % source)
                # make sure target is just a file
                (t_path, t_entry) = os.path.split(target)
                if len(t_path) > 1:
                    raise Exception('Condor accepts only filenames (without paths) as FileTransfer targets: %s' % target)
                # make sure source and target file are the same
                if s_entry != t_entry:
                    raise Exception('For Condor source file name and target file name have to be identical: %s != %s' % (s_entry, t_entry))
                # entry ok - add to job script
                transfer_input_files += "%s, " % source
            condor_file += "\n%s" % transfer_input_files

        if len(td.out_overwrite_dict) > 0:
            transfer_output_files = "transfer_output_files = "
            for (source, target) in td.out_overwrite_dict.iteritems():
                # make sure source is a file and not a dir
                (s_path, s_entry) = os.path.split(source)
                if len(s_entry) < 1:
                    raise Exception('Condor accepts only files (not directories) as FileTransfer sources: %s' % source)
                # make sure target is just a file
                (t_path, t_entry) = os.path.split(target)
                if len(t_path) > 1:
                    raise Exception('Condor accepts only filenames (without paths) as FileTransfer targets: %s' % target)
                # make sure source and target file are the same
                if s_entry != t_entry:
                    raise Exception('For Condor source file name and target file name have to be identical: %s != %s' % (s_entry, t_entry))
                # entry ok - add to job script
                transfer_output_files += "%s, " % source
            condor_file += "\n%s" % transfer_output_files

    # always define log
    condor_file += "\nlog = saga-condor-job-$(cluster).log "

    # output -> output
    if jd.output is not None:
        condor_file += "\noutput = %s " % jd.output

    # error -> error
    if jd.error is not None:
        condor_file += "\nerror = %s " % jd.error

    # environment -> environment (semicolon-separated KEY=VALUE list)
    environment = "environment = "
    if jd.environment is not None:
        variable_list = str()
        for key in jd.environment.keys():
            variable_list += "%s=%s;" % (key, jd.environment[key])
        environment += "%s " % variable_list
        condor_file += "\n%s" % environment

    # project -> +ProjectName
    if jd.project is not None:
        condor_file += "\n+ProjectName = \"%s\"" % str(jd.project)

    # candidate hosts -> SiteList + requirements (OSG glidein convention)
    if jd.candidate_hosts is not None:
        hosts = ""
        for host in jd.candidate_hosts:
            hosts += "%s, " % host
        sitelist = "+SiteList = \"%s\"" % hosts
        requirements += "(stringListMember(GLIDEIN_ResourceName,SiteList) == True)"
        condor_file += "\n%s" % sitelist
        condor_file += "\n%s" % requirements

    condor_file += "\n\nqueue"

    return condor_file
# --------------------------------------------------------------------
# some private defs
#
_PTY_TIMEOUT = 2.0

# --------------------------------------------------------------------
# the adaptor name
#
_ADAPTOR_NAME = "saga.adaptor.condorjob"
_ADAPTOR_SCHEMAS = ["condor", "condor+ssh", "condor+gsissh"]
_ADAPTOR_OPTIONS = [
    {
        # placeholder option -- read in Adaptor.__init__ but otherwise unused
        'category': 'saga.adaptor.condorjob',
        'name': 'foo',
        'type': bool,
        'default': False,
        'valid_options': [True, False],
        'documentation': """Doc""",
        'env_variable': None
    },
]

# --------------------------------------------------------------------
# the adaptor capabilities & supported attributes
#
_ADAPTOR_CAPABILITIES = {
    # job description attributes accepted by create_job()
    "jdes_attributes": [saga.job.NAME,
                        saga.job.EXECUTABLE,
                        saga.job.ARGUMENTS,
                        saga.job.ENVIRONMENT,
                        saga.job.INPUT,
                        saga.job.OUTPUT,
                        saga.job.ERROR,
                        saga.job.QUEUE,
                        saga.job.PROJECT,
                        saga.job.WALL_TIME_LIMIT,
                        saga.job.WORKING_DIRECTORY,
                        saga.job.CANDIDATE_HOSTS,
                        saga.job.TOTAL_CPU_COUNT],
    # attributes that can be queried on a job instance
    "job_attributes": [saga.job.EXIT_CODE,
                       saga.job.EXECUTION_HOSTS,
                       saga.job.CREATED,
                       saga.job.STARTED,
                       saga.job.FINISHED],
    "metrics": [saga.job.STATE],
    "contexts": {"ssh": "SSH public/private keypair",
                 "x509": "GSISSH X509 proxy context",
                 "userpass": "username/password pair (ssh)"}
}

# --------------------------------------------------------------------
# the adaptor documentation
#
_ADAPTOR_DOC = {
    "name": _ADAPTOR_NAME,
    "cfg_options": _ADAPTOR_OPTIONS,
    "capabilities": _ADAPTOR_CAPABILITIES,
    "description": """The Condor adaptor can run and manage jobs on local and
remote Condor gateways.""",
    "details": """TODO""",
    # fixed typo in the user-visible schema description: 'conenct' -> 'connect'
    "schemas": {"condor": "connect to a local Condor gateway",
                "condor+ssh": "connect to a remote Condor gateway via SSH",
                "condor+gsissh": "connect to a remote Condor gateway via GSISSH"}
}

# --------------------------------------------------------------------
# the adaptor info is used to register the adaptor with SAGA
#
_ADAPTOR_INFO = {
    "name": _ADAPTOR_NAME,
    "version": "v0.1",
    "schemas": _ADAPTOR_SCHEMAS,
    "cpis": [
        {
            "type": "saga.job.Service",
            "class": "CondorJobService"
        },
        {
            "type": "saga.job.Job",
            "class": "CondorJob"
        }
    ]
}
###############################################################################
# The adaptor class
class Adaptor (saga.adaptors.cpi.base.AdaptorBase):
    """ this is the actual adaptor class, which gets loaded by SAGA (i.e. by
        the SAGA engine), and which registers the CPI implementation classes
        which provide the adaptor's functionality.
    """

    # ----------------------------------------------------------------
    #
    def __init__(self):
        saga.adaptors.cpi.base.AdaptorBase.__init__(self,
            _ADAPTOR_INFO, _ADAPTOR_OPTIONS)

        # raw string so the regex backslashes are not interpreted as
        # (invalid) Python string escapes
        self.id_re = re.compile(r'^\[(.*)\]-\[(.*?)\]$')
        self.opts = self.get_config()
        self.foo = self.opts['foo'].get_value()

    # ----------------------------------------------------------------
    #
    def sanity_check(self):
        # FIXME: also check for gsissh
        pass

    # ----------------------------------------------------------------
    #
    def parse_id(self, id):
        """ Split a job id of the form '[rm]-[pid]' into its two parts.

            :raises saga.BadParameter: if *id* does not match that pattern.
        """
        match = self.id_re.match(id)
        if not match or len(match.groups()) != 2:
            raise saga.BadParameter("Cannot parse job id '%s'" % id)
        return (match.group(1), match.group(2))
###############################################################################
#
class CondorJobService (saga.adaptors.cpi.job.Service):
    """ implements saga.adaptors.cpi.job.Service

        Talks to a (local or remote) Condor gateway through a PTY shell,
        driving the condor_* command-line tools.
    """

    # ----------------------------------------------------------------
    #
    def __init__(self, api, adaptor):
        # delegate CPI bookkeeping to the base class
        self._cpi_base = super(CondorJobService, self)
        self._cpi_base.__init__(api, adaptor)

    # ----------------------------------------------------------------
    #
    def __del__(self):
        # FIXME: not sure if we should PURGE here -- that removes states which
        # might not be evaluated, yet. Should we mark state evaluation
        # separately?
        # cmd_state () { touch $DIR/purgeable; ... }
        # When should that be done?
        #self._logger.error("adaptor dying... %s" % self.njobs)
        #self._logger.trace()
        self.finalize(kill_shell=True)

    # ----------------------------------------------------------------
    #
    @SYNC_CALL
    def init_instance(self, adaptor_state, rm_url, session):
        """ service instance constructor

            Sets up a PTY shell connection to the Condor gateway (directly,
            or via ssh/gsissh depending on the URL scheme) and checks that
            the required condor_* tools exist there.
        """
        # Turn this off by default.
        self._disable_ptywrapper_logging = True

        self.rm = rm_url
        self.session = session
        self.ppn = 0
        self.is_cray = False
        self.jobs = dict()           # job_id -> cached job-info dict
        self.query_options = dict()  # submit options from the URL query string

        rm_scheme = rm_url.scheme
        pty_url = deepcopy(rm_url)

        # this adaptor supports options that can be passed via the
        # 'query' component of the job service URL.
        if rm_url.query is not None:
            for key, val in parse_qs(rm_url.query).iteritems():
                self.query_options[key] = val[0]

        # we need to extract the scheme for PTYShell. That's basically the
        # job.Service Url without the condor+ part. We use the PTYShell to
        # execute condor commands either locally or via gsissh or ssh.
        if rm_scheme == "condor":
            pty_url.scheme = "fork"
        elif rm_scheme == "condor+ssh":
            pty_url.scheme = "ssh"
        elif rm_scheme == "condor+gsissh":
            pty_url.scheme = "gsissh"

        # these are the commands that we need in order to interact with Condor.
        # the adaptor will try to find them during initialize(self) and bail
        # out in case they are not available.
        self._commands = {'condor_version': None,
                          'condor_submit': None,
                          'condor_q': None,
                          'condor_rm': None}

        if self._disable_ptywrapper_logging:
            # create a null logger to silence the PTY wrapper!
            import logging

            class NullHandler(logging.Handler):
                def emit(self, record):
                    pass
            nh = NullHandler()
            # NOTE(review): addHandler() returns None, so 'null_logger' is
            # always None here -- presumably PTYShell treats a None logger
            # argument as "use default / no logging". TODO confirm.
            null_logger = logging.getLogger("PTYShell").addHandler(nh)

            self.shell = saga.utils.pty_shell.PTYShell(pty_url,
                self.session, null_logger)
        else:
            self.shell = saga.utils.pty_shell.PTYShell(pty_url,
                self.session)

        self.shell.set_initialize_hook(self.initialize)
        self.shell.set_finalize_hook(self.finalize)

        self.initialize()

    # ----------------------------------------------------------------
    #
    def initialize(self):
        """ Locate the required condor_* tools on the target host and record
            their paths (plus, for condor_version, the reported version).
        """
        # check if all required condor tools are available
        for cmd in self._commands.keys():
            ret, out, _ = self.shell.run_sync("which %s " % cmd)
            if ret != 0:
                message = "Error finding Condor tools: %s" % out
                log_error_and_raise(message, saga.NoSuccess, self._logger)
            else:
                path = out.strip()  # strip removes newline
                if cmd == 'condor_version':
                    ret, out, _ = self.shell.run_sync("%s" % cmd)
                    if ret != 0:
                        message = "Error determining Condor version: %s" % out
                        log_error_and_raise(message, saga.NoSuccess,
                            self._logger)
                    else:
                        # version is reported as:
                        # $CondorVersion: 7.8.6 Oct 25 2012 $
                        # $CondorPlatform: X86_64-CentOS_5.7 $
                        lines = out.split('\n')
                        version = lines[0].replace("$CondorVersion: ", "")
                        version = version.strip(" $")

                # add path and version to the command dictionary
                # NOTE(review): 'version' is only assigned while processing
                # 'condor_version'; for other commands this relies on a value
                # left over from a previous loop iteration -- TODO confirm
                # intended behaviour.
                self._commands[cmd] = {"path": path,
                                       "version": version}

        self._logger.info("Found Condor tools: %s" % self._commands)

    # ----------------------------------------------------------------
    #
    def finalize(self, kill_shell=False):
        # no explicit cleanup; 'kill_shell' is accepted (see __del__)
        # but currently ignored.
        pass

    # ----------------------------------------------------------------
    #
    def _job_run(self, jd):
        """ runs a job via condor_submit
        """
        # create a Condor job script from SAGA job description
        script = _condorscript_generator(url=self.rm, logger=self._logger, jd=jd,
            option_dict=self.query_options)
        self._logger.debug("Generated Condor script: %s" % script)

        ret, out, _ = self.shell.run_sync("echo \'%s\' | %s -" \
            % (script, self._commands['condor_submit']['path']))

        if ret != 0:
            # something went wrong
            message = "Error running job via 'condor_submit': %s. Script was: %s" \
                % (out, script)
            log_error_and_raise(message, saga.NoSuccess, self._logger)
        else:
            # stdout contains the job id
            # NOTE(review): if no line contains "** Proc", 'pid' stays
            # unbound and the format below raises NameError -- TODO confirm
            # condor_submit's verbose output always includes it.
            for line in out.split("\n"):
                if "** Proc" in line:
                    pid = line.split()[2][:-1]

            # we don't want the 'query' part of the URL to be part of the ID,
            # simply because it can get terribly long (and ugly). to get rid
            # of it, we clone the URL and set the query part to None.
            rm_clone = deepcopy(self.rm)
            rm_clone.query = ""
            rm_clone.path = ""

            job_id = "[%s]-[%s]" % (rm_clone, pid)
            self._logger.info("Submitted Condor job with id: %s" % job_id)

            # add job to internal list of known jobs.
            self.jobs[job_id] = {
                'state': saga.job.PENDING,
                'exec_hosts': None,
                'returncode': None,
                'create_time': None,
                'start_time': None,
                'end_time': None,
                'gone': False
            }
            return job_id

    # ----------------------------------------------------------------
    #
    def _retrieve_job(self, job_id):
        """ see if we can get some info about a job that we don't
            know anything about
        """
        rm, pid = self._adaptor.parse_id(job_id)

        # run the Condor 'condor_q' command to get some infos about our job
        ret, out, _ = self.shell.run_sync("%s -long %s | \
            egrep '(JobStatus)|(ExitStatus)|(CompletionDate)'" \
            % (self._commands['condor_q']['path'], pid))

        if ret != 0:
            message = "Couldn't reconnect to job '%s': %s" % (job_id, out)
            log_error_and_raise(message, saga.NoSuccess, self._logger)
        else:
            # the job seems to exist on the backend. let's gather some data
            job_info = {
                'state': saga.job.UNKNOWN,
                'exec_hosts': None,
                'returncode': None,
                'create_time': None,
                'start_time': None,
                'end_time': None,
                'gone': False
            }

            # parse the 'key = value' lines the egrep filter let through
            results = out.split('\n')
            for result in results:
                if len(result.split('=')) == 2:
                    key, val = result.split('=')
                    key = key.strip()  # strip() removes whitespaces at the
                    val = val.strip()  # beginning and the end of the string

                    if key == 'JobStatus':
                        job_info['state'] = _condor_to_saga_jobstate(val)
                    elif key == 'ExitStatus':
                        job_info['returncode'] = val
                    elif key == 'CompletionDate':
                        job_info['end_time'] = val

            return job_info

    # ----------------------------------------------------------------
    #
    def _job_get_info(self, job_id):
        """ get job attributes via condor_q
        """
        # if we don't have the job in our dictionary, we don't want it
        if job_id not in self.jobs:
            message = "Unkown job ID: %s. Can't update state." % job_id
            log_error_and_raise(message, saga.NoSuccess, self._logger)

        # prev. info contains the info collect when _job_get_info
        # was called the last time
        prev_info = self.jobs[job_id]

        # if the 'gone' flag is set, there's no need to query the job
        # state again. it's gone forever
        if prev_info['gone'] is True:
            self._logger.warning("Job information is not available anymore.")
            return prev_info

        # curr. info will contain the new job info collect. it starts off
        # as a copy of prev_info
        curr_info = deepcopy(prev_info)

        rm, pid = self._adaptor.parse_id(job_id)

        # run the Condor 'condor_q' command to get some infos about our job
        ret, out, _ = self.shell.run_sync("%s -long %s | \
            egrep '(JobStatus)|(ExitStatus)|(CompletionDate)'" \
            % (self._commands['condor_q']['path'], pid))

        if ret != 0:
            #if ("Unknown Job Id" in out):
            # Let's see if the previous job state was runnig or pending. in
            # that case, the job is gone now, which can either mean DONE,
            # or FAILED. the only thing we can do is set it to 'DONE'
            if prev_info['state'] in [saga.job.RUNNING, saga.job.PENDING]:
                curr_info['state'] = saga.job.DONE
                curr_info['gone'] = True
                self._logger.warning("Previously running job has \
disappeared. This probably means that the backend doesn't store any information \
about finished jobs. Setting state to 'DONE'.")
            else:
                curr_info['gone'] = True
            #else:
            #    # something went wrong
            #    message = "Error retrieving job info via 'condor_q': %s" % out
            #    log_error_and_raise(message, saga.NoSuccess, self._logger)
        else:
            # parse the egrep result. this should look something like this:
            #     JobStatus = 5
            #     ExitStatus = 0
            #     CompletionDate = 0
            results = out.split('\n')
            for result in results:
                if len(result.split('=')) == 2:
                    key, val = result.split('=')
                    key = key.strip()  # strip() removes whitespaces at the
                    val = val.strip()  # beginning and the end of the string

                    if key == 'JobStatus':
                        curr_info['state'] = _condor_to_saga_jobstate(val)
                    elif key == 'ExitStatus':
                        curr_info['returncode'] = val
                    elif key == 'CompletionDate':
                        curr_info['end_time'] = val

        # return the new job info dict
        return curr_info

    # ----------------------------------------------------------------
    #
    def _job_get_state(self, job_id):
        """ get the job's state
        """
        # check if we have already reach a terminal state
        if self.jobs[job_id]['state'] == saga.job.CANCELED \
                or self.jobs[job_id]['state'] == saga.job.FAILED \
                or self.jobs[job_id]['state'] == saga.job.DONE:
            return self.jobs[job_id]['state']

        # check if we can / should update
        if (self.jobs[job_id]['gone'] is not True):
            self.jobs[job_id] = self._job_get_info(job_id=job_id)

        return self.jobs[job_id]['state']

    # ----------------------------------------------------------------
    #
    def _job_get_exit_code(self, job_id):
        """ get the job's exit code
        """
        # check if we can / should update
        if (self.jobs[job_id]['gone'] is not True) \
                and (self.jobs[job_id]['returncode'] is None):
            self.jobs[job_id] = self._job_get_info(job_id=job_id)

        return self.jobs[job_id]['returncode']

    # ----------------------------------------------------------------
    #
    def _job_get_execution_hosts(self, job_id):
        """ get the job's execution hosts
        """
        # check if we can / should update
        if (self.jobs[job_id]['gone'] is not True) \
                and (self.jobs[job_id]['exec_hosts'] is None):
            self.jobs[job_id] = self._job_get_info(job_id=job_id)

        return self.jobs[job_id]['exec_hosts']

    # ----------------------------------------------------------------
    #
    def _job_get_create_time(self, job_id):
        """ get the job's creation time
        """
        # check if we can / should update
        if (self.jobs[job_id]['gone'] is not True) \
                and (self.jobs[job_id]['create_time'] is None):
            self.jobs[job_id] = self._job_get_info(job_id=job_id)

        return self.jobs[job_id]['create_time']

    # ----------------------------------------------------------------
    #
    def _job_get_start_time(self, job_id):
        """ get the job's start time
        """
        # check if we can / should update
        if (self.jobs[job_id]['gone'] is not True) \
                and (self.jobs[job_id]['start_time'] is None):
            self.jobs[job_id] = self._job_get_info(job_id=job_id)

        return self.jobs[job_id]['start_time']

    # ----------------------------------------------------------------
    #
    def _job_get_end_time(self, job_id):
        """ get the job's end time
        """
        # check if we can / should update
        if (self.jobs[job_id]['gone'] is not True) \
                and (self.jobs[job_id]['end_time'] is None):
            self.jobs[job_id] = self._job_get_info(job_id=job_id)

        return self.jobs[job_id]['end_time']

    # ----------------------------------------------------------------
    #
    def _job_cancel(self, job_id):
        """ cancel the job via 'condor_rm'
        """
        rm, pid = self._adaptor.parse_id(job_id)

        ret, out, _ = self.shell.run_sync("%s %s\n" \
            % (self._commands['condor_rm']['path'], pid))

        if ret != 0:
            message = "Error canceling job via 'condor_rm': %s" % out
            log_error_and_raise(message, saga.NoSuccess, self._logger)

        # assume the job was successfully canceled
        self.jobs[job_id]['state'] = saga.job.CANCELED

    # ----------------------------------------------------------------
    #
    def _job_wait(self, job_id, timeout):
        """ wait for the job to finish or fail

            :param timeout: seconds to wait; a negative value waits forever
            :returns: True if a final state was reached, False on timeout
        """
        time_start = time.time()
        time_now = time_start
        rm, pid = self._adaptor.parse_id(job_id)

        while True:
            state = self._job_get_state(job_id=job_id)
            if state == saga.job.DONE or \
                    state == saga.job.FAILED or \
                    state == saga.job.CANCELED:
                return True

            # avoid busy poll
            time.sleep(0.5)

            # check if we hit timeout
            if timeout >= 0:
                time_now = time.time()
                if time_now - time_start > timeout:
                    return False

    # ----------------------------------------------------------------
    #
    @SYNC_CALL
    def create_job(self, jd):
        """ implements saga.adaptors.cpi.job.Service.create_job()
        """
        # check that only supported attributes are provided
        for attribute in jd.list_attributes():
            if attribute not in _ADAPTOR_CAPABILITIES["jdes_attributes"]:
                message = "'jd.%s' is not supported by this adaptor" \
                    % attribute
                log_error_and_raise(message, saga.BadParameter, self._logger)

        # this dict is passed on to the job adaptor class -- use it to pass any
        # state information you need there.
        adaptor_state = {"job_service": self,
                         "job_description": jd,
                         "job_schema": self.rm.schema,
                         "reconnect": False
                         }

        return saga.job.Job(_adaptor=self._adaptor,
                            _adaptor_state=adaptor_state)

    # ----------------------------------------------------------------
    #
    @SYNC_CALL
    def get_job(self, jobid):
        """ Implements saga.adaptors.cpi.job.Service.get_job()
        """
        # try to get some information about this job and throw it into
        # our job dictionary.
        self.jobs[jobid] = self._retrieve_job(jobid)

        # this dict is passed on to the job adaptor class -- use it to pass any
        # state information you need there.
        adaptor_state = {"job_service": self,
                         # TODO: fill job description
                         "job_description": saga.job.Description(),
                         "job_schema": self.rm.schema,
                         "reconnect": True,
                         "reconnect_jobid": jobid
                         }

        return saga.job.Job(_adaptor=self._adaptor,
                            _adaptor_state=adaptor_state)

    # ----------------------------------------------------------------
    #
    @SYNC_CALL
    def get_url(self):
        """ implements saga.adaptors.cpi.job.Service.get_url()
        """
        return self.rm

    # ----------------------------------------------------------------
    #
    @SYNC_CALL
    def list(self):
        """ implements saga.adaptors.cpi.job.Service.list()
        """
        ids = []

        ret, out, _ = self.shell.run_sync("%s | grep `whoami`"\
            % self._commands['condor_q']['path'])

        if ret != 0 and len(out) > 0:
            message = "failed to list jobs via 'condor_q': %s" % out
            log_error_and_raise(message, saga.NoSuccess, self._logger)
        elif ret != 0 and len(out) == 0:
            # grep returns non-zero on no match: no jobs for this user
            pass
        else:
            for line in out.split("\n"):
                # output looks like this:
                # 112059.svc.uc.futuregrid testjob oweidner 0 Q batch
                # 112061.svc.uc.futuregrid testjob oweidner 0 Q batch
                if len(line.split()) > 1:
                    rm_clone = deepcopy(self.rm)
                    rm_clone.query = ""
                    rm_clone.path = ""
                    jobid = "[%s]-[%s]" % (rm_clone, line.split()[0])
                    ids.append(str(jobid))

        return ids

    #    # ----------------------------------------------------------------
    #    #
    #    def container_run (self, jobs) :
    #        self._logger.debug ("container run: %s"  %  str(jobs))
    #        # TODO: this is not optimized yet
    #        for job in jobs:
    #            job.run ()
    #
    #
    #    # ----------------------------------------------------------------
    #    #
    #    def container_wait (self, jobs, mode, timeout) :
    #        self._logger.debug ("container wait: %s"  %  str(jobs))
    #        # TODO: this is not optimized yet
    #        for job in jobs:
    #            job.wait ()
    #
    #
    #    # ----------------------------------------------------------------
    #    #
    #    def container_cancel (self, jobs) :
    #        self._logger.debug ("container cancel: %s"  %  str(jobs))
    #        raise saga.NoSuccess ("Not Implemented");
###############################################################################
#
class CondorJob (saga.adaptors.cpi.job.Job):
    """ implements saga.adaptors.cpi.job.Job

        Thin job handle: all state queries are delegated to the owning
        CondorJobService instance (self.js).
    """

    def __init__(self, api, adaptor):
        # initialize parent class
        self._cpi_base = super(CondorJob, self)
        self._cpi_base.__init__(api, adaptor)

    @SYNC_CALL
    def init_instance(self, job_info):
        """ implements saga.adaptors.cpi.job.Job.init_instance()
        """
        # init_instance is called for every new saga.job.Job object
        # that is created
        self.jd = job_info["job_description"]
        self.js = job_info["job_service"]

        if job_info['reconnect'] is True:
            # re-attaching to an already-submitted job: adopt its id and
            # consider it started
            self._id = job_info['reconnect_jobid']
            self._started = True
        else:
            self._id = None
            self._started = False

        return self.get_api()

    # ----------------------------------------------------------------
    #
    @SYNC_CALL
    def get_state(self):
        """ implements saga.adaptors.cpi.job.Job.get_state()
        """
        if self._started is False:
            # jobs that are not started are always in 'NEW' state
            return saga.job.NEW
        else:
            return self.js._job_get_state(self._id)

    # ----------------------------------------------------------------
    #
    @SYNC_CALL
    def wait(self, timeout):
        """ implements saga.adaptors.cpi.job.Job.wait()
        """
        if self._started is False:
            log_error_and_raise("Can't wait for job that hasn't been started",
                saga.IncorrectState, self._logger)
        else:
            self.js._job_wait(self._id, timeout)

    # ----------------------------------------------------------------
    #
    @SYNC_CALL
    def cancel(self, timeout):
        """ implements saga.adaptors.cpi.job.Job.cancel()
        """
        # NOTE(review): the error message below says "Can't wait" -- looks
        # like a copy/paste from wait(); the exception type is correct but
        # the text is misleading.
        if self._started is False:
            log_error_and_raise("Can't wait for job that hasn't been started",
                saga.IncorrectState, self._logger)
        else:
            self.js._job_cancel(self._id)

    # ----------------------------------------------------------------
    #
    @SYNC_CALL
    def run(self):
        """ implements saga.adaptors.cpi.job.Job.run()
        """
        self._id = self.js._job_run(self.jd)
        self._started = True

    # ----------------------------------------------------------------
    #
    @SYNC_CALL
    def get_service_url(self):
        """ implements saga.adaptors.cpi.job.Job.get_service_url()
        """
        return self.js.rm

    # ----------------------------------------------------------------
    #
    @SYNC_CALL
    def get_id(self):
        """ implements saga.adaptors.cpi.job.Job.get_id()
        """
        return self._id

    # ----------------------------------------------------------------
    #
    @SYNC_CALL
    def get_exit_code(self):
        """ implements saga.adaptors.cpi.job.Job.get_exit_code()
        """
        if self._started is False:
            return None
        else:
            return self.js._job_get_exit_code(self._id)

    # ----------------------------------------------------------------
    #
    @SYNC_CALL
    def get_created(self):
        """ implements saga.adaptors.cpi.job.Job.get_created()
        """
        if self._started is False:
            return None
        else:
            return self.js._job_get_create_time(self._id)

    # ----------------------------------------------------------------
    #
    @SYNC_CALL
    def get_started(self):
        """ implements saga.adaptors.cpi.job.Job.get_started()
        """
        if self._started is False:
            return None
        else:
            return self.js._job_get_start_time(self._id)

    # ----------------------------------------------------------------
    #
    @SYNC_CALL
    def get_finished(self):
        """ implements saga.adaptors.cpi.job.Job.get_finished()
        """
        if self._started is False:
            return None
        else:
            return self.js._job_get_end_time(self._id)

    # ----------------------------------------------------------------
    #
    @SYNC_CALL
    def get_execution_hosts(self):
        """ implements saga.adaptors.cpi.job.Job.get_execution_hosts()
        """
        if self._started is False:
            return None
        else:
            return self.js._job_get_execution_hosts(self._id)
# changelog: added file transfer to adaptor capabilities
#!/usr/bin/env python
# encoding: utf-8
""" Condor job adaptor implementation
"""
__author__ = "Ole Weidner"
__copyright__ = "Copyright 2013, The SAGA Project"
__license__ = "MIT"
import saga.utils.which
import saga.utils.pty_shell
import saga.adaptors.cpi.base
import saga.adaptors.cpi.job
from saga.job.constants import *
from transferdirectives import TransferDirectives
import re
import os
import time
from copy import deepcopy
from cgi import parse_qs
SYNC_CALL = saga.adaptors.cpi.decorators.SYNC_CALL
ASYNC_CALL = saga.adaptors.cpi.decorators.ASYNC_CALL
# --------------------------------------------------------------------
#
def log_error_and_raise(message, exception, logger):
    """ Log *message* at ERROR level on *logger*, then raise it wrapped in
        an instance of *exception*.  Never returns normally.
    """
    logger.error(message)
    raise exception(message)
# --------------------------------------------------------------------
#
def _condor_to_saga_jobstate(condorjs):
    """ translates a numeric Condor JobStatus code to the SAGA job state
    """
    # From: http://pages.cs.wisc.edu/~adesmet/status.html
    #
    # JobStatus in job ClassAds
    #
    # 0   Unexpanded      U
    # 1   Idle            I
    # 2   Running         R
    # 3   Removed         X
    # 4   Completed       C
    # 5   Held            H
    # 6   Submission_err  E
    if int(condorjs) == 0:
        return saga.job.PENDING      # Unexpanded
    elif int(condorjs) == 1:
        return saga.job.PENDING      # Idle
    elif int(condorjs) == 2:
        return saga.job.RUNNING      # Running
    elif int(condorjs) == 3:
        return saga.job.CANCELED     # Removed
    elif int(condorjs) == 4:
        return saga.job.DONE         # Completed
    elif int(condorjs) == 5:
        return saga.job.PENDING      # Held
    elif int(condorjs) == 6:
        return saga.job.FAILED       # Submission_err
    else:
        # any unrecognised code
        return saga.job.UNKNOWN
# --------------------------------------------------------------------
#
def _condorscript_generator(url, logger, jd, option_dict=None):
    """ generates a Condor submit description from a SAGA job description

        :param url:         job service URL (currently unused in the body)
        :param logger:      adaptor logger (currently unused in the body)
        :param jd:          saga.job.Description to translate
        :param option_dict: extra submit-file options, typically parsed from
                            the job service URL query string (may be None)
        :returns: the complete submit description as a single string
    """
    condor_file = str()

    ##### OPTIONS PASSED VIA JOB SERVICE URL #####
    ##
    if option_dict is not None:
        condor_file += "\n##### DEFAULT OPTIONS PASSED VIA JOB SERVICE URL #####\n##"

        # special treatment for universe - defaults to 'vanilla'
        if 'universe' not in option_dict:
            condor_file += "\nuniverse = vanilla"

        for (key, value) in option_dict.iteritems():
            condor_file += "\n%s = %s" % (key, value)

    ##### OPTIONS PASSED VIA JOB DESCRIPTION #####
    ##
    # NOTE(review): the emitted header below says 'JOB SERVICE URL' although
    # this section handles job-description options -- looks like a copy/paste
    # slip in the generated comment text. Harmless to Condor, but confusing.
    condor_file += "\n\n##### OPTIONS PASSED VIA JOB SERVICE URL #####\n##"
    requirements = "requirements = "

    # executable -> executable
    if jd.executable is not None:
        condor_file += "\nexecutable = %s" % jd.executable

    # arguments -> arguments
    arguments = "arguments = "
    if jd.arguments is not None:
        for arg in jd.arguments:
            # Condor HATES double quotes in the arguments. It'll return
            # some crap like: "Found illegal unescaped double-quote: ...
            # That's why we escape them.
            arguments += "%s " % (arg.replace('"', '\\"'))
        condor_file += "\n%s" % arguments

    # file_transfer -> transfer_input_files / transfer_output_files
    if jd.file_transfer is not None:
        td = TransferDirectives(jd.file_transfer)
        # Condor only supports plain overwrite transfers; reject the
        # append-style directives outright.
        if len(td.in_append_dict) > 0:
            raise Exception('FileTransfer append syntax (>>) not supported by Condor: %s' % td.in_append_dict)
        if len(td.out_append_dict) > 0:
            raise Exception('FileTransfer append syntax (<<) not supported by Condor: %s' % td.out_append_dict)

        if len(td.in_overwrite_dict) > 0:
            transfer_input_files = "transfer_input_files = "
            for (source, target) in td.in_overwrite_dict.iteritems():
                # make sure source is a file and not a dir
                (s_path, s_entry) = os.path.split(source)
                if len(s_entry) < 1:
                    raise Exception('Condor accepts only files (not directories) as FileTransfer sources: %s' % source)
                # make sure target is just a file
                (t_path, t_entry) = os.path.split(target)
                if len(t_path) > 1:
                    raise Exception('Condor accepts only filenames (without paths) as FileTransfer targets: %s' % target)
                # make sure source and target file are the same
                if s_entry != t_entry:
                    raise Exception('For Condor source file name and target file name have to be identical: %s != %s' % (s_entry, t_entry))
                # entry ok - add to job script
                transfer_input_files += "%s, " % source
            condor_file += "\n%s" % transfer_input_files

        if len(td.out_overwrite_dict) > 0:
            transfer_output_files = "transfer_output_files = "
            for (source, target) in td.out_overwrite_dict.iteritems():
                # make sure source is a file and not a dir
                (s_path, s_entry) = os.path.split(source)
                if len(s_entry) < 1:
                    raise Exception('Condor accepts only files (not directories) as FileTransfer sources: %s' % source)
                # make sure target is just a file
                (t_path, t_entry) = os.path.split(target)
                if len(t_path) > 1:
                    raise Exception('Condor accepts only filenames (without paths) as FileTransfer targets: %s' % target)
                # make sure source and target file are the same
                if s_entry != t_entry:
                    raise Exception('For Condor source file name and target file name have to be identical: %s != %s' % (s_entry, t_entry))
                # entry ok - add to job script
                transfer_output_files += "%s, " % source
            condor_file += "\n%s" % transfer_output_files

    # always define log
    condor_file += "\nlog = saga-condor-job-$(cluster).log "

    # output -> output
    if jd.output is not None:
        condor_file += "\noutput = %s " % jd.output

    # error -> error
    if jd.error is not None:
        condor_file += "\nerror = %s " % jd.error

    # environment -> environment (semicolon-separated KEY=VALUE list)
    environment = "environment = "
    if jd.environment is not None:
        variable_list = str()
        for key in jd.environment.keys():
            variable_list += "%s=%s;" % (key, jd.environment[key])
        environment += "%s " % variable_list
        condor_file += "\n%s" % environment

    # project -> +ProjectName
    if jd.project is not None:
        condor_file += "\n+ProjectName = \"%s\"" % str(jd.project)

    # candidate hosts -> SiteList + requirements (OSG glidein convention)
    if jd.candidate_hosts is not None:
        hosts = ""
        for host in jd.candidate_hosts:
            hosts += "%s, " % host
        sitelist = "+SiteList = \"%s\"" % hosts
        requirements += "(stringListMember(GLIDEIN_ResourceName,SiteList) == True)"
        condor_file += "\n%s" % sitelist
        condor_file += "\n%s" % requirements

    condor_file += "\n\nqueue"

    return condor_file
# --------------------------------------------------------------------
# some private defs
#
_PTY_TIMEOUT = 2.0

# --------------------------------------------------------------------
# the adaptor name
#
_ADAPTOR_NAME = "saga.adaptor.condorjob"
_ADAPTOR_SCHEMAS = ["condor", "condor+ssh", "condor+gsissh"]
_ADAPTOR_OPTIONS = [
    {
        # placeholder option -- read in Adaptor.__init__ but otherwise unused
        'category': 'saga.adaptor.condorjob',
        'name': 'foo',
        'type': bool,
        'default': False,
        'valid_options': [True, False],
        'documentation': """Doc""",
        'env_variable': None
    },
]

# --------------------------------------------------------------------
# the adaptor capabilities & supported attributes
#
_ADAPTOR_CAPABILITIES = {
    # job description attributes accepted by create_job()
    "jdes_attributes": [saga.job.NAME,
                        saga.job.EXECUTABLE,
                        saga.job.ARGUMENTS,
                        saga.job.ENVIRONMENT,
                        saga.job.INPUT,
                        saga.job.OUTPUT,
                        saga.job.ERROR,
                        saga.job.QUEUE,
                        saga.job.PROJECT,
                        saga.job.WALL_TIME_LIMIT,
                        saga.job.WORKING_DIRECTORY,
                        saga.job.CANDIDATE_HOSTS,
                        saga.job.TOTAL_CPU_COUNT,
                        saga.job.FILE_TRANSFER],
    # attributes that can be queried on a job instance
    "job_attributes": [saga.job.EXIT_CODE,
                       saga.job.EXECUTION_HOSTS,
                       saga.job.CREATED,
                       saga.job.STARTED,
                       saga.job.FINISHED],
    "metrics": [saga.job.STATE],
    "contexts": {"ssh": "SSH public/private keypair",
                 "x509": "GSISSH X509 proxy context",
                 "userpass": "username/password pair (ssh)"}
}

# --------------------------------------------------------------------
# the adaptor documentation
#
_ADAPTOR_DOC = {
    "name": _ADAPTOR_NAME,
    "cfg_options": _ADAPTOR_OPTIONS,
    "capabilities": _ADAPTOR_CAPABILITIES,
    "description": """The Condor adaptor can run and manage jobs on local and
remote Condor gateways.""",
    "details": """TODO""",
    # fixed typo in the user-visible schema description: 'conenct' -> 'connect'
    "schemas": {"condor": "connect to a local Condor gateway",
                "condor+ssh": "connect to a remote Condor gateway via SSH",
                "condor+gsissh": "connect to a remote Condor gateway via GSISSH"}
}

# --------------------------------------------------------------------
# the adaptor info is used to register the adaptor with SAGA
#
_ADAPTOR_INFO = {
    "name": _ADAPTOR_NAME,
    "version": "v0.1",
    "schemas": _ADAPTOR_SCHEMAS,
    "cpis": [
        {
            "type": "saga.job.Service",
            "class": "CondorJobService"
        },
        {
            "type": "saga.job.Job",
            "class": "CondorJob"
        }
    ]
}
###############################################################################
# The adaptor class
class Adaptor (saga.adaptors.cpi.base.AdaptorBase):
    """ this is the actual adaptor class, which gets loaded by SAGA (i.e. by
        the SAGA engine), and which registers the CPI implementation classes
        which provide the adaptor's functionality.
    """

    # ----------------------------------------------------------------
    #
    def __init__(self):
        saga.adaptors.cpi.base.AdaptorBase.__init__(self,
            _ADAPTOR_INFO, _ADAPTOR_OPTIONS)

        # raw string: '\[' in a plain string literal is an invalid escape
        # sequence (DeprecationWarning, and a SyntaxError in newer Pythons)
        self.id_re = re.compile(r'^\[(.*)\]-\[(.*?)\]$')
        self.opts = self.get_config()

        self.foo = self.opts['foo'].get_value()
        #self._logger.info('debug trace : %s' % self.debug_trace)

    # ----------------------------------------------------------------
    #
    def sanity_check(self):
        # FIXME: also check for gsissh
        pass

    # ----------------------------------------------------------------
    #
    def parse_id(self, id):
        """ Split a job id of the form '[rm]-[pid]' into its parts and
            return them as a (rm, pid) tuple.

            Raises saga.BadParameter if the id does not match that format.
        """
        match = self.id_re.match(id)

        if not match or len(match.groups()) != 2:
            raise saga.BadParameter("Cannot parse job id '%s'" % id)

        return (match.group(1), match.group(2))
###############################################################################
#
class CondorJobService (saga.adaptors.cpi.job.Service):
    """ implements saga.adaptors.cpi.job.Service
    """

    # ----------------------------------------------------------------
    #
    def __init__(self, api, adaptor):
        self._cpi_base = super(CondorJobService, self)
        self._cpi_base.__init__(api, adaptor)

    # ----------------------------------------------------------------
    #
    def __del__(self):
        # FIXME: not sure if we should PURGE here -- that removes states which
        # might not be evaluated, yet. Should we mark state evaluation
        # separately?
        #   cmd_state () { touch $DIR/purgeable; ... }
        # When should that be done?
        #self._logger.error("adaptor dying... %s" % self.njobs)
        #self._logger.trace()
        self.finalize(kill_shell=True)

    # ----------------------------------------------------------------
    #
    @SYNC_CALL
    def init_instance(self, adaptor_state, rm_url, session):
        """ service instance constructor
        """
        # Turn this off by default.
        self._disable_ptywrapper_logging = True

        self.rm = rm_url
        self.session = session
        self.ppn = 0
        self.is_cray = False
        self.jobs = dict()
        self.query_options = dict()

        rm_scheme = rm_url.scheme
        pty_url = deepcopy(rm_url)

        # this adaptor supports options that can be passed via the
        # 'query' component of the job service URL.
        if rm_url.query is not None:
            for key, val in parse_qs(rm_url.query).iteritems():
                self.query_options[key] = val[0]

        # we need to extract the scheme for PTYShell. That's basically the
        # job.Service Url without the condor+ part. We use the PTYShell to
        # execute condor commands either locally or via gsissh or ssh.
        if rm_scheme == "condor":
            pty_url.scheme = "fork"
        elif rm_scheme == "condor+ssh":
            pty_url.scheme = "ssh"
        elif rm_scheme == "condor+gsissh":
            pty_url.scheme = "gsissh"

        # these are the commands that we need in order to interact with
        # Condor. the adaptor will try to find them during initialize(self)
        # and bail out in case they are not available.
        self._commands = {'condor_version': None,
                          'condor_submit': None,
                          'condor_q': None,
                          'condor_rm': None}

        if self._disable_ptywrapper_logging:
            # create a null logger to silence the PTY wrapper!
            import logging

            class NullHandler(logging.Handler):
                def emit(self, record):
                    pass
            nh = NullHandler()

            # BUGFIX: Logger.addHandler() returns None, so the previous
            # one-liner passed None to PTYShell instead of the logger.
            null_logger = logging.getLogger("PTYShell")
            null_logger.addHandler(nh)

            self.shell = saga.utils.pty_shell.PTYShell(pty_url,
                self.session, null_logger)
        else:
            self.shell = saga.utils.pty_shell.PTYShell(pty_url,
                self.session)

        self.shell.set_initialize_hook(self.initialize)
        self.shell.set_finalize_hook(self.finalize)

        self.initialize()

    # ----------------------------------------------------------------
    #
    def initialize(self):
        # check if all required condor tools are available.
        # 'version' is recorded for every command; it is only determined by
        # the 'condor_version' probe, so pre-initialize it to avoid an
        # unbound-local error when dict iteration order puts other commands
        # before 'condor_version'.
        version = None
        for cmd in self._commands.keys():
            ret, out, _ = self.shell.run_sync("which %s " % cmd)
            if ret != 0:
                message = "Error finding Condor tools: %s" % out
                log_error_and_raise(message, saga.NoSuccess, self._logger)
            else:
                path = out.strip()  # strip removes newline

                if cmd == 'condor_version':
                    ret, out, _ = self.shell.run_sync("%s" % cmd)
                    if ret != 0:
                        message = "Error determining Condor version: %s" % out
                        log_error_and_raise(message, saga.NoSuccess,
                            self._logger)
                    else:
                        # version is reported as:
                        # $CondorVersion: 7.8.6 Oct 25 2012 $
                        # $CondorPlatform: X86_64-CentOS_5.7 $
                        lines = out.split('\n')
                        version = lines[0].replace("$CondorVersion: ", "")
                        version = version.strip(" $")

                # add path and version to the command dictionary
                self._commands[cmd] = {"path": path,
                                       "version": version}

        self._logger.info("Found Condor tools: %s" % self._commands)

    # ----------------------------------------------------------------
    #
    def finalize(self, kill_shell=False):
        pass

    # ----------------------------------------------------------------
    #
    def _job_run(self, jd):
        """ runs a job via condor_submit
        """
        # create a Condor job script from SAGA job description
        script = _condorscript_generator(url=self.rm, logger=self._logger, jd=jd,
                                         option_dict=self.query_options)
        self._logger.debug("Generated Condor script: %s" % script)

        ret, out, _ = self.shell.run_sync("echo \'%s\' | %s -" \
            % (script, self._commands['condor_submit']['path']))

        if ret != 0:
            # something went wrong
            message = "Error running job via 'condor_submit': %s. Script was: %s" \
                % (out, script)
            log_error_and_raise(message, saga.NoSuccess, self._logger)
        else:
            # stdout contains the job id
            for line in out.split("\n"):
                if "** Proc" in line:
                    pid = line.split()[2][:-1]

            # we don't want the 'query' part of the URL to be part of the ID,
            # simply because it can get terribly long (and ugly). to get rid
            # of it, we clone the URL and set the query part to None.
            rm_clone = deepcopy(self.rm)
            rm_clone.query = ""
            rm_clone.path = ""

            job_id = "[%s]-[%s]" % (rm_clone, pid)
            self._logger.info("Submitted Condor job with id: %s" % job_id)

            # add job to internal list of known jobs.
            self.jobs[job_id] = {
                'state': saga.job.PENDING,
                'exec_hosts': None,
                'returncode': None,
                'create_time': None,
                'start_time': None,
                'end_time': None,
                'gone': False
            }
            return job_id

    # ----------------------------------------------------------------
    #
    def _retrieve_job(self, job_id):
        """ see if we can get some info about a job that we don't
            know anything about
        """
        rm, pid = self._adaptor.parse_id(job_id)

        # run the Condor 'condor_q' command to get some infos about our job
        ret, out, _ = self.shell.run_sync("%s -long %s | \
            egrep '(JobStatus)|(ExitStatus)|(CompletionDate)'" \
            % (self._commands['condor_q']['path'], pid))

        if ret != 0:
            message = "Couldn't reconnect to job '%s': %s" % (job_id, out)
            log_error_and_raise(message, saga.NoSuccess, self._logger)
        else:
            # the job seems to exist on the backend. let's gather some data
            job_info = {
                'state': saga.job.UNKNOWN,
                'exec_hosts': None,
                'returncode': None,
                'create_time': None,
                'start_time': None,
                'end_time': None,
                'gone': False
            }

            results = out.split('\n')
            for result in results:
                if len(result.split('=')) == 2:
                    key, val = result.split('=')
                    key = val if False else key.strip()  # strip() removes whitespace
                    val = val.strip()

                    if key == 'JobStatus':
                        job_info['state'] = _condor_to_saga_jobstate(val)
                    elif key == 'ExitStatus':
                        job_info['returncode'] = val
                    elif key == 'CompletionDate':
                        job_info['end_time'] = val

            return job_info

    # ----------------------------------------------------------------
    #
    def _job_get_info(self, job_id):
        """ get job attributes via condor_q
        """
        # if we don't have the job in our dictionary, we don't want it
        if job_id not in self.jobs:
            message = "Unknown job ID: %s. Can't update state." % job_id
            log_error_and_raise(message, saga.NoSuccess, self._logger)

        # prev. info contains the info collected when _job_get_info
        # was called the last time
        prev_info = self.jobs[job_id]

        # if the 'gone' flag is set, there's no need to query the job
        # state again. it's gone forever
        if prev_info['gone'] is True:
            self._logger.warning("Job information is not available anymore.")
            return prev_info

        # curr. info will contain the new job info collected. it starts off
        # as a copy of prev_info
        curr_info = deepcopy(prev_info)

        rm, pid = self._adaptor.parse_id(job_id)

        # run the Condor 'condor_q' command to get some infos about our job
        ret, out, _ = self.shell.run_sync("%s -long %s | \
            egrep '(JobStatus)|(ExitStatus)|(CompletionDate)'" \
            % (self._commands['condor_q']['path'], pid))

        if ret != 0:
            #if ("Unknown Job Id" in out):
            # Let's see if the previous job state was running or pending. in
            # that case, the job is gone now, which can either mean DONE,
            # or FAILED. the only thing we can do is set it to 'DONE'
            if prev_info['state'] in [saga.job.RUNNING, saga.job.PENDING]:
                curr_info['state'] = saga.job.DONE
                curr_info['gone'] = True
                self._logger.warning("Previously running job has \
disappeared. This probably means that the backend doesn't store any information \
about finished jobs. Setting state to 'DONE'.")
            else:
                curr_info['gone'] = True
            #else:
            #    # something went wrong
            #    message = "Error retrieving job info via 'condor_q': %s" % out
            #    log_error_and_raise(message, saga.NoSuccess, self._logger)
        else:
            # parse the egrep result. this should look something like this:
            #     JobStatus = 5
            #     ExitStatus = 0
            #     CompletionDate = 0
            results = out.split('\n')
            for result in results:
                if len(result.split('=')) == 2:
                    key, val = result.split('=')
                    key = key.strip()  # strip() removes whitespaces at the
                    val = val.strip()  # beginning and the end of the string

                    if key == 'JobStatus':
                        curr_info['state'] = _condor_to_saga_jobstate(val)
                    elif key == 'ExitStatus':
                        curr_info['returncode'] = val
                    elif key == 'CompletionDate':
                        curr_info['end_time'] = val

        # return the new job info dict
        return curr_info

    # ----------------------------------------------------------------
    #
    def _job_get_state(self, job_id):
        """ get the job's state
        """
        # check if we have already reached a terminal state
        if self.jobs[job_id]['state'] == saga.job.CANCELED \
        or self.jobs[job_id]['state'] == saga.job.FAILED \
        or self.jobs[job_id]['state'] == saga.job.DONE:
            return self.jobs[job_id]['state']

        # check if we can / should update
        if (self.jobs[job_id]['gone'] is not True):
            self.jobs[job_id] = self._job_get_info(job_id=job_id)

        return self.jobs[job_id]['state']

    # ----------------------------------------------------------------
    #
    def _job_get_exit_code(self, job_id):
        """ get the job's exit code
        """
        # check if we can / should update
        if (self.jobs[job_id]['gone'] is not True) \
        and (self.jobs[job_id]['returncode'] is None):
            self.jobs[job_id] = self._job_get_info(job_id=job_id)

        return self.jobs[job_id]['returncode']

    # ----------------------------------------------------------------
    #
    def _job_get_execution_hosts(self, job_id):
        """ get the job's execution hosts
        """
        # check if we can / should update
        if (self.jobs[job_id]['gone'] is not True) \
        and (self.jobs[job_id]['exec_hosts'] is None):
            self.jobs[job_id] = self._job_get_info(job_id=job_id)

        return self.jobs[job_id]['exec_hosts']

    # ----------------------------------------------------------------
    #
    def _job_get_create_time(self, job_id):
        """ get the job's creation time
        """
        # check if we can / should update
        if (self.jobs[job_id]['gone'] is not True) \
        and (self.jobs[job_id]['create_time'] is None):
            self.jobs[job_id] = self._job_get_info(job_id=job_id)

        return self.jobs[job_id]['create_time']

    # ----------------------------------------------------------------
    #
    def _job_get_start_time(self, job_id):
        """ get the job's start time
        """
        # check if we can / should update
        if (self.jobs[job_id]['gone'] is not True) \
        and (self.jobs[job_id]['start_time'] is None):
            self.jobs[job_id] = self._job_get_info(job_id=job_id)

        return self.jobs[job_id]['start_time']

    # ----------------------------------------------------------------
    #
    def _job_get_end_time(self, job_id):
        """ get the job's end time
        """
        # check if we can / should update
        if (self.jobs[job_id]['gone'] is not True) \
        and (self.jobs[job_id]['end_time'] is None):
            self.jobs[job_id] = self._job_get_info(job_id=job_id)

        return self.jobs[job_id]['end_time']

    # ----------------------------------------------------------------
    #
    def _job_cancel(self, job_id):
        """ cancel the job via 'condor_rm'
        """
        rm, pid = self._adaptor.parse_id(job_id)

        ret, out, _ = self.shell.run_sync("%s %s\n" \
            % (self._commands['condor_rm']['path'], pid))

        if ret != 0:
            message = "Error canceling job via 'condor_rm': %s" % out
            log_error_and_raise(message, saga.NoSuccess, self._logger)

        # assume the job was successfully canceled
        self.jobs[job_id]['state'] = saga.job.CANCELED

    # ----------------------------------------------------------------
    #
    def _job_wait(self, job_id, timeout):
        """ wait for the job to finish or fail
        """
        time_start = time.time()
        time_now = time_start
        rm, pid = self._adaptor.parse_id(job_id)

        while True:
            state = self._job_get_state(job_id=job_id)
            if state == saga.job.DONE or \
               state == saga.job.FAILED or \
               state == saga.job.CANCELED:
                return True
            # avoid busy poll
            time.sleep(0.5)

            # check if we hit timeout
            if timeout >= 0:
                time_now = time.time()
                if time_now - time_start > timeout:
                    return False

    # ----------------------------------------------------------------
    #
    @SYNC_CALL
    def create_job(self, jd):
        """ implements saga.adaptors.cpi.job.Service.create_job()
        """
        # check that only supported attributes are provided
        for attribute in jd.list_attributes():
            if attribute not in _ADAPTOR_CAPABILITIES["jdes_attributes"]:
                message = "'jd.%s' is not supported by this adaptor" \
                    % attribute
                log_error_and_raise(message, saga.BadParameter, self._logger)

        # this dict is passed on to the job adaptor class -- use it to pass any
        # state information you need there.
        adaptor_state = {"job_service": self,
                         "job_description": jd,
                         "job_schema": self.rm.schema,
                         "reconnect": False
                         }

        return saga.job.Job(_adaptor=self._adaptor,
                            _adaptor_state=adaptor_state)

    # ----------------------------------------------------------------
    #
    @SYNC_CALL
    def get_job(self, jobid):
        """ Implements saga.adaptors.cpi.job.Service.get_job()
        """
        # try to get some information about this job and throw it into
        # our job dictionary.
        self.jobs[jobid] = self._retrieve_job(jobid)

        # this dict is passed on to the job adaptor class -- use it to pass any
        # state information you need there.
        adaptor_state = {"job_service": self,
                         # TODO: fill job description
                         "job_description": saga.job.Description(),
                         "job_schema": self.rm.schema,
                         "reconnect": True,
                         "reconnect_jobid": jobid
                         }

        return saga.job.Job(_adaptor=self._adaptor,
                            _adaptor_state=adaptor_state)

    # ----------------------------------------------------------------
    #
    @SYNC_CALL
    def get_url(self):
        """ implements saga.adaptors.cpi.job.Service.get_url()
        """
        return self.rm

    # ----------------------------------------------------------------
    #
    @SYNC_CALL
    def list(self):
        """ implements saga.adaptors.cpi.job.Service.list()
        """
        ids = []

        # NOTE: condor_q | grep exits non-zero with empty output when the
        # user simply has no jobs -- that case is not an error.
        ret, out, _ = self.shell.run_sync("%s | grep `whoami`"\
            % self._commands['condor_q']['path'])

        if ret != 0 and len(out) > 0:
            message = "failed to list jobs via 'condor_q': %s" % out
            log_error_and_raise(message, saga.NoSuccess, self._logger)
        elif ret != 0 and len(out) == 0:
            pass
        else:
            for line in out.split("\n"):
                # output looks like this:
                # 112059.svc.uc.futuregrid testjob oweidner 0 Q batch
                # 112061.svc.uc.futuregrid testjob oweidner 0 Q batch
                if len(line.split()) > 1:
                    rm_clone = deepcopy(self.rm)
                    rm_clone.query = ""
                    rm_clone.path = ""
                    jobid = "[%s]-[%s]" % (rm_clone, line.split()[0])
                    ids.append(str(jobid))

        return ids

    # # ----------------------------------------------------------------
    # #
    # def container_run (self, jobs) :
    #     self._logger.debug ("container run: %s"  %  str(jobs))
    #     # TODO: this is not optimized yet
    #     for job in jobs:
    #         job.run ()
    #
    #
    # # ----------------------------------------------------------------
    # #
    # def container_wait (self, jobs, mode, timeout) :
    #     self._logger.debug ("container wait: %s"  %  str(jobs))
    #     # TODO: this is not optimized yet
    #     for job in jobs:
    #         job.wait ()
    #
    #
    # # ----------------------------------------------------------------
    # #
    # def container_cancel (self, jobs) :
    #     self._logger.debug ("container cancel: %s"  %  str(jobs))
    #     raise saga.NoSuccess ("Not Implemented");
###############################################################################
#
class CondorJob (saga.adaptors.cpi.job.Job):
    """ implements saga.adaptors.cpi.job.Job
    """

    def __init__(self, api, adaptor):
        # initialize parent class
        self._cpi_base = super(CondorJob, self)
        self._cpi_base.__init__(api, adaptor)

    @SYNC_CALL
    def init_instance(self, job_info):
        """ implements saga.adaptors.cpi.job.Job.init_instance()
        """
        # init_instance is called for every new saga.job.Job object
        # that is created
        self.jd = job_info["job_description"]
        self.js = job_info["job_service"]

        if job_info['reconnect'] is True:
            self._id = job_info['reconnect_jobid']
            self._started = True
        else:
            self._id = None
            self._started = False

        return self.get_api()

    # ----------------------------------------------------------------
    #
    @SYNC_CALL
    def get_state(self):
        """ implements saga.adaptors.cpi.job.Job.get_state()
        """
        if self._started is False:
            # jobs that are not started are always in 'NEW' state
            return saga.job.NEW
        else:
            return self.js._job_get_state(self._id)

    # ----------------------------------------------------------------
    #
    @SYNC_CALL
    def wait(self, timeout):
        """ implements saga.adaptors.cpi.job.Job.wait()
        """
        if self._started is False:
            log_error_and_raise("Can't wait for job that hasn't been started",
                saga.IncorrectState, self._logger)
        else:
            self.js._job_wait(self._id, timeout)

    # ----------------------------------------------------------------
    #
    @SYNC_CALL
    def cancel(self, timeout):
        """ implements saga.adaptors.cpi.job.Job.cancel()
        """
        if self._started is False:
            # message fixed: this is cancel(), not wait()
            log_error_and_raise("Can't cancel job that hasn't been started",
                saga.IncorrectState, self._logger)
        else:
            self.js._job_cancel(self._id)

    # ----------------------------------------------------------------
    #
    @SYNC_CALL
    def run(self):
        """ implements saga.adaptors.cpi.job.Job.run()
        """
        self._id = self.js._job_run(self.jd)
        self._started = True

    # ----------------------------------------------------------------
    #
    @SYNC_CALL
    def get_service_url(self):
        """ implements saga.adaptors.cpi.job.Job.get_service_url()
        """
        return self.js.rm

    # ----------------------------------------------------------------
    #
    @SYNC_CALL
    def get_id(self):
        """ implements saga.adaptors.cpi.job.Job.get_id()
        """
        return self._id

    # ----------------------------------------------------------------
    #
    @SYNC_CALL
    def get_exit_code(self):
        """ implements saga.adaptors.cpi.job.Job.get_exit_code()
        """
        if self._started is False:
            return None
        else:
            return self.js._job_get_exit_code(self._id)

    # ----------------------------------------------------------------
    #
    @SYNC_CALL
    def get_created(self):
        """ implements saga.adaptors.cpi.job.Job.get_created()
        """
        if self._started is False:
            return None
        else:
            return self.js._job_get_create_time(self._id)

    # ----------------------------------------------------------------
    #
    @SYNC_CALL
    def get_started(self):
        """ implements saga.adaptors.cpi.job.Job.get_started()
        """
        if self._started is False:
            return None
        else:
            return self.js._job_get_start_time(self._id)

    # ----------------------------------------------------------------
    #
    @SYNC_CALL
    def get_finished(self):
        """ implements saga.adaptors.cpi.job.Job.get_finished()
        """
        if self._started is False:
            return None
        else:
            return self.js._job_get_end_time(self._id)

    # ----------------------------------------------------------------
    #
    @SYNC_CALL
    def get_execution_hosts(self):
        """ implements saga.adaptors.cpi.job.Job.get_execution_hosts()
        """
        if self._started is False:
            return None
        else:
            return self.js._job_get_execution_hosts(self._id)
|
Fixed errors
|
# flake8: noqa: E128
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-05-24 12:20
from __future__ import unicode_literals
from django.db import migrations
from django.contrib.contenttypes.models import ContentType
from django.core.management.sql import emit_post_migrate_signal
def add_molo_forms_permissions(apps, schema_editor):
    """Create the molo.forms segment-user-group permissions and grant
    page permissions on the FormsIndexPage tree to the product/data/content
    admin groups.

    Data-migration callable; ``apps`` supplies historical model classes.
    """
    db_alias = schema_editor.connection.alias
    # ensure the post_migrate machinery has created Permission rows
    # before we try to look any of them up below
    emit_post_migrate_signal(2, False, db_alias)

    Group = apps.get_model('auth.Group')
    Permission = apps.get_model('auth.Permission')
    GroupPagePermission = apps.get_model('wagtailcore.GroupPagePermission')
    FormsIndexPage = apps.get_model('forms.FormsIndexPage')

    # **** Get IndexPages ****
    forms = FormsIndexPage.objects.all()

    # **** Get Permission ****
    # Wagtail
    access_admin = get_permission(Permission, 'access_admin')

    # Forms
    # NOTE(review): Django normally stores ContentType.model lowercased;
    # the CamelCase value here is kept as-is -- confirm it matches the
    # intended content type row.
    FormsSegmentUserGroup, created = ContentType.objects.get_or_create(
        app_label='forms', model='FormsSegmentUserGroup')
    Permission.objects.create(
        name='add_segmentusergroup',
        codename='add_segmentusergroup',
        content_type_id=FormsSegmentUserGroup.pk)
    Permission.objects.create(
        name='change_segmentusergroup',
        codename='change_segmentusergroup',
        content_type_id=FormsSegmentUserGroup.pk)
    Permission.objects.create(
        name='delete_segmentusergroup',
        codename='delete_segmentusergroup',
        content_type_id=FormsSegmentUserGroup.pk)
    # the permission objects below are fetched but not attached to any
    # group in this migration (kept for parity with related migrations)
    add_segmentusergroup = get_permission(
        Permission, 'add_segmentusergroup')
    change_segmentusergroup = get_permission(
        Permission, 'change_segmentusergroup')
    delete_segmentusergroup = get_permission(
        Permission, 'delete_segmentusergroup')

    add_segment = get_permission(Permission, 'add_segment')
    change_segment = get_permission(Permission, 'change_segment')
    delete_segment = get_permission(Permission, 'delete_segment')

    # Wagtail Page permission
    page_permission_types = ('add', 'edit', 'publish', 'bulk_delete', 'lock')

    # **** Add wagtail groups permission ****
    # <----- Product Admin ----->
    product_admin_group = get_or_create_group(Group, 'product_admin')
    # Page permissions
    create_page_permission(
        GroupPagePermission, product_admin_group, forms, page_permission_types)

    # <----- Data Admin ----->
    data_admin_group = get_or_create_group(Group, 'data_admin')
    # Page permissions
    create_page_permission(
        GroupPagePermission, data_admin_group, forms, page_permission_types)

    # <----- Content Admin ----->
    content_admin_group = get_or_create_group(Group, 'content_admin')
    # Page permissions
    create_page_permission(
        GroupPagePermission, content_admin_group, forms, page_permission_types)
def get_or_create_group(Group, group_name):
    """Return the Group named *group_name*, creating it if it is absent."""
    return Group.objects.get_or_create(name=group_name)[0]
def get_permission(Permission, code_name):
    """Return the Permission with *code_name*, creating it if missing.

    Using ``get_or_create`` instead of ``get`` avoids a DoesNotExist crash
    when the permission row has not been created yet (e.g. the custom
    segmentusergroup permissions); the single-object return value is kept
    so existing callers are unaffected.
    """
    return Permission.objects.get_or_create(codename=code_name)[0]
def create_page_permission(GroupPagePermission, group, pages, page_permission_type):
    """Grant *group* every permission type in *page_permission_type* on
    each page of the *pages* queryset (idempotent via get_or_create)."""
    for current_page in pages.iterator():
        for perm_type in page_permission_type:
            GroupPagePermission.objects.get_or_create(
                group=group, page=current_page, permission_type=perm_type)
class Migration(migrations.Migration):
    """Grant molo.forms permissions to the default wagtail admin groups."""

    # every app whose models/permissions are touched by the data migration
    # above must already be migrated, hence the broad dependency list
    dependencies = [
        ('gem', '0037_remove_content_editor_survey_permissions'),
        ('core', '0077_molo_page'),
        ('forms', '0002_create_forms_index_page'),
        ('contenttypes', '0002_remove_content_type_name'),
        ('wagtailcore', '0032_add_bulk_delete_page_permission'),
        ('wagtailadmin', '0001_create_admin_access_permissions'),
        ('wagtailusers', '0005_make_related_name_wagtail_specific'),
        ('sites', '0002_alter_domain_unique'),
        ('auth', '0007_alter_validators_add_error_messages'),
    ]

    operations = [
        # forward-only data migration (no reverse callable supplied)
        migrations.RunPython(add_molo_forms_permissions),
    ]
Fix the get_permission helper to get or create the object
# flake8: noqa: E128
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-05-24 12:20
from __future__ import unicode_literals
from django.db import migrations
from django.contrib.contenttypes.models import ContentType
from django.core.management.sql import emit_post_migrate_signal
def add_molo_forms_permissions(apps, schema_editor):
    """Create the molo.forms segment-user-group permissions and grant
    page permissions on the FormsIndexPage tree to the product/data/content
    admin groups.

    Data-migration callable; ``apps`` supplies historical model classes.
    """
    db_alias = schema_editor.connection.alias
    # ensure the post_migrate machinery has created Permission rows
    # before we try to look any of them up below
    emit_post_migrate_signal(2, False, db_alias)

    Group = apps.get_model('auth.Group')
    Permission = apps.get_model('auth.Permission')
    GroupPagePermission = apps.get_model('wagtailcore.GroupPagePermission')
    FormsIndexPage = apps.get_model('forms.FormsIndexPage')

    # **** Get IndexPages ****
    forms = FormsIndexPage.objects.all()

    # **** Get Permission ****
    # Wagtail
    # get_permission returns a (permission, created) tuple
    access_admin, created = get_permission(Permission, 'access_admin')

    # Forms
    # NOTE(review): Django normally stores ContentType.model lowercased;
    # the CamelCase value here is kept as-is -- confirm it matches the
    # intended content type row.
    FormsSegmentUserGroup, created = ContentType.objects.get_or_create(
        app_label='forms', model='FormsSegmentUserGroup')
    Permission.objects.create(
        name='add_segmentusergroup',
        codename='add_segmentusergroup',
        content_type_id=FormsSegmentUserGroup.pk)
    Permission.objects.create(
        name='change_segmentusergroup',
        codename='change_segmentusergroup',
        content_type_id=FormsSegmentUserGroup.pk)
    Permission.objects.create(
        name='delete_segmentusergroup',
        codename='delete_segmentusergroup',
        content_type_id=FormsSegmentUserGroup.pk)
    # the permission objects below are fetched but not attached to any
    # group in this migration (kept for parity with related migrations)
    add_segmentusergroup, created = get_permission(
        Permission, 'add_segmentusergroup')
    change_segmentusergroup, created = get_permission(
        Permission, 'change_segmentusergroup')
    delete_segmentusergroup, created = get_permission(
        Permission, 'delete_segmentusergroup')

    add_segment, created = get_permission(Permission, 'add_segment')
    change_segment, created = get_permission(Permission, 'change_segment')
    delete_segment, created = get_permission(Permission, 'delete_segment')

    # Wagtail Page permission
    page_permission_types = ('add', 'edit', 'publish', 'bulk_delete', 'lock')

    # **** Add wagtail groups permission ****
    # <----- Product Admin ----->
    product_admin_group = get_or_create_group(Group, 'product_admin')
    # Page permissions
    create_page_permission(
        GroupPagePermission, product_admin_group, forms, page_permission_types)

    # <----- Data Admin ----->
    data_admin_group = get_or_create_group(Group, 'data_admin')
    # Page permissions
    create_page_permission(
        GroupPagePermission, data_admin_group, forms, page_permission_types)

    # <----- Content Admin ----->
    content_admin_group = get_or_create_group(Group, 'content_admin')
    # Page permissions
    create_page_permission(
        GroupPagePermission, content_admin_group, forms, page_permission_types)
def get_or_create_group(Group, group_name):
    """Return the Group named *group_name*, creating it if it is absent."""
    return Group.objects.get_or_create(name=group_name)[0]
def get_permission(Permission, code_name):
    """Fetch (or create) the permission with *code_name*.

    Returns the ``(permission, created)`` tuple from ``get_or_create``,
    which callers unpack.
    """
    result = Permission.objects.get_or_create(codename=code_name)
    return result
def create_page_permission(GroupPagePermission, group, pages, page_permission_type):
    """Grant *group* every permission type in *page_permission_type* on
    each page of the *pages* queryset (idempotent via get_or_create)."""
    for current_page in pages.iterator():
        for perm_type in page_permission_type:
            GroupPagePermission.objects.get_or_create(
                group=group, page=current_page, permission_type=perm_type)
class Migration(migrations.Migration):
    """Grant molo.forms permissions to the default wagtail admin groups."""

    # every app whose models/permissions are touched by the data migration
    # above must already be migrated, hence the broad dependency list
    dependencies = [
        ('gem', '0037_remove_content_editor_survey_permissions'),
        ('core', '0077_molo_page'),
        ('forms', '0002_create_forms_index_page'),
        ('contenttypes', '0002_remove_content_type_name'),
        ('wagtailcore', '0032_add_bulk_delete_page_permission'),
        ('wagtailadmin', '0001_create_admin_access_permissions'),
        ('wagtailusers', '0005_make_related_name_wagtail_specific'),
        ('sites', '0002_alter_domain_unique'),
        ('auth', '0007_alter_validators_add_error_messages'),
    ]

    operations = [
        # forward-only data migration (no reverse callable supplied)
        migrations.RunPython(add_molo_forms_permissions),
    ]
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Interpreter version: python 2.7
#
# Imports =====================================================================
import os.path
import xmltodict
from settings import LOG_FN
from settings import REQUEST_FN
from settings import RESPONSE_FN
from settings import DATABASE_FN
from settings import DATABASE_KEY
from settings import EXPORT_XSD_LINK
from settings import LOGGING_ENABLED
from shelvedb import ShelveDatabase
from structures import LinkUpdateResponse
# Classes =====================================================================
class RequestDatabase(ShelveDatabase):
    """
    Keep track of requests and responses and their serialization and
    deserialization from/to XML.
    """
    def __init__(self,
                 log_fn=LOG_FN,
                 db_fn=DATABASE_FN,
                 db_key=DATABASE_KEY,
                 logging=LOGGING_ENABLED,
                 req_fn=REQUEST_FN,
                 resp_fn=RESPONSE_FN,
                 xsd_url=EXPORT_XSD_LINK):
        super(RequestDatabase, self).__init__(
            log_fn=log_fn,
            db_fn=db_fn,
            db_key=db_key,
            logging=logging,
        )

        self.req_fn = req_fn    #: Path to the request XML.
        self.resp_fn = resp_fn  #: Path to the response XML.
        self.xsd_url = xsd_url

        # pending requests keyed by session_id, and responses not yet
        # handed back to the caller
        self._req_queue = {}
        self._resp_queue = []

    def add_request(self, request):
        """
        Add new `request` object to database.

        Args:
            request (obj): Object with defined :attr:`session_id` property and
                :meth:`to_dict_xml` method.
        """
        if not (hasattr(request, "session_id") and
                hasattr(request, "to_dict_xml")):
            raise ValueError(
                "Object must have .session_id property and .to_dict_xml() "
                "method!"
            )

        self._req_queue[request.session_id] = request

        self.log(
            "Received request session_id(%s): %s" % (
                request.session_id,
                repr(request)
            )
        )

    def _add_response(self, response):
        """
        Add response from XML to the internal queue.

        Args:
            response (obj): :class:`.LinkUpdateResponse` object.
        """
        self._resp_queue.append(response)

        # a response closes the matching pending request, if any
        if response.session_id in self._req_queue:
            del self._req_queue[response.session_id]

        self.log("Received response session_id(%s)." % response.session_id)

    def _process_responses(self):
        """
        Go thru response XML (:attr:`.resp_fn`) and put them all in the
        response queue using :meth:`_add_response`.
        """
        if not os.path.exists(self.resp_fn):
            self.log(
                "._process_responses() called, "
                "but '%s' not found." % self.resp_fn
            )
            return

        with open(self.resp_fn) as resp_f:
            xml = resp_f.read()
            xdom = xmltodict.parse(xml)

        # parse XML
        results = xdom.get("results", {}).get("result", [])

        # a single <result> deserializes to a dict, not a list
        if type(results) not in [list, tuple]:
            results = [results]

        # convert XML results to LinkUpdateResponse structure
        for result in results:
            # to allow ** creation of namedtuple
            result["session_id"] = result["@session_id"]
            del result["@session_id"]

            # copy reason or set it to None
            result["reason"] = result.get("reason", None)

            self._add_response(LinkUpdateResponse(**result))

        # the response file is consumed; remove it so responses are not
        # processed twice
        os.unlink(self.resp_fn)

        self.log(
            "Aleph response queue processed. Got %d responses." % len(results)
        )

    def get_responses(self):
        """
        Process response queue, remove finished requests from request queue,
        return list of response objects.

        Returns:
            list: List of :class:`.LinkUpdateResponse` objects.
        """
        self._process_responses()

        session_ids = ", ".join(
            resp.session_id
            for resp in self._resp_queue
        )

        if session_ids:
            self.log("Sent back responses for: session_id(%s)." % session_ids)
        else:
            # typo fixed (#13): ".get_repsponses" -> ".get_responses"
            self.log(".get_responses(): No requests returned.")

        responses = self._resp_queue
        self._resp_queue = []

        return responses

    def to_xml(self):
        """
        Convert :attr:`_req_queue` to XML as defined in request XSD.

        Returns:
            unicode: XML.
        """
        if not self._req_queue:
            return xmltodict.unparse({"records": None}, pretty=True)

        record_dicts = [
            rec.to_dict_xml()
            for rec in self._req_queue.values()
        ]

        return xmltodict.unparse(
            {
                "records": {
                    "record": record_dicts,
                    "@xsi:schemaLocation": self.xsd_url,
                    "@xmlns": self.xsd_url.replace(".xsd", ""),
                    "@xmlns:xsi": "http://www.w3.org/2001/XMLSchema-instance",
                }
            },
            pretty=True
        )

    def save(self):
        """
        Read the response XML, process it, save the database and request XML.
        """
        # write request XML
        with open(self.req_fn, "w") as req_f:
            req_f.write(self.to_xml())

        super(RequestDatabase, self).save()

    @staticmethod
    def load(fn=DATABASE_FN, db_key=DATABASE_KEY,
             creator=lambda fn: RequestDatabase(db_fn=fn)):
        """
        Load the database from the shelve `fn`.

        Args:
            fn (str): Path to the database file. Default
                :attr:`.DATABASE_FN`.
            db_key (str): What database key to use. Default
                :attr:`.DATABASE_KEY`.
            creator (fn reference): Reference to the function, which will
                create new :class:`.RequestDatabase` if the old is not
                found. Default lambda, which expects `fn` parameter
                ``lambda fn: ..``.

        Returns:
            obj: :class:`.RequestDatabase` instance from the `fn` or newly
                created.
        """
        return ShelveDatabase.load(creator=creator, fn=fn, db_key=db_key)
Fixed #13 - typo in log.
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Interpreter version: python 2.7
#
# Imports =====================================================================
import os.path
import xmltodict
from settings import LOG_FN
from settings import REQUEST_FN
from settings import RESPONSE_FN
from settings import DATABASE_FN
from settings import DATABASE_KEY
from settings import EXPORT_XSD_LINK
from settings import LOGGING_ENABLED
from shelvedb import ShelveDatabase
from structures import LinkUpdateResponse
# Classes =====================================================================
class RequestDatabase(ShelveDatabase):
    """
    Keep track of requests and responses and their serialization and
    deserialization from/to XML.
    """
    def __init__(self,
                 log_fn=LOG_FN,
                 db_fn=DATABASE_FN,
                 db_key=DATABASE_KEY,
                 logging=LOGGING_ENABLED,
                 req_fn=REQUEST_FN,
                 resp_fn=RESPONSE_FN,
                 xsd_url=EXPORT_XSD_LINK):
        """
        Args:
            log_fn (str): Path to the log file. Default :attr:`LOG_FN`.
            db_fn (str): Path to the shelve database file. Default
                :attr:`DATABASE_FN`.
            db_key (str): Key used inside the shelve database. Default
                :attr:`DATABASE_KEY`.
            logging (bool): Logging on/off switch, handed to the parent.
                Default :attr:`LOGGING_ENABLED`.
            req_fn (str): Where the request XML is written. Default
                :attr:`REQUEST_FN`.
            resp_fn (str): Where the response XML is expected. Default
                :attr:`RESPONSE_FN`.
            xsd_url (str): URL of the export XSD, embedded in the XML
                header. Default :attr:`EXPORT_XSD_LINK`.
        """
        super(RequestDatabase, self).__init__(
            log_fn=log_fn,
            db_fn=db_fn,
            db_key=db_key,
            logging=logging,
        )
        self.req_fn = req_fn  #: Path to the request XML.
        self.resp_fn = resp_fn  #: Path to the response XML.
        self.xsd_url = xsd_url  #: URL of the export XSD.
        # requests waiting for an answer, keyed by session_id
        self._req_queue = {}
        # responses parsed from the response XML, not yet handed out
        self._resp_queue = []

    def add_request(self, request):
        """
        Add new `request` object to database.

        Args:
            request (obj): Object with defined :attr:`session_id` property and
                :meth:`to_dict_xml` method.

        Raises:
            ValueError: If `request` lacks the required interface.
        """
        # duck-type check instead of isinstance: any object with the two
        # members is acceptable
        if not (hasattr(request, "session_id") and
                hasattr(request, "to_dict_xml")):
            raise ValueError(
                "Object must have .session_id property and .to_dict_xml() "
                "method!"
            )

        self._req_queue[request.session_id] = request

        self.log(
            "Received request session_id(%s): %s" % (
                request.session_id,
                repr(request)
            )
        )

    def _add_response(self, response):
        """
        Add response from XML to the internal queue.

        Args:
            response (obj): :class:`.LinkUpdateResponse` object.
        """
        self._resp_queue.append(response)

        # the matching request (if any) is now answered - drop it
        if response.session_id in self._req_queue:
            del self._req_queue[response.session_id]

        self.log("Received response session_id(%s)." % response.session_id)

    def _process_responses(self):
        """
        Go thru response XML (:attr:`.resp_fn`) and put them all in the
        response queue using :meth:`_add_response`.
        """
        if not os.path.exists(self.resp_fn):
            self.log(
                "._process_responses() called, "
                "but '%s' not found." % self.resp_fn
            )
            return

        with open(self.resp_fn) as resp_f:
            xml = resp_f.read()
            xdom = xmltodict.parse(xml)

        # parse XML
        results = xdom.get("results", {}).get("result", [])
        # xmltodict returns a single dict (not a list) when there is only
        # one <result> element - normalize to a list
        if type(results) not in [list, tuple]:
            results = [results]

        # convert XML results to LinkUpdateResponse structure
        for result in results:
            # to allow ** creation of namedtuple
            result["session_id"] = result["@session_id"]
            del result["@session_id"]

            # copy reason or set it to None
            result["reason"] = result.get("reason", None)

            self._add_response(LinkUpdateResponse(**result))

        # the response file is consumed exactly once
        os.unlink(self.resp_fn)
        self.log(
            "Aleph response queue processed. Got %d responses." % len(results)
        )

    def get_responses(self):
        """
        Process response queue, remove finished requests from request queue,
        return list of response objects.

        Returns:
            list: List of :class:`.LinkUpdateResponse` objects.
        """
        self._process_responses()

        session_ids = ", ".join(
            resp.session_id
            for resp in self._resp_queue
        )
        if session_ids:
            self.log("Sent back responses for: session_id(%s)." % session_ids)
        else:
            self.log(".get_responses(): No requests returned.")

        # hand the queue over to the caller and start collecting anew
        responses = self._resp_queue
        self._resp_queue = []

        return responses

    def to_xml(self):
        """
        Convert :attr:`_req_queue` to XML as defined in request XSD.

        Returns:
            unicode: XML.
        """
        if not self._req_queue:
            # empty queue -> bare <records/> element
            return xmltodict.unparse({"records": None}, pretty=True)

        record_dicts = [
            rec.to_dict_xml()
            for rec in self._req_queue.values()
        ]

        # "@"-prefixed keys become XML attributes of <records>
        return xmltodict.unparse(
            {
                "records": {
                    "record": record_dicts,
                    "@xsi:schemaLocation": self.xsd_url,
                    "@xmlns": self.xsd_url.replace(".xsd", ""),
                    "@xmlns:xsi": "http://www.w3.org/2001/XMLSchema-instance",
                }
            },
            pretty=True
        )

    def save(self):
        """
        Read the response XML, process it, save the database and request XML.
        """
        # write request XML
        with open(self.req_fn, "w") as req_f:
            req_f.write(self.to_xml())

        super(RequestDatabase, self).save()

    @staticmethod
    def load(fn=DATABASE_FN, db_key=DATABASE_KEY,
             creator=lambda fn: RequestDatabase(db_fn=fn)):
        """
        Load the database from the shelve `fn`.

        Args:
            fn (str): Path to the database file. Default
                :attr:`.DATABASE_FN`.
            db_key (str): What database key to use. Default
                :attr:`.DATABASE_KEY`.
            creator (fn reference): Reference to the function, which will
                create new :class:`.RequestDatabase` if the old is not
                found. Default lambda, which expects `fn` parameter
                ``lambda fn: ..``.

        Returns:
            obj: :class:`.RequestDatabase` instance from the `fn` or newly
                created.
        """
        return ShelveDatabase.load(creator=creator, fn=fn, db_key=db_key)
|
""" ..
"""
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from django.conf.urls import patterns, url
from django.utils.functional import cached_property
from django.conf import settings
from . import emitters, exceptions, parsers
class Resource(object):
    """Base class for a REST-style resource.

    Dispatches incoming HTTP requests to handler methods (`get`, `post`,
    ...), negotiating the response emitter and the request parser on the
    way.
    """

    #! Default list of allowed HTTP methods.
    http_allowed_methods = [
        'get',
        'post',
        'put',
        'delete',
        # TODO: 'patch',
        # ..
        # TODO: 'head',
        # <http://www.w3.org/Protocols/rfc2616/rfc2616-sec9.html#sec9.4>
        # TODO: 'options'
        # <http://www.w3.org/Protocols/rfc2616/rfc2616-sec9.html#sec9.2>
    ]

    #! The list of method names that we understand but do not necessarily
    #! support.
    http_method_names = [
        'get',
        'post',
        'put',
        'delete',
        'patch',
        'options',
        'head',
        'connect',
        'trace',
    ]

    #! Name of the resource to use in URIs; defaults to `__name__.lower()`.
    name = None

    def __init__(self):
        # Initialize name to be the name of the instantiated class if it was
        # not defined in the class definition.
        if self.name is None:
            self.name = self.__class__.__name__.lower()

    def find_emitter(self, request, **kwargs):
        """
        Determines the format to emit to and stores it upon success. Raises
        a proper exception if it cannot.
        """
        # Check locations where format may be defined in order of
        # precedence.
        if kwargs.get('format') is not None:
            # Format was provided through the URL via `.FORMAT`.
            self.emitter = emitters.get_by_name(kwargs['format'])
        else:
            # TODO: Should not have an else here and allow the header even
            #   if the format check failed ?
            self.emitter = emitters.get_by_request(request)

        if self.emitter is None:
            # Failed to find an appropriate emitter.
            # Get dictionary of available formats.
            available = emitters.get_available()

            # TODO: No idea what to emit it with; using JSON for now.
            # TODO: This should be a configurable property perhaps ?
            self.emitter = emitters.Json

            # Emit the response using the appropriate exception.
            raise exceptions.NotAcceptable(self.emit(available))

    def find_parser(self, request, **kwargs):
        """
        Determines the format to parse to and stores it upon success. Raises
        a proper exception if it cannot.
        """
        self.parser = parsers.get(request)
        if self.parser is None:
            # Failed to find an appropriate parser; we have no idea how to
            # handle the data.
            raise exceptions.UnsupportedMediaType()

    def emit(self, obj=None, status=200):
        """Transforms python objects to an acceptable format for transmission.
        """
        response = HttpResponse(status=status)
        if obj is not None:
            response.content = self.emitter.emit(obj)
            response['Content-Type'] = self.emitter.get_mimetype()
        else:
            # No need to specify the default content-type if we don't
            # have a body.
            del response['Content-Type']
        return response

    def parse(self, request):
        """Transforms received data to valid python objects.
        """
        # TODO: anything else to do here ?
        return self.parser.parse(request)

    # TODO: add some magic to make this a class method
    @cached_property
    def allow_header(self):
        """Value for the HTTP `Allow` header, e.g. ``"GET, POST"``."""
        allow = [m.upper() for m in self.http_allowed_methods]
        return ', '.join(allow).strip()

    def find_method(self, method):
        """Ensures method is acceptable; throws appropriate exception elsewise.

        Returns the bound handler for `method` on success.
        """
        method_name = method.lower()
        if method_name not in self.http_method_names:
            # Method not understood by our library. Die.
            response = HttpResponse()
            raise exceptions.NotImplemented(response)

        method = getattr(self, method_name, None)
        if method_name not in self.http_allowed_methods or method is None:
            # Method understood but not allowed. Die.
            response = HttpResponse()
            # FIX: `allow_header` is a cached_property yielding a string;
            # the original called it (`self.allow_header()`), which raised
            # TypeError instead of producing the intended 405 response.
            response['Allow'] = self.allow_header
            raise exceptions.MethodNotAllowed(response)

        # Method is allowed, continue.
        return method

    @csrf_exempt
    def dispatch(self, request, *args, **kwargs):
        # Negotiate, parse and delegate; always answer with a response.
        try:
            # Ensure the request method is present in the list of
            # allowed HTTP methods; `method` is the bound handler.
            method = self.find_method(request.method)

            # Request an emitter as early as possible in order to
            # accurately return errors (if accrued).
            self.find_emitter(request, **kwargs)

            # TODO: Authn check
            # TODO: Authz check

            # By default, there is no object (for get and delete requests).
            obj = None
            if request.body:
                # Request a parser and proceed to parse the request body.
                self.find_parser(request)
                obj = self.parse(request)
                # TODO: Authz check (w/object)

            # Delegate to the handler resolved by find_method (the original
            # re-fetched it via getattr, which was redundant).
            return method(request, obj, **kwargs)

        except exceptions.Error as ex:
            # TODO: We need to emit the error response.
            return ex.response

        except BaseException as ex:
            # TODO: `del response['Content-Type']` needs to be generalized
            #   somewhere; it's everywhere.
            if settings.DEBUG:
                response = self.emit({
                    'name': ex.__class__.__name__,
                    'message': str(ex),
                })
                response.status_code = 501
            else:
                # Return no body.
                # FIX: the original referenced the undefined name `http`
                # (NameError); build the 500 from the imported HttpResponse.
                response = HttpResponse(status=500)
                del response['Content-Type']
            return response

    def read(self, request, **kwargs):
        """Hook: return the items for a `get`; subclasses must override."""
        raise exceptions.NotImplemented()

    def get(self, request, obj=None, **kwargs):
        # TODO: caching, pagination
        # Delegate to `read` to actually grab a list of items.
        items = self.read(request, **kwargs)

        # Emit the list of read items.
        return self.emit(items)

    def post(self, request, obj, **kwargs):
        raise exceptions.NotImplemented()

    def put(self, request, obj, *args, **kwargs):
        raise exceptions.NotImplemented()

    def delete(self, request, obj, *args, **kwargs):
        raise exceptions.NotImplemented()

    @cached_property
    def urls(self):
        """Constructs the URLs that this resource will respond to."""
        # Raw string: the pattern contains the regex escape `\.`.
        pattern = r'^{}{{}}(?:\.(?P<format>[^/]*?))?/?$'.format(self.name)
        return patterns('',
            url(pattern.format(''), self.dispatch, name='dispatch'),
            url(pattern.format('/(?P<id>.*)'), self.dispatch, name='dispatch')
        )
class Model(Resource):
    """Implementation of `Resource` for django's models.
    """

    #! The class object of the django model this resource is exposing.
    model = None

    def read(self, request, **kwargs):
        # TODO: filtering
        queryset = self.model.objects.all()
        return queryset
Import error fixed.
""" ..
"""
from django.http import HttpResponse, HttpResponseServerError
from django.views.decorators.csrf import csrf_exempt
from django.conf.urls import patterns, url
from django.utils.functional import cached_property
from django.conf import settings
from . import emitters, exceptions, parsers
class Resource(object):
    """Base class for a REST-style resource.

    Dispatches incoming HTTP requests to handler methods (`get`, `post`,
    ...), negotiating the response emitter and the request parser on the
    way.
    """

    #! Default list of allowed HTTP methods.
    http_allowed_methods = [
        'get',
        'post',
        'put',
        'delete',
        # TODO: 'patch',
        # ..
        # TODO: 'head',
        # <http://www.w3.org/Protocols/rfc2616/rfc2616-sec9.html#sec9.4>
        # TODO: 'options'
        # <http://www.w3.org/Protocols/rfc2616/rfc2616-sec9.html#sec9.2>
    ]

    #! The list of method names that we understand but do not necessarily
    #! support.
    http_method_names = [
        'get',
        'post',
        'put',
        'delete',
        'patch',
        'options',
        'head',
        'connect',
        'trace',
    ]

    #! Name of the resource to use in URIs; defaults to `__name__.lower()`.
    name = None

    def __init__(self):
        # Initialize name to be the name of the instantiated class if it was
        # not defined in the class definition.
        if self.name is None:
            self.name = self.__class__.__name__.lower()

    def find_emitter(self, request, **kwargs):
        """
        Determines the format to emit to and stores it upon success. Raises
        a proper exception if it cannot.
        """
        # Check locations where format may be defined in order of
        # precedence.
        if kwargs.get('format') is not None:
            # Format was provided through the URL via `.FORMAT`.
            self.emitter = emitters.get_by_name(kwargs['format'])
        else:
            # TODO: Should not have an else here and allow the header even
            #   if the format check failed ?
            self.emitter = emitters.get_by_request(request)

        if self.emitter is None:
            # Failed to find an appropriate emitter.
            # Get dictionary of available formats.
            available = emitters.get_available()

            # TODO: No idea what to emit it with; using JSON for now.
            # TODO: This should be a configurable property perhaps ?
            self.emitter = emitters.Json

            # Emit the response using the appropriate exception.
            raise exceptions.NotAcceptable(self.emit(available))

    def find_parser(self, request, **kwargs):
        """
        Determines the format to parse to and stores it upon success. Raises
        a proper exception if it cannot.
        """
        self.parser = parsers.get(request)
        if self.parser is None:
            # Failed to find an appropriate parser; we have no idea how to
            # handle the data.
            raise exceptions.UnsupportedMediaType()

    def emit(self, obj=None, status=200):
        """Transforms python objects to an acceptable format for transmission.
        """
        response = HttpResponse(status=status)
        if obj is not None:
            response.content = self.emitter.emit(obj)
            response['Content-Type'] = self.emitter.get_mimetype()
        else:
            # No need to specify the default content-type if we don't
            # have a body.
            del response['Content-Type']
        return response

    def parse(self, request):
        """Transforms received data to valid python objects.
        """
        # TODO: anything else to do here ?
        return self.parser.parse(request)

    # TODO: add some magic to make this a class method
    @cached_property
    def allow_header(self):
        """Value for the HTTP `Allow` header, e.g. ``"GET, POST"``."""
        allow = [m.upper() for m in self.http_allowed_methods]
        return ', '.join(allow).strip()

    def find_method(self, method):
        """Ensures method is acceptable; throws appropriate exception elsewise.

        Returns the bound handler for `method` on success.
        """
        method_name = method.lower()
        if method_name not in self.http_method_names:
            # Method not understood by our library. Die.
            response = HttpResponse()
            raise exceptions.NotImplemented(response)

        method = getattr(self, method_name, None)
        if method_name not in self.http_allowed_methods or method is None:
            # Method understood but not allowed. Die.
            response = HttpResponse()
            # FIX: `allow_header` is a cached_property yielding a string;
            # the original called it (`self.allow_header()`), which raised
            # TypeError instead of producing the intended 405 response.
            response['Allow'] = self.allow_header
            raise exceptions.MethodNotAllowed(response)

        # Method is allowed, continue.
        return method

    @csrf_exempt
    def dispatch(self, request, *args, **kwargs):
        # Negotiate, parse and delegate; always answer with a response.
        try:
            # Ensure the request method is present in the list of
            # allowed HTTP methods; `method` is the bound handler.
            method = self.find_method(request.method)

            # Request an emitter as early as possible in order to
            # accurately return errors (if accrued).
            self.find_emitter(request, **kwargs)

            # TODO: Authn check
            # TODO: Authz check

            # By default, there is no object (for get and delete requests).
            obj = None
            if request.body:
                # Request a parser and proceed to parse the request body.
                self.find_parser(request)
                obj = self.parse(request)
                # TODO: Authz check (w/object)

            # Delegate to the handler resolved by find_method (the original
            # re-fetched it via getattr, which was redundant).
            return method(request, obj, **kwargs)

        except exceptions.Error as ex:
            # TODO: We need to emit the error response.
            return ex.response

        except BaseException as ex:
            # TODO: `del response['Content-Type']` needs to be generalized
            #   somewhere; it's everywhere.
            if settings.DEBUG:
                response = self.emit({
                    'name': ex.__class__.__name__,
                    'message': str(ex),
                })
                response.status_code = 501
            else:
                # Return no body.
                response = HttpResponseServerError()
                del response['Content-Type']
            return response

    def read(self, request, **kwargs):
        """Hook: return the items for a `get`; subclasses must override."""
        raise exceptions.NotImplemented()

    def get(self, request, obj=None, **kwargs):
        # TODO: caching, pagination
        # Delegate to `read` to actually grab a list of items.
        items = self.read(request, **kwargs)

        # Emit the list of read items.
        return self.emit(items)

    def post(self, request, obj, **kwargs):
        raise exceptions.NotImplemented()

    def put(self, request, obj, *args, **kwargs):
        raise exceptions.NotImplemented()

    def delete(self, request, obj, *args, **kwargs):
        raise exceptions.NotImplemented()

    @cached_property
    def urls(self):
        """Constructs the URLs that this resource will respond to."""
        # Raw string: the pattern contains the regex escape `\.`.
        pattern = r'^{}{{}}(?:\.(?P<format>[^/]*?))?/?$'.format(self.name)
        return patterns('',
            url(pattern.format(''), self.dispatch, name='dispatch'),
            url(pattern.format('/(?P<id>.*)'), self.dispatch, name='dispatch')
        )
class Model(Resource):
    """Implementation of `Resource` for django's models.
    """

    #! The class object of the django model this resource is exposing.
    model = None

    def read(self, request, **kwargs):
        # TODO: filtering
        queryset = self.model.objects.all()
        return queryset
|
from os import makedirs
import csnUtility
import os.path
import warnings
import sys
import re
import glob
import traceback
# ToDo:
# - Have public and private related projects (hide the include paths from its clients)
# - Unloading all modules in csnCilab.LoadModule does not work (it will reload the >cached< module).
# This makes it impossible to use changed csn files while the GUI is still running.
# Need to replace the 'imp' approach.
# Look at RollBackImporter (http://www.koders.com/python/fid3017018D7707B26F40B546EE2D1388C1A14383D3.aspx?s=%22Steve+Purcell%22)
# - If ITK doesn't implement the DONT_INHERIT keyword, then use environment variables to work around the cmake propagation behaviour
# - Support precompiled headers by patching the produced vcproj files
# - csn python modules can contain option widgets that are loaded into CSnakeGUI! Use this to add
# selection of desired toolkit modules in csnGIMIAS
# - create convenience module csnCilabAll with attributes itk, vtk, baselib, etc.
# - install msvcp.dll and mitkstatemachine.xml
# - extend csnGUI with the option of additional root folders
# - support installing subtrees to the binfolder, so that the cardiacplugin functions correctly
# (it needs bin/debug/plugins/cardiacsegmpl/data)
# Folder one level above this module; added to sys.path so that sibling
# packages can be imported. Forward slashes are enforced for CMake/CSnake.
root = os.path.dirname(__file__) + "/.."
root = root.replace("\\", "/")
if root not in sys.path:
    sys.path.append(root)
class DependencyError(StandardError):
    """Raised for invalid project dependencies (a project added to itself,
    or a circular dependency) - see Project.AddProjects."""
    pass
class SyntaxError(StandardError):
    """Raised for malformed input, e.g. a backslash in the binary folder
    path (see Generator.Generate).

    NOTE(review): shadows the builtin SyntaxError within this module.
    """
    pass
class ProjectClosedError(StandardError):
    """Raised when operating on a closed project.

    NOTE(review): no raiser is visible in this part of the file; presumably
    used elsewhere - confirm before removing.
    """
    pass
def Caller(up=0):
    """
    Get file name, line number, function name and
    source text of the caller's caller as 4-tuple:
    (file, line, func, text).

    The optional argument 'up' allows retrieval of
    a caller further back up into the call stack.
    """
    # limit=up+2 keeps just enough frames to reach the requested caller;
    # after extraction the oldest kept frame is at index 0
    stack = traceback.extract_stack(limit=up + 2)
    return stack[0]
class OpSysSection:
    """
    Helper class for OpSys: holds the compiler definitions and libraries
    for one visibility level (public or private).
    """
    def __init__(self):
        self.definitions = []
        self.libraries = []
class OpSys:
    """
    Helper class that contains the settings on an operating system,
    split into a public and a private section.
    """
    def __init__(self):
        self.public = OpSysSection()
        self.private = OpSysSection()
class Generator:
    """
    Generates the CMakeLists.txt for a csnBuild.Project, recursively
    generating the CMake files for its dependency projects as well.
    """
    def Generate(self, _targetProject, _binaryFolder, _installFolder = "", _generatedList = None, _knownProjectNames = None):
        """
        Generates the CMakeLists.txt for a csnBuild.Project in _binaryFolder.
        All binaries are placed in _binaryFolder/bin.
        _binaryFolder -- Target location for the cmake files.
        _installFolder -- Target location for INSTALL rules ("" disables them).
        _generatedList -- List of projects for which Generate was already called
        _knownProjectNames -- Project names seen so far; used to detect name clashes.
        Raises NameError for duplicate project names or unknown project types,
        and SyntaxError (module-local class) for backslashes in _binaryFolder.
        """
        # only the outermost call (no shared state passed in) writes the
        # top-level install rules at the end
        isTopLevelProject = _generatedList is None
        if( _generatedList is None ):
            _generatedList = []
        if( _knownProjectNames is None ):
            _knownProjectNames = []

        #csnUtility.Log("Generate %s\n" % (_targetProject.name))
        #for project in _generatedList:
        #    csnUtility.Log("Already generated %s\n" % (project.name))
        #csnUtility.Log("---\n")

        if( _targetProject.name in _knownProjectNames):
            raise NameError, "Each project must have a unique name. Violating project is %s in folder %s\n" % (_targetProject.name, _targetProject.sourceRootFolder)
        else:
            _knownProjectNames.append(_targetProject.name)

        # trying to Generate a project twice indicates a logical error in the code
        assert not _targetProject in _generatedList, "Target project name = %s" % (_targetProject.name)
        _generatedList.append(_targetProject)

        # check for backward slashes
        if csnUtility.HasBackSlash(_binaryFolder):
            raise SyntaxError, "Error, backslash found in binary folder %s" % _binaryFolder

        if( _targetProject.type == "third party" ):
            warnings.warn( "CSnake warning: you are trying to generate CMake scripts for a third party module (nothing generated)\n" )
            return

        # this is the OpSys instance for all operating systems
        opSysAll = _targetProject.opSystems["ALL"]

        # create binary project folder (short-circuit: makedirs only runs
        # when the folder does not exist yet)
        binaryProjectFolder = _binaryFolder + "/" + _targetProject.binarySubfolder
        os.path.exists(binaryProjectFolder) or os.makedirs(binaryProjectFolder)

        # create Win32Header
        if( _targetProject.type != "executable" and _targetProject.GetGenerateWin32Header() ):
            self.__GenerateWin32Header(_targetProject, _binaryFolder)
            # the generated header lives in the binary folder, so clients
            # must be able to find it there
            if not binaryProjectFolder in _targetProject.publicIncludeFolders:
                _targetProject.publicIncludeFolders.append(binaryProjectFolder)

        # open cmakelists.txt
        fileCMakeLists = "%s/%s" % (_binaryFolder, _targetProject.cmakeListsSubpath)
        f = open(fileCMakeLists, 'w')

        # write header and some cmake fields
        f.write( "# CMakeLists.txt generated automatically by the CSnake generator.\n" )
        f.write( "# DO NOT EDIT (changes will be lost)\n\n" )
        f.write( "PROJECT(%s)\n" % (_targetProject.name) )
        f.write( "SET( BINARY_DIR \"%s\")\n" % (_binaryFolder) )
        binaryBinFolder = "%s/bin/%s" % (_binaryFolder, _targetProject.installSubFolder)

        f.write( "\n# All binary outputs are written to the same folder.\n" )
        f.write( "SET( CMAKE_SUPPRESS_REGENERATION TRUE )\n" )
        f.write( "SET( EXECUTABLE_OUTPUT_PATH \"%s\")\n" % (binaryBinFolder) )
        f.write( "SET( LIBRARY_OUTPUT_PATH \"%s\")\n" % (binaryBinFolder) )

        # create config and use files, and include them
        _targetProject.GenerateConfigFile( _binaryFolder)
        _targetProject.GenerateUseFile(_binaryFolder)

        # get related projects to be 'used' in the sense of including the use and config file.
        projectsToUse = _targetProject.GetProjectsToUse()

        # find and use related projects
        for project in projectsToUse:
            # include config and use file
            f.write( "\n# use %s\n" % (project.name) )
            f.write( "INCLUDE(\"%s\")\n" % (project.GetPathToConfigFile(_binaryFolder)) )
            f.write( "INCLUDE(\"%s\")\n" % (project.GetPathToUseFile(_binaryFolder)) )

        # generate moc files
        cmakeMocInputVar = ""
        if( len(_targetProject.sourcesToBeMoced) ):
            cmakeMocInputVarName = "MOC_%s" % (_targetProject.name)
            cmakeMocInputVar = "${%s}" % (cmakeMocInputVarName)
            f.write("\nQT_WRAP_CPP( %s %s %s )\n" % (_targetProject.name, cmakeMocInputVarName, csnUtility.Join(_targetProject.sourcesToBeMoced, _addQuotes = 1)) )

        # generate ui files
        cmakeUIHInputVar = ""
        cmakeUICppInputVar = ""
        if( len(_targetProject.sourcesToBeUIed) ):
            cmakeUIHInputVarName = "UI_H_%s" % (_targetProject.name)
            cmakeUIHInputVar = "${%s}" % (cmakeUIHInputVarName)
            cmakeUICppInputVarName = "UI_CPP_%s" % (_targetProject.name)
            cmakeUICppInputVar = "${%s}" % (cmakeUICppInputVarName)
            f.write("\nQT_WRAP_UI( %s %s %s %s )\n" % (_targetProject.name, cmakeUIHInputVarName, cmakeUICppInputVarName, csnUtility.Join(_targetProject.sourcesToBeUIed, _addQuotes = 1)) )

        # write section that is specific for the project type
        if( len(_targetProject.sources) ):
            f.write( "\n# Add target\n" )

            # add definitions
            for opSysName in ["WIN32", "NOT WIN32"]:
                opSys = _targetProject.opSystems[opSysName]
                if( len(opSys.private.definitions) ):
                    f.write( "IF(%s)\n" % (opSysName))
                    f.write( "ADD_DEFINITIONS(%s)\n" % csnUtility.Join(opSys.private.definitions) )
                    f.write( "ENDIF(%s)\n" % (opSysName))
            if( len(opSysAll.private.definitions) ):
                f.write( "ADD_DEFINITIONS(%s)\n" % csnUtility.Join(opSysAll.private.definitions) )

            # add sources
            if(_targetProject.type == "executable" ):
                f.write( "ADD_EXECUTABLE(%s %s %s %s %s)\n" % (_targetProject.name, cmakeUIHInputVar, cmakeUICppInputVar, cmakeMocInputVar, csnUtility.Join(_targetProject.sources, _addQuotes = 1)) )
            elif(_targetProject.type == "library" ):
                f.write( "ADD_LIBRARY(%s STATIC %s %s %s %s)\n" % (_targetProject.name, cmakeUIHInputVar, cmakeUICppInputVar, cmakeMocInputVar, csnUtility.Join(_targetProject.sources, _addQuotes = 1)) )
            elif(_targetProject.type == "dll" ):
                f.write( "ADD_LIBRARY(%s SHARED %s %s %s %s)\n" % (_targetProject.name, cmakeUIHInputVar, cmakeUICppInputVar, cmakeMocInputVar, csnUtility.Join(_targetProject.sources, _addQuotes = 1)) )
            else:
                raise NameError, "Unknown project type %s" % _targetProject.type

            # write section for sorting moc and ui files in a separate folder in Visual Studio
            f.write( "\n # Create source groups \n" )
            f.write( "IF (WIN32)\n" )
            f.write( " SOURCE_GROUP(\"Generated MOC Files\" REGULAR_EXPRESSION moc_[a-zA-Z0-9_]*[.]cxx$)\n")
            f.write( " SOURCE_GROUP(\"Forms\" REGULAR_EXPRESSION [.]ui$)\n")
            f.write( "ENDIF(WIN32)\n\n" )

            # add standard definition to allow multiply defined symbols in the linker
            f.write( "SET_TARGET_PROPERTIES(%s PROPERTIES LINK_FLAGS \"/FORCE:MULTIPLE\")" % _targetProject.name)

            # add install rule
            if( _installFolder != "" and _targetProject.type != "library"):
                destination = "%s/%s" % (_installFolder, _targetProject.installSubFolder)
                f.write( "\n# Rule for installing files in location %s\n" % destination)
                f.write( "INSTALL(TARGETS %s DESTINATION %s)\n" % (_targetProject.name, destination) )

        # Find projects that must be generated. A separate list is used to ease debugging.
        projectsToGenerate = set()
        requiredProjects = _targetProject.RequiredProjects(_recursive = 1)
        for projectToGenerate in requiredProjects:
            # determine if we must Generate the project. If a required project will generate it,
            # then leave it to the required project. This will prevent multiple generation of the same project.
            # If a non-required project will generate it, then still generate the project
            # (the non-required project may depend on target project to generate project, creating a race condition).
            generateProject = not projectToGenerate in _generatedList and projectToGenerate.type != "third party"
            if( generateProject ):
                for requiredProject in _targetProject.RequiredProjects(_recursive = 0):
                    if( requiredProject.DependsOn(projectToGenerate) ):
                        generateProject = 0
            if( generateProject ):
                projectsToGenerate.add(projectToGenerate)
        f.write( "\n" )

        # add non-required projects that have not yet been generated to projectsToGenerate
        for project in _targetProject.projectsNonRequired:
            if( not project in _generatedList ):
                projectsToGenerate.add(project)

        # generate projects, and add a line with ADD_SUBDIRECTORY
        for project in projectsToGenerate:
            # check again if a previous iteration of this loop didn't add project to the generated list
            if not project in _generatedList:
                f.write( "ADD_SUBDIRECTORY(\"${BINARY_DIR}/%s\" \"${BINARY_DIR}/%s\")\n" % (project.binarySubfolder, project.binarySubfolder) )
                self.Generate(project, _binaryFolder, _installFolder, _generatedList, _knownProjectNames)

        # add dependencies
        f.write( "\n" )
        for project in requiredProjects:
            # ADD_DEPENDENCIES only makes sense for projects that build a target
            if( len(project.sources) ):
                f.write( "ADD_DEPENDENCIES(%s %s)\n" % (_targetProject.name, project.name) )

        # if top level project, add install rules for all the filesToInstall
        if isTopLevelProject:
            for mode in ("debug", "release"):
                for project in _targetProject.GetProjectsToUse():
                    # iterate over filesToInstall to be copied in this mode
                    for location in project.filesToInstall[mode].keys():
                        files = ""
                        for file in project.filesToInstall[mode][location]:
                            files += "%s " % file.replace("\\", "/")
                        if files != "":
                            destination = "%s/%s" % (_installFolder, location)
                            f.write( "\n# Rule for installing files in location %s\n" % destination)
                            f.write("INSTALL(FILES %s DESTINATION %s CONFIGURATIONS %s)\n" % (files, destination, mode.upper()))
        f.close()

    def __GenerateWin32Header(self, _targetProject, _binaryFolder):
        """
        Generates the ProjectNameWin32.h header file for exporting/importing dll functions.
        """
        # static libraries get a different template than dlls/executables
        templateFilename = root + "/CSnake/TemplateSourceFiles/Win32Header.h"
        if( _targetProject.type == "library" ):
            templateFilename = root + "/CSnake/TemplateSourceFiles/Win32Header.lib.h"
        templateOutputFilename = "%s/%s/%sWin32Header.h" % (_binaryFolder, _targetProject.binarySubfolder, _targetProject.name)

        assert os.path.exists(templateFilename), "File not found %s\n" % (templateFilename)
        f = open(templateFilename, 'r')
        template = f.read()
        template = template.replace('${PROJECTNAME_UPPERCASE}', _targetProject.name.upper())
        template = template.replace('${PROJECTNAME}', _targetProject.name)
        f.close()

        # don't overwrite the existing file if it contains the same text, because this will trigger a source recompile later!
        if( csnUtility.FileToString(templateOutputFilename) != template ):
            f = open(templateOutputFilename, 'w')
            f.write(template)
            f.close()
class Project:
"""
Contains the data for the makefile (or vcproj) for a project.
_name -- Name of the project, e.g. \"SampleApp\"
_type -- Type of the project, should be \"executable\", \"library\", \"dll\" or \"third party\"
_callerDepth - This (advanced) parameter determines who is calling the constructor. The name of the caller is used
to fill the value of self.sourceRootFolder. Normally, you don't need to change this parameter.
Config and use file:
CMake uses config and use files to let packages use other packages. The config file assigns a number of variables
such as SAMPLE_APP_INCLUDE_DIRECTORIES and SAMPLE_APP_LIBRARY_DIRECTORIES. The use file uses these values to add
include directories and library directories to the current CMake target. The standard way to use these files is to a)
make sure that SAMPLE_APP_DIR points to the location of SAMPLE_APPConfig.cmake and UseSAMPLE_APP.cmake, b)
call FIND_PACKAGE(SAMPLE_APP) and c) call INCLUDE(${SAMPLE_APP_USE_FILE}. In step b) the config file of SAMPLE_APP is included and
in step c) the necessary include directories, library directories etc are added to the current target.
To adhere to normal CMake procedures, csnBuild also uses the use file and config file. However, FIND_PACKAGE is not needed,
because we can directly include first the config file and then the use file.
The constructors initialises these member variables:
self.binarySubfolder -- Direct subfolder - within the binary folder - for this project. Is either 'executable' or 'library'.
self.installSubfolder -- Direct subfolder - within the install folder - for targets generated by this project.
self.useBefore -- A list of projects. This project must be used before the projects in this list.
self.configFilePath -- The config file for the project. See above.
self.sources -- Sources to be compiled for this target.
self.opSystems -- Dictionary (WIN32/NOT WIN32/ALL -> OpSys) with definitions to be used for different operating systems.
self.sourcesToBeMoced -- Sources for which a moc file must be generated.
self.sourcesToBeUIed -- Sources for which qt's UI.exe must be run.
self.filesToInstall -- Contains files to be installed in the binary folder. It has the structure filesToInstall[mode][installPath] = files.
For example: if self.filesToInstall[\"debug\"][\"data\"] = [\"c:/one.txt\", \"c:/two.txt\"],
then c:/one.txt and c:/two.txt must be installed in the data subfolder of the binary folder when in debug mode.
self.useFilePath -- Path to the use file of the project. If it is relative, then the binary folder will be prepended.
self.cmakeListsSubpath -- The cmake file that builds the project as a target
self.projects -- Set of related project instances. These projects have been added to self using AddProjects.
self.projectsNonRequired = Subset of self.projects. Contains projects that self doesn't depend on.
self.publicIncludeFolders -- List of include search folders required to build a target that uses this project.
self.publicLibraryFolders -- List of library search folders required to build a target that uses this project.
self.generateWin32Header -- Flag that says if a standard Win32Header.h must be generated
"""
def __init__(self, _name, _type, _callerDepth = 1):
self.publicIncludeFolders = []
self.publicLibraryFolders = []
self.sources = []
self.opSystems = dict()
for opSysName in ["WIN32", "NOT WIN32", "ALL"]:
opSys = OpSys()
self.opSystems[opSysName] = opSys
self.sourcesToBeMoced = []
self.sourcesToBeUIed = []
self.name = _name
self.filesToInstall = dict()
self.filesToInstall["debug"] = dict()
self.filesToInstall["release"] = dict()
self.type = _type
self.sourceRootFolder = os.path.normpath(os.path.dirname(Caller(_callerDepth)[0])).replace("\\", "/")
self.useBefore = []
if( self.type == "dll" ):
self.binarySubfolder = "library/%s" % (_name)
else:
self.binarySubfolder = "%s/%s" % (self.type, _name)
self.installSubFolder = ""
self.configFilePath = "%s/%sConfig.cmake" % (self.binarySubfolder, _name)
self.useFilePath = "%s/Use%s.cmake" % (self.binarySubfolder, _name)
self.cmakeListsSubpath = "%s/CMakeLists.txt" % (self.binarySubfolder)
self.projects = set()
self.projectsNonRequired = set()
self.generateWin32Header = 1
def AddProjects(self, _projects, _dependency = 1):
"""
Adds projects in _projects as required projects.
_dependency - Flag that states that self target requires (is dependent on) _projects.
Raises StandardError in case of a cyclic dependency.
"""
for projectToAdd in _projects:
if( self is projectToAdd ):
raise DependencyError, "Project %s cannot be added to itself" % (projectToAdd.name)
if( not projectToAdd in self.projects ):
if( _dependency and projectToAdd.DependsOn(self) ):
raise DependencyError, "Circular dependency detected during %s.AddProjects(%s)" % (self.name, projectToAdd.name)
self.projects.add( projectToAdd )
if( not _dependency ):
self.projectsNonRequired.add( projectToAdd )
def AddSources(self, _listOfSourceFiles, _moc = 0, _ui = 0, _checkExists = 1):
"""
Adds items to self.sources. For each source file that is not an absolute path, self.sourceRootFolder is prefixed.
Entries of _listOfSourceFiles may contain wildcards, such as src/*/*.h.
If _moc, then a moc file is generated for each header file in _listOfSourceFiles.
If _ui, then qt's ui.exe is run for the file.
If _checkExists, then added sources (possibly after wildcard expansion) must exist on the filesystem, or an exception is thrown.
"""
for sourceFile in _listOfSourceFiles:
sources = self.Glob(sourceFile)
if( _checkExists and not len(sources) ):
raise IOError, "Path file not found %s" % (sourceFile)
for source in sources:
# csnUtility.Log("Adding %s\n" % (source))
if _moc and not source in self.sourcesToBeMoced:
self.sourcesToBeMoced.append(source)
if( not source in self.sources ):
if( _ui ):
self.sourcesToBeUIed.append(source)
self.sources.append(source)
def AddFilesToInstall(self, _listOfFiles, _location = '.', _debugOnly = 0, _releaseOnly = 0):
"""
Adds items to self.filesToInstall.
Entries of _listOfFiles may contain wildcards, such as lib/*/*.dll.
Relative paths in _listOfFiles are assumed to be relative to the root of the binary folder where the targets
are created.
_debugOnly - If true, then the dll is only installed to the debug install folder.
_releaseOnly - If true, then the dll is only installed to the release install folder.
"""
for dll in _listOfFiles:
if not _debugOnly:
if not self.filesToInstall["release"].has_key(_location):
self.filesToInstall["release"][_location] = []
if not dll in self.filesToInstall["release"][_location]:
self.filesToInstall["release"][_location].append( dll )
if not _releaseOnly:
if not self.filesToInstall["debug"].has_key(_location):
self.filesToInstall["debug"][_location] = []
if not dll in self.filesToInstall["debug"][_location]:
self.filesToInstall["debug"][_location].append( dll )
def AddDefinitions(self, _listOfDefinitions, _private = 0, _WIN32 = 0, _NOT_WIN32 = 0 ):
"""
Adds definitions to self.opSystems.
"""
opSystemName = self.__GetOpSysName(_WIN32, _NOT_WIN32)
opSys = self.opSystems[opSystemName]
if( _private ):
opSys.private.definitions.extend(_listOfDefinitions)
else:
opSys.public.definitions.extend(_listOfDefinitions)
def AddPublicIncludeFolders(self, _listOfIncludeFolders):
"""
Adds items to self.publicIncludeFolders.
If an item has a relative path, then it will be prefixed with _sourceRootFolder.
Added include paths must exist on the filesystem.
"""
for includeFolder in _listOfIncludeFolders:
self.publicIncludeFolders.append( self.__FindPath(includeFolder) )
def AddPublicLibraryFolders(self, _listOfLibraryFolders):
"""
Adds items to self.publicLibraryFolders.
If an item has a relative path, then it will be prefixed with _sourceRootFolder.
Added library paths must exist on the filesystem.
"""
for libraryFolder in _listOfLibraryFolders:
self.publicLibraryFolders.append( self.__FindPath(libraryFolder) )
def AddPublicLibraries(self, _type, _listOfLibraries, _WIN32 = 0, _NOT_WIN32 = 0):
"""
Adds items to self.publicLibraries.
_type - Should be \"debug\", \"optimized\" or \"all\".
"""
assert _type in ("debug", "optimized", "all"), "%s not any of (\"debug\", \"optimized\", \"all\"" % (_type)
if( _type == "all" ):
_type = ""
opSysName = self.__GetOpSysName(_WIN32, _NOT_WIN32)
opSys = self.opSystems[opSysName]
for library in _listOfLibraries:
opSys.public.libraries.append("%s %s" % (_type, library))
def __GetOpSysName(self, _WIN32, _NOT_WIN32):
"""
Returns "ALL", "WIN32" or "NOT WIN32"
"""
if( _WIN32 and _NOT_WIN32 ):
_WIN32 = _NOT_WIN32 = 0
compiler = "ALL"
if( _WIN32 ):
compiler = "WIN32"
elif( _NOT_WIN32 ):
compiler = "NOT WIN32"
return compiler
def __FindPath(self, _path):
"""
Tries to locate _path as an absolute path or as a path relative to self.sourceRootFolder.
Returns an absolute path, containing only forward slashes.
Throws IOError if path was not found.
"""
# csnUtility.Log( "Finding %s in %s\n" % (_path, self.sourceRootFolder) )
path = _path
if( not os.path.isabs(path) ):
path = os.path.abspath("%s/%s" % (self.sourceRootFolder, path))
if( not os.path.exists(path) ):
raise IOError, "Path file not found %s (tried %s)" % (_path, path)
path = path.replace("\\", "/")
assert not csnUtility.HasBackSlash(path), path
return path
def Glob(self, _path):
"""
Returns a list of files that match _path (which can be absolute, or relative to self.sourceRootFolder).
The return paths are absolute, containing only forward slashes.
"""
path = _path
if not os.path.isabs(_path):
path = os.path.abspath("%s/%s" % (self.sourceRootFolder, path))
return [x.replace("\\", "/") for x in glob.glob(path)]
def DependsOn(self, _otherProject, _skipList = None):
"""
Returns true if self is (directly or indirectly) dependent upon _otherProject.
_skipList - Used to not process project twice during the recursion (also prevents infinite loops).
"""
if _skipList is None:
_skipList = []
assert not self in _skipList, "%s should not be in stoplist" % (self.name)
_skipList.append(self)
for requiredProject in self.RequiredProjects():
if requiredProject in _skipList:
continue
if requiredProject is _otherProject or requiredProject.DependsOn(_otherProject, _skipList ):
return 1
return 0
def RequiredProjects(self, _recursive = 0):
"""
Return a set of projects that self depends upon.
If recursive is true, then required projects of required projects are also retrieved.
"""
result = self.projects - self.projectsNonRequired
if( _recursive ):
moreResults = set()
for project in result:
moreResults.update( project.RequiredProjects(_recursive) )
result.update(moreResults)
return result
def UseBefore(self, _otherProject):
"""
Indicate that self must be used before _otherProjects in a cmake file.
Throws DependencyError if _otherProject wants to be used before self.
"""
if( _otherProject.WantsToBeUsedBefore(self) ):
raise DependencyError, "Cyclic use-before relation between %s and %s" % (self.name, _otherProject.name)
self.useBefore.append(_otherProject)
def WantsToBeUsedBefore(self, _otherProject):
"""
Return true if self wants to be used before _otherProject.
"""
if( self is _otherProject ):
return 0
if( _otherProject in self.useBefore ):
return 1
for requiredProject in self.RequiredProjects(1):
if( _otherProject in requiredProject.useBefore ):
return 1
return 0
    def GetProjectsToUse(self):
        """
        Determine a list of projects that must be used (meaning: include the config and use file) to generate this project.
        Note that self is also included in this list.
        The list is sorted in the correct order, using Project.WantsToBeUsedBefore.
        """
        result = []
        projectsToUse = [project for project in self.RequiredProjects(_recursive = 1)]
        assert not self in projectsToUse, "%s should not be in projectsToUse" % (self.name)
        projectsToUse.append(self)
        # maxCount = n! is an upper bound on the number of pops a valid ordering
        # can need; exceeding it (the assert below) indicates a cyclic
        # use-before relation among the projects
        (count, maxCount) = (0, 1)
        for i in range(len(projectsToUse)):
            maxCount = maxCount * (i+1)
        while (len(projectsToUse)):
            assert count < maxCount
            count += 1
            project = projectsToUse.pop()
            # check if we must skip this project for now, because another project must be used before this one
            skipThisProjectForNow = 0
            for otherProject in projectsToUse:
                if( otherProject.WantsToBeUsedBefore(project) ):
                    #print ("%s wants to be before %s\n" % (otherProject.name, project.name))
                    skipThisProjectForNow = 1
            if( skipThisProjectForNow ):
                # re-queue at the front of the worklist and try again later
                projectsToUse.insert(0, project)
                continue
            else:
                result.append(project)
        return result
def GenerateConfigFile(self, _binaryFolder):
"""
Generates the XXXConfig.cmake file for this project.
"""
fileConfig = "%s/%s" % (_binaryFolder, self.configFilePath)
f = open(fileConfig, 'w')
# write header and some cmake fields
f.write( "# File generated automatically by the CSnake generator.\n" )
f.write( "# DO NOT EDIT (changes will be lost)\n\n" )
f.write( "SET( %s_FOUND \"TRUE\" )\n" % (self.name) )
f.write( "SET( %s_INCLUDE_DIRS %s )\n" % (self.name, csnUtility.Join(self.publicIncludeFolders, _addQuotes = 1)) )
f.write( "SET( %s_LIBRARY_DIRS %s )\n" % (self.name, csnUtility.Join(self.publicLibraryFolders, _addQuotes = 1)) )
for opSysName in ["WIN32", "NOT WIN32"]:
opSys = self.opSystems[opSysName]
if( len(opSys.public.libraries) ):
f.write( "IF(%s)\n" % (opSysName))
f.write( "SET( %s_LIBRARIES %s )\n" % (self.name, csnUtility.Join(opSys.public.libraries, _addQuotes = 1)) )
f.write( "ENDIF(%s)\n" % (opSysName))
opSysAll = self.opSystems["ALL"]
if( len(opSysAll.public.libraries) ):
f.write( "SET( %s_LIBRARIES ${%s_LIBRARIES} %s )\n" % (self.name, self.name, csnUtility.Join(opSysAll.public.libraries, _addQuotes = 1)) )
def GenerateUseFile(self, _binaryFolder):
"""
Generates the UseXXX.cmake file for this project.
"""
fileUse = "%s/%s" % (_binaryFolder, self.useFilePath)
f = open(fileUse, 'w')
# write header and some cmake fields
f.write( "# File generated automatically by the CSnake generator.\n" )
f.write( "# DO NOT EDIT (changes will be lost)\n\n" )
f.write( "INCLUDE_DIRECTORIES(${%s_INCLUDE_DIRS})\n" % (self.name) )
f.write( "LINK_DIRECTORIES(${%s_LIBRARY_DIRS})\n" % (self.name) )
f.write( "LINK_LIBRARIES(${%s_LIBRARIES})\n" % (self.name) )
# write definitions
for opSysName in ["WIN32", "NOT WIN32"]:
opSys = self.opSystems[opSysName]
if( len(opSys.public.definitions) ):
f.write( "IF(%s)\n" % (opSysName))
f.write( "ADD_DEFINITIONS(%s)\n" % csnUtility.Join(opSys.public.definitions) )
f.write( "ENDIF(%s)\n" % (opSysName))
opSysAll = self.opSystems["ALL"]
if( len(opSysAll.public.definitions) ):
f.write( "ADD_DEFINITIONS(%s)\n" % csnUtility.Join(opSysAll.public.definitions) )
# write definitions that state whether this is a static library
#if self.type == "library":
# f.write( "ADD_DEFINITIONS(%sSTATIC)\n" % self.name )
def GetPathToConfigFile(self, _binaryFolder):
"""
Returns self.useFilePath if it is absolute. Otherwise, returns _binaryFolder + self.useFilePath.
"""
if( os.path.isabs(self.configFilePath) ):
return self.configFilePath
else:
return "%s/%s" % (_binaryFolder, self.configFilePath)
def GetPathToUseFile(self, _binaryFolder):
"""
Returns self.useFilePath if it is absolute. Otherwise, returns _binaryFolder + self.useFilePath.
"""
if( os.path.isabs(self.useFilePath) ):
return self.useFilePath
else:
return "%s/%s" % (_binaryFolder, self.useFilePath)
def ResolvePathsOfFilesToInstall(self, _thirdPartyBinFolder):
""" This function replaces relative paths and wildcards in self.filesToInstall with absolute paths without wildcards. """
for mode in ("debug", "release"):
for project in self.GetProjectsToUse():
for location in project.filesToInstall[mode].keys():
newList = []
for dllPattern in project.filesToInstall[mode][location]:
path = dllPattern.replace("\\", "/")
if not os.path.isabs(path):
path = "%s/%s" % (_thirdPartyBinFolder, path)
for dll in glob.glob(path):
newList.append(dll)
project.filesToInstall[mode][location] = newList
    def SetGenerateWin32Header(self, _flag):
        """Set the flag that controls generation of the standard Win32Header.h file."""
        self.generateWin32Header = _flag
    def GetGenerateWin32Header(self):
        """Return the flag that controls generation of the standard Win32Header.h file."""
        return self.generateWin32Header
Refactor: added includeFolders and libraryFolders to the OpSysSection class
git-svn-id: 5b21af815eabb73dc5820112928ed0bea355bed4@43 9ffc3505-93cb-cd4b-9e5d-8a77f6415fcf
from os import makedirs
import csnUtility
import os.path
import warnings
import sys
import re
import glob
import traceback
# ToDo:
# - Have public and private related projects (hide the include paths from its clients)
# - Unloading all modules in csnCilab.LoadModule does not work (it will reload the >cached< module).
# This makes it impossible to use changed csn files while the GUI is still running.
# Need to replace the 'imp' approach.
# Look at RollBackImporter (http://www.koders.com/python/fid3017018D7707B26F40B546EE2D1388C1A14383D3.aspx?s=%22Steve+Purcell%22)
# - If ITK doesn't implement the DONT_INHERIT keyword, then use environment variables to work around the cmake propagation behaviour
# - Support precompiled headers by patching the produced vcproj files
# - csn python modules can contain option widgets that are loaded into CSnakeGUI! Use this to add
# selection of desired toolkit modules in csnGIMIAS
# - create convenience module csnCilabAll with attributes itk, vtk, baselib, etc.
# - install msvcp.dll and mitkstatemachine.xml
# - extend csnGUI with the option of additional root folders
# - support installing subtrees to the binfolder, so that the cardiacplugin functions correctly
# (it needs bin/debug/plugins/cardiacsegmpl/data)
# Put the parent folder of this module on sys.path (forward slashes only),
# presumably so sibling modules can be imported regardless of the current
# working directory -- TODO confirm against callers.
root = ("%s/.." % (os.path.dirname(__file__))).replace("\\", "/")
if root not in sys.path:
    sys.path.append(root)
class DependencyError(StandardError):
    """Raised when a cyclic or self dependency between projects is detected."""
    pass
class SyntaxError(StandardError):
    """Raised for malformed input, e.g. a backslash in a folder argument.

    NOTE(review): this shadows the builtin SyntaxError within this module.
    """
    pass
class ProjectClosedError(StandardError):
    """Project-related error; not raised anywhere in the code visible here -- TODO confirm intended use."""
    pass
def Caller(up=0):
    """
    Return (file, line, func, text) describing the caller's caller.
    The optional argument 'up' walks further back up the call stack.
    """
    stack = traceback.extract_stack(limit=up+2)
    return stack[0]
class OpSysSection:
    """
    One visibility section (public or private) of the settings for an
    operating system: preprocessor definitions, libraries and search folders.
    """
    def __init__(self):
        self.definitions = []
        self.libraries = []
        self.includeFolders = []
        self.libraryFolders = []
class OpSys:
    """
    Helper class holding the settings that apply on one operating system,
    split into a public and a private OpSysSection.
    """
    def __init__(self):
        # settings propagated to client targets
        self.public = OpSysSection()
        # settings used only when building this project itself
        self.private = OpSysSection()
class Generator:
    """
    Generates the CMakeLists.txt for a csnBuild.Project.
    """
    def Generate(self, _targetProject, _binaryFolder, _installFolder = "", _generatedList = None, _knownProjectNames = None):
        """
        Generates the CMakeLists.txt for a csnBuild.Project in _binaryFolder.
        All binaries are placed in _binaryFolder/bin.
        _binaryFolder -- Target location for the cmake files.
        _installFolder -- If non-empty, INSTALL rules targeting this folder are written.
        _generatedList -- List of projects for which Generate was already called
        _knownProjectNames -- Project names seen so far, used to detect duplicates.
        Raises NameError for a duplicate project name or an unknown project type,
        and SyntaxError (the module-local class) when _binaryFolder contains a backslash.
        """
        # only the outermost call (no list passed in) writes the install rules at the end
        isTopLevelProject = _generatedList is None
        if( _generatedList is None ):
            _generatedList = []
        if( _knownProjectNames is None ):
            _knownProjectNames = []
        #csnUtility.Log("Generate %s\n" % (_targetProject.name))
        #for project in _generatedList:
        #    csnUtility.Log("Already generated %s\n" % (project.name))
        #csnUtility.Log("---\n")
        if( _targetProject.name in _knownProjectNames):
            raise NameError, "Each project must have a unique name. Violating project is %s in folder %s\n" % (_targetProject.name, _targetProject.sourceRootFolder)
        else:
            _knownProjectNames.append(_targetProject.name)
        # trying to Generate a project twice indicates a logical error in the code
        assert not _targetProject in _generatedList, "Target project name = %s" % (_targetProject.name)
        _generatedList.append(_targetProject)
        # check for backward slashes
        if csnUtility.HasBackSlash(_binaryFolder):
            raise SyntaxError, "Error, backslash found in binary folder %s" % _binaryFolder
        if( _targetProject.type == "third party" ):
            warnings.warn( "CSnake warning: you are trying to generate CMake scripts for a third party module (nothing generated)\n" )
            return
        # this is the OpSys instance for all operating systems
        opSysAll = _targetProject.opSystems["ALL"]
        # create binary project folder
        binaryProjectFolder = _binaryFolder + "/" + _targetProject.binarySubfolder
        # short-circuit 'or' acts as a guarded mkdir: makedirs runs only when the folder is missing
        os.path.exists(binaryProjectFolder) or os.makedirs(binaryProjectFolder)
        # create Win32Header
        if( _targetProject.type != "executable" and _targetProject.GetGenerateWin32Header() ):
            self.__GenerateWin32Header(_targetProject, _binaryFolder)
            # the generated header lives in the binary project folder, so clients must include from there
            if not binaryProjectFolder in opSysAll.public.includeFolders:
                opSysAll.public.includeFolders.append(binaryProjectFolder)
        # open cmakelists.txt
        fileCMakeLists = "%s/%s" % (_binaryFolder, _targetProject.cmakeListsSubpath)
        f = open(fileCMakeLists, 'w')
        # write header and some cmake fields
        f.write( "# CMakeLists.txt generated automatically by the CSnake generator.\n" )
        f.write( "# DO NOT EDIT (changes will be lost)\n\n" )
        f.write( "PROJECT(%s)\n" % (_targetProject.name) )
        f.write( "SET( BINARY_DIR \"%s\")\n" % (_binaryFolder) )
        binaryBinFolder = "%s/bin/%s" % (_binaryFolder, _targetProject.installSubFolder)
        f.write( "\n# All binary outputs are written to the same folder.\n" )
        f.write( "SET( CMAKE_SUPPRESS_REGENERATION TRUE )\n" )
        f.write( "SET( EXECUTABLE_OUTPUT_PATH \"%s\")\n" % (binaryBinFolder) )
        f.write( "SET( LIBRARY_OUTPUT_PATH \"%s\")\n" % (binaryBinFolder) )
        # create config and use files, and include them
        _targetProject.GenerateConfigFile( _binaryFolder)
        _targetProject.GenerateUseFile(_binaryFolder)
        # get related projects to be 'used' in the sense of including the use and config file.
        projectsToUse = _targetProject.GetProjectsToUse()
        # find and use related projects
        for project in projectsToUse:
            # include config and use file
            f.write( "\n# use %s\n" % (project.name) )
            f.write( "INCLUDE(\"%s\")\n" % (project.GetPathToConfigFile(_binaryFolder)) )
            f.write( "INCLUDE(\"%s\")\n" % (project.GetPathToUseFile(_binaryFolder)) )
        # generate moc files
        cmakeMocInputVar = ""
        if( len(_targetProject.sourcesToBeMoced) ):
            cmakeMocInputVarName = "MOC_%s" % (_targetProject.name)
            cmakeMocInputVar = "${%s}" % (cmakeMocInputVarName)
            f.write("\nQT_WRAP_CPP( %s %s %s )\n" % (_targetProject.name, cmakeMocInputVarName, csnUtility.Join(_targetProject.sourcesToBeMoced, _addQuotes = 1)) )
        # generate ui files
        cmakeUIHInputVar = ""
        cmakeUICppInputVar = ""
        if( len(_targetProject.sourcesToBeUIed) ):
            cmakeUIHInputVarName = "UI_H_%s" % (_targetProject.name)
            cmakeUIHInputVar = "${%s}" % (cmakeUIHInputVarName)
            cmakeUICppInputVarName = "UI_CPP_%s" % (_targetProject.name)
            cmakeUICppInputVar = "${%s}" % (cmakeUICppInputVarName)
            f.write("\nQT_WRAP_UI( %s %s %s %s )\n" % (_targetProject.name, cmakeUIHInputVarName, cmakeUICppInputVarName, csnUtility.Join(_targetProject.sourcesToBeUIed, _addQuotes = 1)) )
        # write section that is specific for the project type
        if( len(_targetProject.sources) ):
            f.write( "\n# Add target\n" )
            # add definitions
            for opSysName in ["WIN32", "NOT WIN32"]:
                opSys = _targetProject.opSystems[opSysName]
                if( len(opSys.private.definitions) ):
                    f.write( "IF(%s)\n" % (opSysName))
                    f.write( "ADD_DEFINITIONS(%s)\n" % csnUtility.Join(opSys.private.definitions) )
                    f.write( "ENDIF(%s)\n" % (opSysName))
            if( len(opSysAll.private.definitions) ):
                f.write( "ADD_DEFINITIONS(%s)\n" % csnUtility.Join(opSysAll.private.definitions) )
            # add sources
            if(_targetProject.type == "executable" ):
                f.write( "ADD_EXECUTABLE(%s %s %s %s %s)\n" % (_targetProject.name, cmakeUIHInputVar, cmakeUICppInputVar, cmakeMocInputVar, csnUtility.Join(_targetProject.sources, _addQuotes = 1)) )
            elif(_targetProject.type == "library" ):
                f.write( "ADD_LIBRARY(%s STATIC %s %s %s %s)\n" % (_targetProject.name, cmakeUIHInputVar, cmakeUICppInputVar, cmakeMocInputVar, csnUtility.Join(_targetProject.sources, _addQuotes = 1)) )
            elif(_targetProject.type == "dll" ):
                f.write( "ADD_LIBRARY(%s SHARED %s %s %s %s)\n" % (_targetProject.name, cmakeUIHInputVar, cmakeUICppInputVar, cmakeMocInputVar, csnUtility.Join(_targetProject.sources, _addQuotes = 1)) )
            else:
                raise NameError, "Unknown project type %s" % _targetProject.type
            # write section for sorting moc and ui files in a separate folder in Visual Studio
            f.write( "\n # Create source groups \n" )
            f.write( "IF (WIN32)\n" )
            f.write( "  SOURCE_GROUP(\"Generated MOC Files\" REGULAR_EXPRESSION moc_[a-zA-Z0-9_]*[.]cxx$)\n")
            f.write( "  SOURCE_GROUP(\"Forms\" REGULAR_EXPRESSION [.]ui$)\n")
            f.write( "ENDIF(WIN32)\n\n" )
            # add standard definition to allow multiply defined symbols in the linker
            f.write( "SET_TARGET_PROPERTIES(%s PROPERTIES LINK_FLAGS \"/FORCE:MULTIPLE\")" % _targetProject.name)
            # add install rule
            if( _installFolder != "" and _targetProject.type != "library"):
                destination = "%s/%s" % (_installFolder, _targetProject.installSubFolder)
                f.write( "\n# Rule for installing files in location %s\n" % destination)
                f.write( "INSTALL(TARGETS %s DESTINATION %s)\n" % (_targetProject.name, destination) )
        # Find projects that must be generated. A separate list is used to ease debugging.
        projectsToGenerate = set()
        requiredProjects = _targetProject.RequiredProjects(_recursive = 1)
        for projectToGenerate in requiredProjects:
            # determine if we must Generate the project. If a required project will generate it,
            # then leave it to the required project. This will prevent multiple generation of the same project.
            # If a non-required project will generate it, then still generate the project
            # (the non-required project may depend on target project to generate project, creating a race condition).
            generateProject = not projectToGenerate in _generatedList and projectToGenerate.type != "third party"
            if( generateProject ):
                for requiredProject in _targetProject.RequiredProjects(_recursive = 0):
                    if( requiredProject.DependsOn(projectToGenerate) ):
                        generateProject = 0
            if( generateProject ):
                projectsToGenerate.add(projectToGenerate)
        f.write( "\n" )
        # add non-required projects that have not yet been generated to projectsToGenerate
        for project in _targetProject.projectsNonRequired:
            if( not project in _generatedList ):
                projectsToGenerate.add(project)
        # generate projects, and add a line with ADD_SUBDIRECTORY
        for project in projectsToGenerate:
            # check again if a previous iteration of this loop didn't add project to the generated list
            if not project in _generatedList:
                f.write( "ADD_SUBDIRECTORY(\"${BINARY_DIR}/%s\" \"${BINARY_DIR}/%s\")\n" % (project.binarySubfolder, project.binarySubfolder) )
                self.Generate(project, _binaryFolder, _installFolder, _generatedList, _knownProjectNames)
        # add dependencies
        f.write( "\n" )
        for project in requiredProjects:
            # ADD_DEPENDENCIES fails for projects without sources (no target exists)
            if( len(project.sources) ):
                f.write( "ADD_DEPENDENCIES(%s %s)\n" % (_targetProject.name, project.name) )
        # if top level project, add install rules for all the filesToInstall
        if isTopLevelProject:
            for mode in ("debug", "release"):
                for project in _targetProject.GetProjectsToUse():
                    # iterate over filesToInstall to be copied in this mode
                    for location in project.filesToInstall[mode].keys():
                        files = ""
                        for file in project.filesToInstall[mode][location]:
                            files += "%s " % file.replace("\\", "/")
                        if files != "":
                            destination = "%s/%s" % (_installFolder, location)
                            f.write( "\n# Rule for installing files in location %s\n" % destination)
                            f.write("INSTALL(FILES %s DESTINATION %s CONFIGURATIONS %s)\n" % (files, destination, mode.upper()))
        f.close()
    def __GenerateWin32Header(self, _targetProject, _binaryFolder):
        """
        Generates the ProjectNameWin32.h header file for exporting/importing dll functions.
        Static libraries get the .lib template variant.
        """
        templateFilename = root + "/CSnake/TemplateSourceFiles/Win32Header.h"
        if( _targetProject.type == "library" ):
            templateFilename = root + "/CSnake/TemplateSourceFiles/Win32Header.lib.h"
        templateOutputFilename = "%s/%s/%sWin32Header.h" % (_binaryFolder, _targetProject.binarySubfolder, _targetProject.name)
        assert os.path.exists(templateFilename), "File not found %s\n" % (templateFilename)
        f = open(templateFilename, 'r')
        template = f.read()
        template = template.replace('${PROJECTNAME_UPPERCASE}', _targetProject.name.upper())
        template = template.replace('${PROJECTNAME}', _targetProject.name)
        f.close()
        # don't overwrite the existing file if it contains the same text, because this will trigger a source recompile later!
        if( csnUtility.FileToString(templateOutputFilename) != template ):
            f = open(templateOutputFilename, 'w')
            f.write(template)
            f.close()
class Project:
"""
Contains the data for the makefile (or vcproj) for a project.
_name -- Name of the project, e.g. \"SampleApp\"
_type -- Type of the project, should be \"executable\", \"library\", \"dll\" or \"third party\"
_callerDepth - This (advanced) parameter determines who is calling the constructor. The name of the caller is used
to fill the value of self.sourceRootFolder. Normally, you don't need to change this parameter.
Config and use file:
CMake uses config and use files to let packages use other packages. The config file assigns a number of variables
such as SAMPLE_APP_INCLUDE_DIRECTORIES and SAMPLE_APP_LIBRARY_DIRECTORIES. The use file uses these values to add
include directories and library directories to the current CMake target. The standard way to use these files is to a)
make sure that SAMPLE_APP_DIR points to the location of SAMPLE_APPConfig.cmake and UseSAMPLE_APP.cmake, b)
call FIND_PACKAGE(SAMPLE_APP) and c) call INCLUDE(${SAMPLE_APP_USE_FILE}. In step b) the config file of SAMPLE_APP is included and
in step c) the necessary include directories, library directories etc are added to the current target.
To adhere to normal CMake procedures, csnBuild also uses the use file and config file. However, FIND_PACKAGE is not needed,
because we can directly include first the config file and then the use file.
The constructors initialises these member variables:
self.binarySubfolder -- Direct subfolder - within the binary folder - for this project. Is either 'executable' or 'library'.
self.installSubfolder -- Direct subfolder - within the install folder - for targets generated by this project.
self.useBefore -- A list of projects. This project must be used before the projects in this list.
self.configFilePath -- The config file for the project. See above.
self.sources -- Sources to be compiled for this target.
self.opSystems -- Dictionary (WIN32/NOT WIN32/ALL -> OpSys) with definitions to be used for different operating systems.
self.sourcesToBeMoced -- Sources for which a moc file must be generated.
self.sourcesToBeUIed -- Sources for which qt's UI.exe must be run.
self.filesToInstall -- Contains files to be installed in the binary folder. It has the structure filesToInstall[mode][installPath] = files.
For example: if self.filesToInstall[\"debug\"][\"data\"] = [\"c:/one.txt\", \"c:/two.txt\"],
then c:/one.txt and c:/two.txt must be installed in the data subfolder of the binary folder when in debug mode.
self.useFilePath -- Path to the use file of the project. If it is relative, then the binary folder will be prepended.
self.cmakeListsSubpath -- The cmake file that builds the project as a target
self.projects -- Set of related project instances. These projects have been added to self using AddProjects.
self.projectsNonRequired = Subset of self.projects. Contains projects that self doesn't depend on.
self.generateWin32Header -- Flag that says if a standard Win32Header.h must be generated
"""
def __init__(self, _name, _type, _callerDepth = 1):
self.sources = []
self.opSystems = dict()
for opSysName in ["WIN32", "NOT WIN32", "ALL"]:
opSys = OpSys()
self.opSystems[opSysName] = opSys
self.sourcesToBeMoced = []
self.sourcesToBeUIed = []
self.name = _name
self.filesToInstall = dict()
self.filesToInstall["debug"] = dict()
self.filesToInstall["release"] = dict()
self.type = _type
self.sourceRootFolder = os.path.normpath(os.path.dirname(Caller(_callerDepth)[0])).replace("\\", "/")
self.useBefore = []
if( self.type == "dll" ):
self.binarySubfolder = "library/%s" % (_name)
else:
self.binarySubfolder = "%s/%s" % (self.type, _name)
self.installSubFolder = ""
self.configFilePath = "%s/%sConfig.cmake" % (self.binarySubfolder, _name)
self.useFilePath = "%s/Use%s.cmake" % (self.binarySubfolder, _name)
self.cmakeListsSubpath = "%s/CMakeLists.txt" % (self.binarySubfolder)
self.projects = set()
self.projectsNonRequired = set()
self.generateWin32Header = 1
def AddProjects(self, _projects, _dependency = 1):
"""
Adds projects in _projects as required projects.
_dependency - Flag that states that self target requires (is dependent on) _projects.
Raises StandardError in case of a cyclic dependency.
"""
for projectToAdd in _projects:
if( self is projectToAdd ):
raise DependencyError, "Project %s cannot be added to itself" % (projectToAdd.name)
if( not projectToAdd in self.projects ):
if( _dependency and projectToAdd.DependsOn(self) ):
raise DependencyError, "Circular dependency detected during %s.AddProjects(%s)" % (self.name, projectToAdd.name)
self.projects.add( projectToAdd )
if( not _dependency ):
self.projectsNonRequired.add( projectToAdd )
def AddSources(self, _listOfSourceFiles, _moc = 0, _ui = 0, _checkExists = 1):
"""
Adds items to self.sources. For each source file that is not an absolute path, self.sourceRootFolder is prefixed.
Entries of _listOfSourceFiles may contain wildcards, such as src/*/*.h.
If _moc, then a moc file is generated for each header file in _listOfSourceFiles.
If _ui, then qt's ui.exe is run for the file.
If _checkExists, then added sources (possibly after wildcard expansion) must exist on the filesystem, or an exception is thrown.
"""
for sourceFile in _listOfSourceFiles:
sources = self.Glob(sourceFile)
if( _checkExists and not len(sources) ):
raise IOError, "Path file not found %s" % (sourceFile)
for source in sources:
# csnUtility.Log("Adding %s\n" % (source))
if _moc and not source in self.sourcesToBeMoced:
self.sourcesToBeMoced.append(source)
if( not source in self.sources ):
if( _ui ):
self.sourcesToBeUIed.append(source)
self.sources.append(source)
def AddFilesToInstall(self, _listOfFiles, _location = '.', _debugOnly = 0, _releaseOnly = 0):
"""
Adds items to self.filesToInstall.
Entries of _listOfFiles may contain wildcards, such as lib/*/*.dll.
Relative paths in _listOfFiles are assumed to be relative to the root of the binary folder where the targets
are created.
_debugOnly - If true, then the dll is only installed to the debug install folder.
_releaseOnly - If true, then the dll is only installed to the release install folder.
"""
for dll in _listOfFiles:
if not _debugOnly:
if not self.filesToInstall["release"].has_key(_location):
self.filesToInstall["release"][_location] = []
if not dll in self.filesToInstall["release"][_location]:
self.filesToInstall["release"][_location].append( dll )
if not _releaseOnly:
if not self.filesToInstall["debug"].has_key(_location):
self.filesToInstall["debug"][_location] = []
if not dll in self.filesToInstall["debug"][_location]:
self.filesToInstall["debug"][_location].append( dll )
def AddDefinitions(self, _listOfDefinitions, _private = 0, _WIN32 = 0, _NOT_WIN32 = 0 ):
"""
Adds definitions to self.opSystems.
"""
opSystemName = self.__GetOpSysName(_WIN32, _NOT_WIN32)
opSys = self.opSystems[opSystemName]
if( _private ):
opSys.private.definitions.extend(_listOfDefinitions)
else:
opSys.public.definitions.extend(_listOfDefinitions)
    def AddPublicIncludeFolders(self, _listOfIncludeFolders):
        """
        Adds items to the public include folder list of the "ALL" operating
        system (self.opSystems["ALL"].public.includeFolders).
        If an item has a relative path, then it will be prefixed with self.sourceRootFolder.
        Added include paths must exist on the filesystem.
        """
        opSysAll = self.opSystems["ALL"]
        for includeFolder in _listOfIncludeFolders:
            opSysAll.public.includeFolders.append( self.__FindPath(includeFolder) )
    def AddPublicLibraryFolders(self, _listOfLibraryFolders):
        """
        Adds items to the public library folder list of the "ALL" operating
        system (self.opSystems["ALL"].public.libraryFolders).
        If an item has a relative path, then it will be prefixed with self.sourceRootFolder.
        Added library paths must exist on the filesystem.
        """
        opSysAll = self.opSystems["ALL"]
        for libraryFolder in _listOfLibraryFolders:
            opSysAll.public.libraryFolders.append( self.__FindPath(libraryFolder) )
def AddPublicLibraries(self, _type, _listOfLibraries, _WIN32 = 0, _NOT_WIN32 = 0):
"""
Adds items to self.publicLibraries.
_type - Should be \"debug\", \"optimized\" or \"all\".
"""
assert _type in ("debug", "optimized", "all"), "%s not any of (\"debug\", \"optimized\", \"all\"" % (_type)
if( _type == "all" ):
_type = ""
opSysName = self.__GetOpSysName(_WIN32, _NOT_WIN32)
opSys = self.opSystems[opSysName]
for library in _listOfLibraries:
opSys.public.libraries.append("%s %s" % (_type, library))
def __GetOpSysName(self, _WIN32, _NOT_WIN32):
"""
Returns "ALL", "WIN32" or "NOT WIN32"
"""
if( _WIN32 and _NOT_WIN32 ):
_WIN32 = _NOT_WIN32 = 0
compiler = "ALL"
if( _WIN32 ):
compiler = "WIN32"
elif( _NOT_WIN32 ):
compiler = "NOT WIN32"
return compiler
def __FindPath(self, _path):
"""
Tries to locate _path as an absolute path or as a path relative to self.sourceRootFolder.
Returns an absolute path, containing only forward slashes.
Throws IOError if path was not found.
"""
# csnUtility.Log( "Finding %s in %s\n" % (_path, self.sourceRootFolder) )
path = _path
if( not os.path.isabs(path) ):
path = os.path.abspath("%s/%s" % (self.sourceRootFolder, path))
if( not os.path.exists(path) ):
raise IOError, "Path file not found %s (tried %s)" % (_path, path)
path = path.replace("\\", "/")
assert not csnUtility.HasBackSlash(path), path
return path
def Glob(self, _path):
"""
Returns a list of files that match _path (which can be absolute, or relative to self.sourceRootFolder).
The return paths are absolute, containing only forward slashes.
"""
path = _path
if not os.path.isabs(_path):
path = os.path.abspath("%s/%s" % (self.sourceRootFolder, path))
return [x.replace("\\", "/") for x in glob.glob(path)]
def DependsOn(self, _otherProject, _skipList = None):
"""
Returns true if self is (directly or indirectly) dependent upon _otherProject.
_skipList - Used to not process project twice during the recursion (also prevents infinite loops).
"""
if _skipList is None:
_skipList = []
assert not self in _skipList, "%s should not be in stoplist" % (self.name)
_skipList.append(self)
for requiredProject in self.RequiredProjects():
if requiredProject in _skipList:
continue
if requiredProject is _otherProject or requiredProject.DependsOn(_otherProject, _skipList ):
return 1
return 0
def RequiredProjects(self, _recursive = 0):
"""
Return a set of projects that self depends upon.
If recursive is true, then required projects of required projects are also retrieved.
"""
result = self.projects - self.projectsNonRequired
if( _recursive ):
moreResults = set()
for project in result:
moreResults.update( project.RequiredProjects(_recursive) )
result.update(moreResults)
return result
def UseBefore(self, _otherProject):
"""
Indicate that self must be used before _otherProjects in a cmake file.
Throws DependencyError if _otherProject wants to be used before self.
"""
if( _otherProject.WantsToBeUsedBefore(self) ):
raise DependencyError, "Cyclic use-before relation between %s and %s" % (self.name, _otherProject.name)
self.useBefore.append(_otherProject)
def WantsToBeUsedBefore(self, _otherProject):
"""
Return true if self wants to be used before _otherProject.
"""
if( self is _otherProject ):
return 0
if( _otherProject in self.useBefore ):
return 1
for requiredProject in self.RequiredProjects(1):
if( _otherProject in requiredProject.useBefore ):
return 1
return 0
    def GetProjectsToUse(self):
        """
        Determine a list of projects that must be used (meaning: include the config and use file) to generate this project.
        Note that self is also included in this list.
        The list is sorted in the correct order, using Project.WantsToBeUsedBefore.
        """
        result = []
        projectsToUse = [project for project in self.RequiredProjects(_recursive = 1)]
        assert not self in projectsToUse, "%s should not be in projectsToUse" % (self.name)
        projectsToUse.append(self)
        # maxCount = n! is an upper bound on how often a project can be pushed
        # back; exceeding it means the use-before relation is not a partial
        # order (the assert below would then fire instead of looping forever).
        (count, maxCount) = (0, 1)
        for i in range(len(projectsToUse)):
            maxCount = maxCount * (i+1)
        while (len(projectsToUse)):
            assert count < maxCount
            count += 1
            project = projectsToUse.pop()
            # check if we must skip this project for now, because another project must be used before this one
            skipThisProjectForNow = 0
            for otherProject in projectsToUse:
                if( otherProject.WantsToBeUsedBefore(project) ):
                    #print ("%s wants to be before %s\n" % (otherProject.name, project.name))
                    skipThisProjectForNow = 1
            if( skipThisProjectForNow ):
                # Requeue at the front and retry later.
                projectsToUse.insert(0, project)
                continue
            else:
                result.append(project)
        return result
def GenerateConfigFile(self, _binaryFolder):
"""
Generates the XXXConfig.cmake file for this project.
"""
fileConfig = "%s/%s" % (_binaryFolder, self.configFilePath)
f = open(fileConfig, 'w')
opSysAll = self.opSystems["ALL"]
# write header and some cmake fields
f.write( "# File generated automatically by the CSnake generator.\n" )
f.write( "# DO NOT EDIT (changes will be lost)\n\n" )
f.write( "SET( %s_FOUND \"TRUE\" )\n" % (self.name) )
f.write( "SET( %s_INCLUDE_DIRS %s )\n" % (self.name, csnUtility.Join(opSysAll.public.includeFolders, _addQuotes = 1)) )
f.write( "SET( %s_LIBRARY_DIRS %s )\n" % (self.name, csnUtility.Join(opSysAll.public.libraryFolders, _addQuotes = 1)) )
for opSysName in ["WIN32", "NOT WIN32"]:
opSys = self.opSystems[opSysName]
if( len(opSys.public.libraries) ):
f.write( "IF(%s)\n" % (opSysName))
f.write( "SET( %s_LIBRARIES %s )\n" % (self.name, csnUtility.Join(opSys.public.libraries, _addQuotes = 1)) )
f.write( "ENDIF(%s)\n" % (opSysName))
opSysAll = self.opSystems["ALL"]
if( len(opSysAll.public.libraries) ):
f.write( "SET( %s_LIBRARIES ${%s_LIBRARIES} %s )\n" % (self.name, self.name, csnUtility.Join(opSysAll.public.libraries, _addQuotes = 1)) )
def GenerateUseFile(self, _binaryFolder):
"""
Generates the UseXXX.cmake file for this project.
"""
fileUse = "%s/%s" % (_binaryFolder, self.useFilePath)
f = open(fileUse, 'w')
# write header and some cmake fields
f.write( "# File generated automatically by the CSnake generator.\n" )
f.write( "# DO NOT EDIT (changes will be lost)\n\n" )
f.write( "INCLUDE_DIRECTORIES(${%s_INCLUDE_DIRS})\n" % (self.name) )
f.write( "LINK_DIRECTORIES(${%s_LIBRARY_DIRS})\n" % (self.name) )
f.write( "LINK_LIBRARIES(${%s_LIBRARIES})\n" % (self.name) )
# write definitions
for opSysName in ["WIN32", "NOT WIN32"]:
opSys = self.opSystems[opSysName]
if( len(opSys.public.definitions) ):
f.write( "IF(%s)\n" % (opSysName))
f.write( "ADD_DEFINITIONS(%s)\n" % csnUtility.Join(opSys.public.definitions) )
f.write( "ENDIF(%s)\n" % (opSysName))
opSysAll = self.opSystems["ALL"]
if( len(opSysAll.public.definitions) ):
f.write( "ADD_DEFINITIONS(%s)\n" % csnUtility.Join(opSysAll.public.definitions) )
# write definitions that state whether this is a static library
#if self.type == "library":
# f.write( "ADD_DEFINITIONS(%sSTATIC)\n" % self.name )
def GetPathToConfigFile(self, _binaryFolder):
"""
Returns self.useFilePath if it is absolute. Otherwise, returns _binaryFolder + self.useFilePath.
"""
if( os.path.isabs(self.configFilePath) ):
return self.configFilePath
else:
return "%s/%s" % (_binaryFolder, self.configFilePath)
def GetPathToUseFile(self, _binaryFolder):
"""
Returns self.useFilePath if it is absolute. Otherwise, returns _binaryFolder + self.useFilePath.
"""
if( os.path.isabs(self.useFilePath) ):
return self.useFilePath
else:
return "%s/%s" % (_binaryFolder, self.useFilePath)
def ResolvePathsOfFilesToInstall(self, _thirdPartyBinFolder):
""" This function replaces relative paths and wildcards in self.filesToInstall with absolute paths without wildcards. """
for mode in ("debug", "release"):
for project in self.GetProjectsToUse():
for location in project.filesToInstall[mode].keys():
newList = []
for dllPattern in project.filesToInstall[mode][location]:
path = dllPattern.replace("\\", "/")
if not os.path.isabs(path):
path = "%s/%s" % (_thirdPartyBinFolder, path)
for dll in glob.glob(path):
newList.append(dll)
project.filesToInstall[mode][location] = newList
    def SetGenerateWin32Header(self, _flag):
        # Enable/disable generation of the Win32 header for this project.
        self.generateWin32Header = _flag
    def GetGenerateWin32Header(self):
        # Returns the flag stored by SetGenerateWin32Header.
        return self.generateWin32Header
|
# type: ignore
from asgiref.sync import async_to_sync
from django.conf import settings
from django.contrib.sessions.backends.base import (
VALID_KEY_CHARS,
CreateError,
SessionBase,
)
from django.utils.crypto import get_random_string, salted_hmac
from django.utils.encoding import force_str
from .redis import get_connection
REDIS_SESSION_PREFIX = getattr(settings, "REDIS_SESSION_PREFIX", "session:")
def to_redis_key(session_key):
    # Namespace the session key with the configured prefix so session entries
    # cannot collide with other keys in the same redis database.
    return f"{REDIS_SESSION_PREFIX}{session_key}"
class SessionStore(SessionBase):
    """
    Implements Redis database session store.

    All redis access happens in async helpers (the ``_``-prefixed coroutines)
    which are bridged into Django's synchronous session API with
    ``async_to_sync``.  Debug ``print`` calls were removed.
    """

    def __init__(self, session_key=None):
        super(SessionStore, self).__init__(session_key)

    def _hash(self, value):
        # Keyed HMAC used by SessionBase for tamper detection.
        key_salt = "openslides.utils.sessions.SessionStore"
        return salted_hmac(key_salt, value).hexdigest()

    def load(self):
        return async_to_sync(self._load)()

    async def _load(self):
        async with get_connection(read_only=True) as redis:
            try:
                key = to_redis_key(self._get_or_create_session_key())
                session_data = await redis.get(key)
                return self.decode(force_str(session_data))
            except Exception:
                # Missing or undecodable session data: start a fresh session.
                self._session_key = None
                return {}

    def exists(self, session_key):
        # Fixed: this used to call self._load, which takes no argument and
        # ignores session_key entirely, so it never answered the existence
        # question (and raised TypeError when called with an argument).
        return async_to_sync(self._exists)(session_key)

    async def _exists(self, session_key):
        async with get_connection(read_only=True) as redis:
            return await redis.exists(to_redis_key(session_key))

    def create(self):
        async_to_sync(self._create)()

    async def _create(self):
        while True:
            self._session_key = await self._async_get_new_session_key()
            try:
                await self._save(must_create=True)
            except CreateError:
                # Key wasn't unique. Try again.
                continue
            self.modified = True
            return

    def save(self, must_create=False):
        async_to_sync(self._save)(must_create)

    async def _save(self, must_create=False):
        async with get_connection() as redis:
            if self.session_key is None:
                return await self._create()
            if must_create and await self._exists(self._get_or_create_session_key()):
                raise CreateError
            data = self.encode(self._get_session(no_load=must_create))
            # setex lets redis expire the session by itself.
            await redis.setex(
                to_redis_key(self._get_or_create_session_key()),
                self.get_expiry_age(),
                data,
            )

    def delete(self, session_key=None):
        async_to_sync(self._delete)(session_key)

    async def _delete(self, session_key=None):
        if session_key is None:
            if self.session_key is None:
                return
            session_key = self.session_key
        async with get_connection() as redis:
            # Best effort: if the delete fails, the key expires on its own.
            try:
                await redis.delete(to_redis_key(session_key))
            except Exception:
                pass

    # This must be overwritten to stay inside async code...
    async def _async_get_new_session_key(self):
        "Return session key that isn't being used."
        while True:
            session_key = get_random_string(32, VALID_KEY_CHARS)
            if not await self._exists(session_key):
                return session_key

    @classmethod
    def clear_expired(cls):
        # Nothing to do: redis expires the keys itself (see _save's setex).
        pass
Remove debug prints
# type: ignore
from asgiref.sync import async_to_sync
from django.conf import settings
from django.contrib.sessions.backends.base import (
VALID_KEY_CHARS,
CreateError,
SessionBase,
)
from django.utils.crypto import get_random_string, salted_hmac
from django.utils.encoding import force_str
from .redis import get_connection
REDIS_SESSION_PREFIX = getattr(settings, "REDIS_SESSION_PREFIX", "session:")
def to_redis_key(session_key):
    # Namespace the session key with the configured prefix so session entries
    # cannot collide with other keys in the same redis database.
    return f"{REDIS_SESSION_PREFIX}{session_key}"
class SessionStore(SessionBase):
    """
    Implements Redis database session store.

    All redis access happens in async helpers (the ``_``-prefixed coroutines)
    which are bridged into Django's synchronous session API with
    ``async_to_sync``.
    """

    def __init__(self, session_key=None):
        super(SessionStore, self).__init__(session_key)

    def _hash(self, value):
        # Keyed HMAC used by SessionBase for tamper detection.
        key_salt = "openslides.utils.sessions.SessionStore"
        return salted_hmac(key_salt, value).hexdigest()

    def load(self):
        return async_to_sync(self._load)()

    async def _load(self):
        async with get_connection(read_only=True) as redis:
            try:
                key = to_redis_key(self._get_or_create_session_key())
                session_data = await redis.get(key)
                return self.decode(force_str(session_data))
            except Exception:
                # Missing or undecodable session data: start a fresh session.
                self._session_key = None
                return {}

    def exists(self, session_key):
        # Fixed: this used to call self._load, which takes no argument and
        # ignores session_key entirely, so it never answered the existence
        # question (and raised TypeError when called with an argument).
        return async_to_sync(self._exists)(session_key)

    async def _exists(self, session_key):
        async with get_connection(read_only=True) as redis:
            key = to_redis_key(session_key)
            return await redis.exists(key)

    def create(self):
        async_to_sync(self._create)()

    async def _create(self):
        while True:
            self._session_key = await self._async_get_new_session_key()
            try:
                await self._save(must_create=True)
            except CreateError:
                # Key wasn't unique. Try again.
                continue
            self.modified = True
            return

    def save(self, must_create=False):
        async_to_sync(self._save)(must_create)

    async def _save(self, must_create=False):
        async with get_connection() as redis:
            if self.session_key is None:
                return await self._create()
            if must_create and await self._exists(self._get_or_create_session_key()):
                raise CreateError
            data = self.encode(self._get_session(no_load=must_create))
            # setex lets redis expire the session by itself.
            await redis.setex(
                to_redis_key(self._get_or_create_session_key()),
                self.get_expiry_age(),
                data,
            )

    def delete(self, session_key=None):
        async_to_sync(self._delete)(session_key)

    async def _delete(self, session_key=None):
        if session_key is None:
            if self.session_key is None:
                return
            session_key = self.session_key
        async with get_connection() as redis:
            await redis.delete(to_redis_key(session_key))

    # This must be overwritten to stay inside async code...
    async def _async_get_new_session_key(self):
        "Return session key that isn't being used."
        while True:
            session_key = get_random_string(32, VALID_KEY_CHARS)
            if not await self._exists(session_key):
                return session_key

    @classmethod
    def clear_expired(cls):
        # Nothing to do: redis expires the keys itself (see _save's setex).
        pass
|
# Copyright 2009 - Participatory Culture Foundation
#
# This file is part of Miro Community.
#
# Miro Community is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# Miro Community is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Miro Community. If not, see <http://www.gnu.org/licenses/>.
import re
from django.contrib.auth.models import User
from django.core.paginator import Paginator, InvalidPage
from django.db.models import Q
from django.http import Http404, HttpResponseRedirect
from django.shortcuts import render_to_response, get_object_or_404
from django.template.context import RequestContext
from localtv.decorators import get_sitelocation, require_site_admin
from localtv import models
from localtv.util import sort_header, MockQueryset
from localtv.admin import forms
VIDEO_SERVICE_TITLES = (
re.compile(r'Uploads by (.+)'),
re.compile(r"Vimeo / (.+)'s uploaded videos")
)
## -------------------
## Source administration
## -------------------
@require_site_admin
@get_sitelocation
def manage_sources(request, sitelocation=None):
    """
    Admin view listing every video source (Feed and SavedSearch) for the
    current site, with free-text search (?q=), category/author/source-type
    filtering, sorting, pagination (15 per page) and bulk editing/removal
    through a formset POST.
    """
    sort = request.GET.get('sort', 'name__lower')
    headers = [
        sort_header('name__lower', 'Source', sort),
        {'label': 'Categories'},
        {'label': 'User Attribution'},
        sort_header('type', 'Type', sort),
        sort_header('auto_approve', 'Auto Approve', sort)
        ]
    # Sorting by source type cannot be expressed in the ORM; fetch rows
    # name-sorted and re-sort by type in Python further below.
    if sort.endswith('type'):
        if sort[0] == '-':
            orm_sort = '-name__lower'
        else:
            orm_sort = 'name__lower'
    else:
        orm_sort = sort
    feeds = models.Feed.objects.filter(
        site=sitelocation.site,
        status=models.FEED_STATUS_ACTIVE).extra(select={
            'name__lower': 'LOWER(name)'}).order_by(orm_sort)
    searches = models.SavedSearch.objects.filter(
        site=sitelocation.site).extra(select={
            'name__lower': 'LOWER(query_string)'}).order_by(
        orm_sort)
    search_string = request.GET.get('q', '')
    if search_string:
        feeds = feeds.filter(Q(feed_url__icontains=search_string) |
                             Q(name__icontains=search_string) |
                             Q(webpage__icontains=search_string) |
                             Q(description__icontains=search_string))
        searches = searches.filter(query_string__icontains=search_string)
    category = request.GET.get('category')
    if category:
        category = get_object_or_404(models.Category, pk=category)
        feeds = feeds.filter(auto_categories=category)
        searches = searches.filter(auto_categories=category)
    author = request.GET.get('author')
    if author:
        author = get_object_or_404(User, pk=author)
        feeds = feeds.filter(auto_authors=author)
        searches = searches.filter(auto_authors=author)
    source_filter = request.GET.get('filter')
    if source_filter == 'search':
        queryset = searches
    elif source_filter in ('feed', 'user'):
        # Feeds whose URL matches one of the known video-service regexes are
        # treated as "user" sources; the remaining ones as plain feeds.
        q = Q(feed_url__iregex=models.VIDEO_SERVICE_REGEXES[0][1])
        for service, regexp in models.VIDEO_SERVICE_REGEXES[1:]:
            q = q | Q(feed_url__iregex=regexp)
        if source_filter == 'user':
            queryset = feeds.filter(q)
        else:
            queryset = feeds.exclude(q)
    else:
        # No filter: feeds and searches live in different tables, so merge
        # and sort them together in Python.
        reverse = False
        if orm_sort[0] == '-':
            reverse = True
            orm_sort = orm_sort[1:]
        feeds_list = [(getattr(feed, orm_sort), feed)
                      for feed in feeds]
        searches_list = [(getattr(search, orm_sort), search)
                         for search in searches]
        queryset = [l[1] for l in sorted(feeds_list + searches_list,
                                         reverse=reverse)]
    if sort.endswith('type'):
        reverse = (sort[0] == '-')
        queryset = sorted(queryset,
                          reverse=reverse,
                          key=lambda source: source.source_type().lower())
    paginator = Paginator(queryset, 15)
    try:
        page = paginator.page(int(request.GET.get('page', 1)))
    except InvalidPage:
        raise Http404
    if request.method == 'POST':
        formset = forms.SourceFormset(request.POST, request.FILES,
                                      queryset=MockQueryset(page.object_list))
        if formset.is_valid():
            # The trailing extra form carries the "bulk edit" values; drop
            # the fields the admin left blank.
            bulk_edits = formset.extra_forms[0].cleaned_data
            for key in list(bulk_edits.keys()): # get the list because we'll be
                                                # changing the dictionary
                if bulk_edits[key] in ['', None]:
                    del bulk_edits[key]
            bulk_action = request.POST.get('bulk_action', '')
            for form in formset.initial_forms:
                if form.cleaned_data['bulk']:
                    if bulk_action == 'remove':
                        if request.POST.get('keep'):
                            # Keep the videos but detach them from the source.
                            form.instance.video_set.all().update(
                                search=None, feed=None)
                        form.instance.delete()
                        continue
                    if bulk_edits:
                        for key, value in bulk_edits.items():
                            form.cleaned_data[key] = value
                # if the categories or authors changed, update unchanged videos
                # to the new values
                old_categories = set(form.instance.auto_categories.all())
                old_authors = set(form.instance.auto_authors.all())
                form.save()
                new_categories = set(form.instance.auto_categories.all())
                new_authors = set(form.instance.auto_authors.all())
                if old_categories != new_categories or \
                        old_authors != new_authors:
                    for v in form.instance.video_set.all():
                        changed = False
                        if set(v.categories.all()) == old_categories:
                            changed = True
                            v.categories = new_categories
                        if set(v.authors.all()) == old_authors:
                            changed = True
                            v.authors = new_authors
                        if changed:
                            v.save()
            for form in formset.deleted_forms:
                if request.POST.get('keep'):
                    form.instance.video_set.all().update(search=None,
                                                         feed=None)
                form.instance.delete()
            # Redirect after POST, flagging success for the template.
            path = request.get_full_path()
            if '?' in path:
                return HttpResponseRedirect(path + '&successful')
            else:
                return HttpResponseRedirect(path + '?successful')
    else:
        formset = forms.SourceFormset(queryset=MockQueryset(page.object_list))
    return render_to_response('localtv/admin/manage_sources.html',
                              {
            'add_feed_form': forms.AddFeedForm(),
            'page': page,
            'paginator': paginator,
            'headers': headers,
            'search_string': search_string,
            'source_filter': source_filter,
            'categories': models.Category.objects.filter(
                site=sitelocation.site),
            'users': User.objects.order_by('username'),
            'successful': 'successful' in request.GET,
            'formset': formset},
                              context_instance=RequestContext(request))
Bug #12947: Make sure sort keys are Unicode
# Copyright 2009 - Participatory Culture Foundation
#
# This file is part of Miro Community.
#
# Miro Community is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# Miro Community is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Miro Community. If not, see <http://www.gnu.org/licenses/>.
import re
from django.contrib.auth.models import User
from django.core.paginator import Paginator, InvalidPage
from django.db.models import Q
from django.http import Http404, HttpResponseRedirect
from django.shortcuts import render_to_response, get_object_or_404
from django.template.context import RequestContext
from django.utils.encoding import force_unicode
from localtv.decorators import get_sitelocation, require_site_admin
from localtv import models
from localtv.util import sort_header, MockQueryset
from localtv.admin import forms
VIDEO_SERVICE_TITLES = (
re.compile(r'Uploads by (.+)'),
re.compile(r"Vimeo / (.+)'s uploaded videos")
)
## -------------------
## Source administration
## -------------------
@require_site_admin
@get_sitelocation
def manage_sources(request, sitelocation=None):
    """
    Admin view listing every video source (Feed and SavedSearch) for the
    current site, with free-text search (?q=), category/author/source-type
    filtering, sorting, pagination (15 per page) and bulk editing/removal
    through a formset POST.
    """
    sort = request.GET.get('sort', 'name__lower')
    headers = [
        sort_header('name__lower', 'Source', sort),
        {'label': 'Categories'},
        {'label': 'User Attribution'},
        sort_header('type', 'Type', sort),
        sort_header('auto_approve', 'Auto Approve', sort)
        ]
    # Sorting by source type cannot be expressed in the ORM; fetch rows
    # name-sorted and re-sort by type in Python further below.
    if sort.endswith('type'):
        if sort[0] == '-':
            orm_sort = '-name__lower'
        else:
            orm_sort = 'name__lower'
    else:
        orm_sort = sort
    feeds = models.Feed.objects.filter(
        site=sitelocation.site,
        status=models.FEED_STATUS_ACTIVE).extra(select={
            'name__lower': 'LOWER(name)'}).order_by(orm_sort)
    searches = models.SavedSearch.objects.filter(
        site=sitelocation.site).extra(select={
            'name__lower': 'LOWER(query_string)'}).order_by(
        orm_sort)
    search_string = request.GET.get('q', '')
    if search_string:
        feeds = feeds.filter(Q(feed_url__icontains=search_string) |
                             Q(name__icontains=search_string) |
                             Q(webpage__icontains=search_string) |
                             Q(description__icontains=search_string))
        searches = searches.filter(query_string__icontains=search_string)
    category = request.GET.get('category')
    if category:
        category = get_object_or_404(models.Category, pk=category)
        feeds = feeds.filter(auto_categories=category)
        searches = searches.filter(auto_categories=category)
    author = request.GET.get('author')
    if author:
        author = get_object_or_404(User, pk=author)
        feeds = feeds.filter(auto_authors=author)
        searches = searches.filter(auto_authors=author)
    source_filter = request.GET.get('filter')
    if source_filter == 'search':
        queryset = searches
    elif source_filter in ('feed', 'user'):
        # Feeds whose URL matches one of the known video-service regexes are
        # treated as "user" sources; the remaining ones as plain feeds.
        q = Q(feed_url__iregex=models.VIDEO_SERVICE_REGEXES[0][1])
        for service, regexp in models.VIDEO_SERVICE_REGEXES[1:]:
            q = q | Q(feed_url__iregex=regexp)
        if source_filter == 'user':
            queryset = feeds.filter(q)
        else:
            queryset = feeds.exclude(q)
    else:
        # No filter: feeds and searches live in different tables, so merge
        # and sort them together in Python.
        reverse = False
        if orm_sort[0] == '-':
            reverse = True
            orm_sort = orm_sort[1:]
        # force_unicode makes all sort keys Unicode so mixed str/unicode
        # values compare consistently (bug #12947).
        feeds_list = [(force_unicode(getattr(feed, orm_sort)), feed)
                      for feed in feeds]
        searches_list = [(force_unicode(getattr(search, orm_sort)), search)
                         for search in searches]
        queryset = [l[1] for l in sorted(feeds_list + searches_list,
                                         reverse=reverse)]
    if sort.endswith('type'):
        reverse = (sort[0] == '-')
        queryset = sorted(queryset,
                          reverse=reverse,
                          key=lambda source: source.source_type().lower())
    paginator = Paginator(queryset, 15)
    try:
        page = paginator.page(int(request.GET.get('page', 1)))
    except InvalidPage:
        raise Http404
    if request.method == 'POST':
        formset = forms.SourceFormset(request.POST, request.FILES,
                                      queryset=MockQueryset(page.object_list))
        if formset.is_valid():
            # The trailing extra form carries the "bulk edit" values; drop
            # the fields the admin left blank.
            bulk_edits = formset.extra_forms[0].cleaned_data
            for key in list(bulk_edits.keys()): # get the list because we'll be
                                                # changing the dictionary
                if bulk_edits[key] in ['', None]:
                    del bulk_edits[key]
            bulk_action = request.POST.get('bulk_action', '')
            for form in formset.initial_forms:
                if form.cleaned_data['bulk']:
                    if bulk_action == 'remove':
                        if request.POST.get('keep'):
                            # Keep the videos but detach them from the source.
                            form.instance.video_set.all().update(
                                search=None, feed=None)
                        form.instance.delete()
                        continue
                    if bulk_edits:
                        for key, value in bulk_edits.items():
                            form.cleaned_data[key] = value
                # if the categories or authors changed, update unchanged videos
                # to the new values
                old_categories = set(form.instance.auto_categories.all())
                old_authors = set(form.instance.auto_authors.all())
                form.save()
                new_categories = set(form.instance.auto_categories.all())
                new_authors = set(form.instance.auto_authors.all())
                if old_categories != new_categories or \
                        old_authors != new_authors:
                    for v in form.instance.video_set.all():
                        changed = False
                        if set(v.categories.all()) == old_categories:
                            changed = True
                            v.categories = new_categories
                        if set(v.authors.all()) == old_authors:
                            changed = True
                            v.authors = new_authors
                        if changed:
                            v.save()
            for form in formset.deleted_forms:
                if request.POST.get('keep'):
                    form.instance.video_set.all().update(search=None,
                                                         feed=None)
                form.instance.delete()
            # Redirect after POST, flagging success for the template.
            path = request.get_full_path()
            if '?' in path:
                return HttpResponseRedirect(path + '&successful')
            else:
                return HttpResponseRedirect(path + '?successful')
    else:
        formset = forms.SourceFormset(queryset=MockQueryset(page.object_list))
    return render_to_response('localtv/admin/manage_sources.html',
                              {
            'add_feed_form': forms.AddFeedForm(),
            'page': page,
            'paginator': paginator,
            'headers': headers,
            'search_string': search_string,
            'source_filter': source_filter,
            'categories': models.Category.objects.filter(
                site=sitelocation.site),
            'users': User.objects.order_by('username'),
            'successful': 'successful' in request.GET,
            'formset': formset},
                              context_instance=RequestContext(request))
|
from django.conf.urls import patterns, include, url
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from crowdsourcing import views
from crowdsourcing.viewsets.project import *
from crowdsourcing.viewsets.user import UserViewSet, UserProfileViewSet, UserPreferencesViewSet
from crowdsourcing.viewsets.requester import RequesterRankingViewSet, RequesterViewSet, QualificationViewSet
from crowdsourcing.viewsets.worker import *
from crowdsourcing.viewsets.task import TaskViewSet, CurrencyViewSet
from rest_framework.routers import SimpleRouter
# REST API resources exposed through the DRF router under /api/<resource>/.
router = SimpleRouter(trailing_slash=True)
router.register(r'api/profile',UserProfileViewSet)
router.register(r'api/user', UserViewSet)
router.register(r'api/preferences', UserPreferencesViewSet)
router.register(r'api/requester-ranking', RequesterRankingViewSet)
router.register(r'api/requester', RequesterViewSet)
router.register(r'api/project', ProjectViewSet)
router.register(r'api/category', CategoryViewSet)
router.register(r'api/module', ModuleViewSet)
router.register(r'api/project-requester', ProjectRequesterViewSet)
router.register(r'api/worker-skill', WorkerSkillViewSet)
router.register(r'api/worker', WorkerViewSet)
router.register(r'api/skill', SkillViewSet)
router.register(r'api/task', TaskViewSet)
router.register(r'api/task-worker', TaskWorkerViewSet)
router.register(r'api/worker-module-application', WorkerModuleApplicationViewSet)
router.register(r'api/qualification', QualificationViewSet)
router.register(r'api/currency', CurrencyViewSet)
# Non-router endpoints: auth flows, oauth2 endpoints, the router URLs and a
# catch-all that serves the home view.
urlpatterns = patterns('',
    url(r'^api/v1/auth/forgot-password/$',views.ForgotPassword.as_view()),
    url(r'^api/v1/auth/reset-password/(?P<reset_key>\w+)/(?P<enable>[0-1]*)/$',views.reset_password),
    url(r'^api/v1/auth/registration-successful',views.registration_successful),
    url(r'^api/v1/auth/logout/$', views.Logout.as_view()),
    url(r'^/account-activation/(?P<activation_key>\w+)/$', views.activate_account),
    url(r'^api/oauth2/', include('oauth2_provider.urls', namespace='oauth2_provider')),
    url(r'^api/oauth2-ng/token', views.Oauth2TokenView.as_view()),
    url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
    url(r'', include(router.urls)),
    url('^.*$', views.home, name='home'),
)
# Serve static files (development helper).
urlpatterns += staticfiles_urlpatterns()
Added the module rating and module review viewsets to the URL configuration.
from django.conf.urls import patterns, include, url
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from crowdsourcing import views
from crowdsourcing.viewsets.project import *
from crowdsourcing.viewsets.user import UserViewSet, UserProfileViewSet, UserPreferencesViewSet
from crowdsourcing.viewsets.requester import RequesterRankingViewSet, RequesterViewSet, QualificationViewSet
from crowdsourcing.viewsets.worker import *
from crowdsourcing.viewsets.task import TaskViewSet, CurrencyViewSet
from rest_framework.routers import SimpleRouter
# REST API resources exposed through the DRF router under /api/<resource>/.
router = SimpleRouter(trailing_slash=True)
router.register(r'api/profile',UserProfileViewSet)
router.register(r'api/user', UserViewSet)
router.register(r'api/preferences', UserPreferencesViewSet)
router.register(r'api/requester-ranking', RequesterRankingViewSet)
router.register(r'api/requester', RequesterViewSet)
router.register(r'api/project', ProjectViewSet)
router.register(r'api/category', CategoryViewSet)
router.register(r'api/module', ModuleViewSet)
router.register(r'api/modulereview', ModuleReviewViewSet)
router.register(r'api/modulerating', ModuleRatingViewSet)
router.register(r'api/project-requester', ProjectRequesterViewSet)
router.register(r'api/worker-skill', WorkerSkillViewSet)
router.register(r'api/worker', WorkerViewSet)
router.register(r'api/skill', SkillViewSet)
router.register(r'api/task', TaskViewSet)
router.register(r'api/task-worker', TaskWorkerViewSet)
router.register(r'api/worker-module-application', WorkerModuleApplicationViewSet)
router.register(r'api/qualification', QualificationViewSet)
router.register(r'api/currency', CurrencyViewSet)
# Non-router endpoints: auth flows, oauth2 endpoints, the router URLs and a
# catch-all that serves the home view.
urlpatterns = patterns('',
    url(r'^api/v1/auth/forgot-password/$',views.ForgotPassword.as_view()),
    url(r'^api/v1/auth/reset-password/(?P<reset_key>\w+)/(?P<enable>[0-1]*)/$',views.reset_password),
    url(r'^api/v1/auth/registration-successful',views.registration_successful),
    url(r'^api/v1/auth/logout/$', views.Logout.as_view()),
    url(r'^/account-activation/(?P<activation_key>\w+)/$', views.activate_account),
    url(r'^api/oauth2/', include('oauth2_provider.urls', namespace='oauth2_provider')),
    url(r'^api/oauth2-ng/token', views.Oauth2TokenView.as_view()),
    url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
    url(r'', include(router.urls)),
    url('^.*$', views.home, name='home'),
)
# Serve static files (development helper).
urlpatterns += staticfiles_urlpatterns()
|
import os
from keras.models import Sequential, Model, load_model
from keras.layers import Dense, Activation, Input, LSTM, Masking, Dropout, Flatten, Reshape
from keras.layers.normalization import BatchNormalization
from keras.optimizers import SGD
import keras.backend as backend
from keras.optimizers import Adam
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from array import array
from MLHelper import Root2NumpyConverter, TrainingReader
from PyAnalysisTools.base import _logger
from PyAnalysisTools.base.ShellUtils import make_dirs
from PyAnalysisTools.ROOTUtils.FileHandle import FileHandle
from PyAnalysisTools.base.OutputHandle import SysOutputHandle as so
np.seterr(divide='ignore', invalid='ignore')
class NeuralNetwork(object):
    """
    Builds and compiles the keras model used by NNTrainer; the compiled model
    is exposed as ``self.kerasmodel``.

    NOTE(review): num_layers, size, lr, keep_prob, tloss and input_noise are
    not used by the hard-coded architecture below -- they are kept only for
    interface compatibility with the callers.
    """

    def __init__(self, num_features, num_layers=3, size=20, lr=1e-3, keep_prob=1.0, tloss="soft", input_noise=0.0):
        # num_features is the shape of the input data; only the number of
        # columns (num_features[1]) is used as the input dimension.
        self.inputs = Input(shape=num_features)
        model = Sequential()
        model.add(Dense(64, input_dim=num_features[1], activation='relu'))
        model.add(Dense(64, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(64, activation='relu'))
        model.add(Dropout(0.5))
        # Single sigmoid output for binary classification.
        model.add(Dense(1, activation='sigmoid'))
        # Removed: a large body of commented-out alternative architectures and
        # an SGD optimizer that was constructed but never used (the model is
        # compiled with 'adam').
        model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
        self.kerasmodel = model
class NNTrainer(object):
    """Train two cross-validated Keras classifiers on ROOT tree input.

    Two models are trained on complementary splits ("train" and "eval");
    each uses the other split as validation data. Trained models and
    control plots are written below ``output_path``.
    """

    def __init__(self, **kwargs):
        """
        Required kwargs: variables, max_events, output_path (plus whatever
        TrainingReader consumes). Optional kwargs and their defaults:
        n_features (None), layers (2), units (10), epochs (10),
        control_plots (False).
        """
        kwargs.setdefault("n_features", None)
        kwargs.setdefault("layers", 2)
        kwargs.setdefault("units", 10)
        kwargs.setdefault("epochs", 10)
        kwargs.setdefault("control_plots", False)
        self.reader = TrainingReader(**kwargs)
        self.converter = Root2NumpyConverter(kwargs["variables"])
        self.n_features = kwargs["n_features"]
        self.layers = kwargs["layers"]
        self.units = kwargs["units"]
        self.epochs = kwargs["epochs"]
        self.max_events = kwargs["max_events"]
        self.plot = True
        self.do_control_plots = kwargs["control_plots"]
        self.output_path = so.resolve_output_dir(output_dir=kwargs["output_path"], sub_dir_name="NNtrain")
        make_dirs(os.path.join(self.output_path, "plots"))
        make_dirs(os.path.join(self.output_path, "models"))

    def build_input(self):
        """Read the input trees and build the train/eval frames and labels."""
        trees = self.reader.get_trees()
        arrays = [[self.converter.convert_to_array(tree, max_events=self.max_events) for tree in items]
                  for items in trees]
        # arrays[0]/[1] feed the "train" split, arrays[2]/[3] the "eval" split.
        self.data_train, self.label_train = self.converter.merge(arrays[0], arrays[1])
        self.data_eval, self.label_eval = self.converter.merge(arrays[2], arrays[3])
        self.data_train = pd.DataFrame(self.data_train)
        self.data_eval = pd.DataFrame(self.data_eval)

    def build_models(self):
        """Create one network per split for cross-training."""
        self.model_0 = NeuralNetwork(self.data_train.shape, num_layers=self.layers, size=self.units,
                                     tloss='soft').kerasmodel
        self.model_1 = NeuralNetwork(self.data_eval.shape, num_layers=self.layers, size=self.units,
                                     tloss='soft').kerasmodel

    def apply_scaling(self):
        """Standardise each split with its own mean and standard deviation."""
        train_mean = self.data_train.mean()
        train_std = self.data_train.std()
        self.data_train = (self.data_train - train_mean) / train_std
        eval_mean = self.data_eval.mean()
        eval_std = self.data_eval.std()
        self.data_eval = (self.data_eval - eval_mean) / eval_std

    def plot_train_control(self, history, name):
        """Plot loss/accuracy curves of a fit history to plots/<name>.png."""
        plt.plot(history.history['loss'])
        plt.plot(history.history['val_loss'])
        plt.plot(history.history['acc'])
        plt.plot(history.history['val_acc'])
        plt.legend(["loss", "valid loss", "acc", "valid acc"])
        plt.xlabel('epoch')
        plt.ylabel('NN loss')
        plt.savefig(os.path.join(self.output_path, 'plots/{:s}.png'.format(name)))
        plt.close()

    def train(self):
        """Run the full chain: build inputs, scale, fit both models, save."""
        self.build_input()
        self.build_models()
        if self.do_control_plots:
            self.make_control_plots("prescaling")
        self.apply_scaling()
        if self.do_control_plots:
            self.make_control_plots("postscaling")
        # Each model validates on the opposite split (cross-validation).
        history_train = self.model_0.fit(self.data_train.values, self.label_train.reshape((self.label_train.shape[0], 1)),
                                         epochs=self.epochs, verbose=1, batch_size=32,
                                         shuffle=True, validation_data=(self.data_eval.values, self.label_eval))
        history_eval = self.model_1.fit(self.data_eval.values, self.label_eval.reshape((self.label_eval.shape[0], 1)),
                                        epochs=self.epochs, verbose=1, batch_size=32,
                                        shuffle=True, validation_data=(self.data_train.values, self.label_train))
        if self.plot:
            self.plot_train_control(history_train, "train")
            self.plot_train_control(history_eval, "eval")
        _logger.debug("Done Training, saving models...")
        self.model_0.save(os.path.join(self.output_path, "models/model_train.h5"))
        self.model_1.save(os.path.join(self.output_path, "models/model_eval.h5"))
        self.run_predictions()

    def run_predictions(self):
        """Plot the score distribution of model_0 on the eval split."""
        _logger.info("Evaluating predictions")
        preds_train = self.model_0.predict(self.data_eval.values)
        preds_sig_train = preds_train[self.label_eval == 1]
        preds_bkg_train = preds_train[self.label_eval == 0]
        if self.plot:
            _logger.debug("Consistency plots")
            plt.hist(preds_sig_train, 20, range=[0., 1.], histtype='step', label='signal model0', normed=True)
            plt.hist(preds_bkg_train, 20, range=[0., 1.], histtype='step', label='bkg model1', normed=True)
            plt.yscale('log')
            plt.grid(True)
            plt.legend(["signal", "background"], loc="lower left")
            plt.savefig(os.path.join(self.output_path, "plots/consistency_sig_train.png"))

    def make_control_plots(self, prefix):
        """Plot signal vs background distributions of every input variable."""
        def make_plot(prefix, variable_name, signal, background):
            # Fix: Series.append returns a new Series; the original discarded
            # the result, so the percentile range covered the signal only.
            data = signal.append(background)
            if "/" in variable_name:
                variable_name = "_".join(variable_name.split("/")).replace(" ","")
            var_range = np.percentile(data, [2.5, 97.5])
            # list() keeps this working on Python 3, where map() is lazy.
            plt.hist(list(map(float, signal.values)), 100, range=var_range, histtype='step', label='signal', normed=True)
            plt.hist(list(map(float, background.values)), 100, range=var_range, histtype='step', label='background', normed=True)
            if data.ptp() > 1000.:
                plt.yscale('log')
            plt.legend(["signal", "background"], loc="upper right")
            plt.xlabel(variable_name)
            plt.ylabel('Normalised')
            plt.savefig(os.path.join(self.output_path, "plots/{:s}_{:s}.png".format(prefix, variable_name)))
            plt.close()

        for key in self.data_train.keys():
            make_plot("{}_{}".format(prefix, "train"), key, self.data_train[key][self.label_train == 1],
                      self.data_train[key][self.label_train == 0])
            make_plot("{}_{}".format(prefix, "eval"), key, self.data_eval[key][self.label_eval == 1],
                      self.data_eval[key][self.label_eval == 0])
class NNReader(object):
    """Attach neural-network scores as a new float branch to ROOT trees."""
    def __init__(self, **kwargs):
        # Required kwargs: run_dir, input_files, tree_name, model_path,
        # branches, branch_name. Files are opened in UPDATE mode so the new
        # branch can be written back.
        self.file_handles = [FileHandle(file_name=fn, open_option="UPDATE", run_dir=kwargs["run_dir"])
                             for fn in kwargs["input_files"]]
        self.tree_name = kwargs["tree_name"]
        # Models produced by NNTrainer (one per cross-validation split).
        self.model_train = load_model(os.path.join(os.path.abspath(kwargs["model_path"]), "model_train.h5"))
        self.model_eval = load_model(os.path.join(os.path.abspath(kwargs["model_path"]), "model_eval.h5"))
        self.converter = Root2NumpyConverter(kwargs["branches"])
        self.branch_name = kwargs["branch_name"]
    def run(self):
        """Attach the NN output branch to every input file."""
        for file_handle in self.file_handles:
            self.attach_NN_output(file_handle)
    def attach_NN_output(self, file_handle):
        """Score the tree with both models and write the result to a branch.

        Each entry is scored with the model chosen by its train_flag
        (cross-evaluation -- TODO confirm which model saw which split).
        Entries with more than one object get the sentinel value -1.
        """
        tree = file_handle.get_object_by_name(self.tree_name, "Nominal")
        data = self.converter.convert_to_array(tree)
        prediction0 = self.model_train.predict(data)
        prediction1 = self.model_eval.predict(data)
        # Single-float buffer backing the new branch ("<name>/F").
        bdt = array('f', [0.])
        branch = tree.Branch(self.branch_name, bdt, "{:s}/F".format(self.branch_name))
        total_entries = tree.GetEntries()
        multiple_triplets = 0
        for entry in range(total_entries):
            tree.GetEntry(entry)
            is_train = tree.train_flag == 0
            # NOTE(review): the offset assumes the prediction arrays are
            # aligned with single-object entries only -- verify against the
            # converter's selection.
            if not len(tree.object_pt) > 1:
                if not is_train:
                    bdt[0] = prediction1[entry-multiple_triplets]
                else:
                    bdt[0] = prediction0[entry-multiple_triplets]
            else:
                # Multi-object entry: no prediction available, mark with -1.
                bdt[0] = -1
                multiple_triplets += 1
            branch.Fill()
        tdir = file_handle.get_directory("Nominal")
        tdir.cd()
        tree.Write()
Update NN settings:
* add generic layer creation
* change optimiser
import os
from keras.models import Sequential, Model, load_model
from keras.layers import Dense, Activation, Input, LSTM, Masking, Dropout, Flatten, Reshape
from keras.layers.normalization import BatchNormalization
from keras.optimizers import SGD
import keras.backend as backend
from keras.optimizers import Adam
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from array import array
from MLHelper import Root2NumpyConverter, TrainingReader
from PyAnalysisTools.base import _logger
from PyAnalysisTools.base.ShellUtils import make_dirs
from PyAnalysisTools.ROOTUtils.FileHandle import FileHandle
from PyAnalysisTools.base.OutputHandle import SysOutputHandle as so
np.seterr(divide='ignore', invalid='ignore')
class NeuralNetwork(object):
    """Fully-connected Keras binary classifier with configurable depth.

    The compiled model (SGD optimiser) is exposed via ``self.kerasmodel``.
    """

    def __init__(self, num_features, num_layers=3, size=20, lr=1e-3, keep_prob=1.0, tloss="soft", input_noise=0.0):
        """
        :param num_features: shape tuple of the training data; only
            ``num_features[1]`` (the number of input columns) is used.
        :param num_layers: total number of hidden layers: one input layer plus
            ``num_layers - 1`` relu layers, each followed by dropout.
        :param size: requested layer width -- NOTE(review): currently ignored,
            the width is hard-coded to 64 units.
        :param lr: learning rate -- NOTE(review): not forwarded, the SGD below
            uses a hard-coded 0.01; confirm whether it should be.
        :param keep_prob: dropout keep probability -- unused (0.5 hard-coded).
        :param tloss: loss selector -- unused ('binary_crossentropy' hard-coded).
        :param input_noise: input noise level -- unused.
        """
        # Kept as an attribute for external access; mirrors the input shape.
        self.inputs = Input(shape=num_features)
        model = Sequential()
        model.add(Dense(64, input_dim=num_features[1], activation='relu'))
        # Generic layer creation: each extra hidden layer gets its own dropout.
        for _ in range(num_layers - 1):
            model.add(Dense(64, activation='relu'))
            model.add(Dropout(0.5))
        # Single sigmoid output for binary classification.
        model.add(Dense(1, activation='sigmoid'))
        sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
        model.compile(loss='binary_crossentropy', optimizer=sgd, metrics=['accuracy'])
        self.kerasmodel = model
class NNTrainer(object):
    """Train two cross-validated Keras classifiers on ROOT tree input.

    Two models are trained on complementary splits ("train" and "eval");
    each uses the other split as validation data. Trained models and
    control plots are written below ``output_path``.
    """

    def __init__(self, **kwargs):
        """
        Required kwargs: variables, max_events, output_path (plus whatever
        TrainingReader consumes). Optional kwargs and their defaults:
        n_features (None), layers (2), units (10), epochs (10),
        control_plots (False).
        """
        kwargs.setdefault("n_features", None)
        kwargs.setdefault("layers", 2)
        kwargs.setdefault("units", 10)
        kwargs.setdefault("epochs", 10)
        kwargs.setdefault("control_plots", False)
        self.reader = TrainingReader(**kwargs)
        self.converter = Root2NumpyConverter(kwargs["variables"])
        self.n_features = kwargs["n_features"]
        self.layers = kwargs["layers"]
        self.units = kwargs["units"]
        self.epochs = kwargs["epochs"]
        self.max_events = kwargs["max_events"]
        self.plot = True
        self.do_control_plots = kwargs["control_plots"]
        self.output_path = so.resolve_output_dir(output_dir=kwargs["output_path"], sub_dir_name="NNtrain")
        make_dirs(os.path.join(self.output_path, "plots"))
        make_dirs(os.path.join(self.output_path, "models"))

    def build_input(self):
        """Read the input trees and build the train/eval frames and labels."""
        trees = self.reader.get_trees()
        arrays = [[self.converter.convert_to_array(tree, max_events=self.max_events) for tree in items]
                  for items in trees]
        # arrays[0]/[1] feed the "train" split, arrays[2]/[3] the "eval" split.
        self.data_train, self.label_train = self.converter.merge(arrays[0], arrays[1])
        self.data_eval, self.label_eval = self.converter.merge(arrays[2], arrays[3])
        self.data_train = pd.DataFrame(self.data_train)
        self.data_eval = pd.DataFrame(self.data_eval)

    def build_models(self):
        """Create one network per split for cross-training."""
        self.model_0 = NeuralNetwork(self.data_train.shape, num_layers=self.layers, size=self.units,
                                     tloss='soft').kerasmodel
        self.model_1 = NeuralNetwork(self.data_eval.shape, num_layers=self.layers, size=self.units,
                                     tloss='soft').kerasmodel

    def apply_scaling(self):
        """Standardise each split with its own mean and standard deviation."""
        train_mean = self.data_train.mean()
        train_std = self.data_train.std()
        self.data_train = (self.data_train - train_mean) / train_std
        eval_mean = self.data_eval.mean()
        eval_std = self.data_eval.std()
        self.data_eval = (self.data_eval - eval_mean) / eval_std

    def plot_train_control(self, history, name):
        """Plot loss/accuracy curves of a fit history to plots/<name>.png."""
        plt.plot(history.history['loss'])
        plt.plot(history.history['val_loss'])
        plt.plot(history.history['acc'])
        plt.plot(history.history['val_acc'])
        plt.legend(["loss", "valid loss", "acc", "valid acc"])
        plt.xlabel('epoch')
        plt.ylabel('NN loss')
        plt.savefig(os.path.join(self.output_path, 'plots/{:s}.png'.format(name)))
        plt.close()

    def train(self):
        """Run the full chain: build inputs, scale, fit both models, save."""
        self.build_input()
        self.build_models()
        if self.do_control_plots:
            self.make_control_plots("prescaling")
        self.apply_scaling()
        if self.do_control_plots:
            self.make_control_plots("postscaling")
        # Each model validates on the opposite split (cross-validation).
        history_train = self.model_0.fit(self.data_train.values, self.label_train.reshape((self.label_train.shape[0], 1)),
                                         epochs=self.epochs, verbose=1, batch_size=32,
                                         shuffle=True, validation_data=(self.data_eval.values, self.label_eval))
        history_eval = self.model_1.fit(self.data_eval.values, self.label_eval.reshape((self.label_eval.shape[0], 1)),
                                        epochs=self.epochs, verbose=1, batch_size=32,
                                        shuffle=True, validation_data=(self.data_train.values, self.label_train))
        if self.plot:
            self.plot_train_control(history_train, "train")
            self.plot_train_control(history_eval, "eval")
        _logger.debug("Done Training, saving models...")
        self.model_0.save(os.path.join(self.output_path, "models/model_train.h5"))
        self.model_1.save(os.path.join(self.output_path, "models/model_eval.h5"))
        self.run_predictions()

    def run_predictions(self):
        """Plot the score distribution of model_0 on the eval split."""
        _logger.info("Evaluating predictions")
        preds_train = self.model_0.predict(self.data_eval.values)
        preds_sig_train = preds_train[self.label_eval == 1]
        preds_bkg_train = preds_train[self.label_eval == 0]
        if self.plot:
            _logger.debug("Consistency plots")
            plt.hist(preds_sig_train, 20, range=[0., 1.], histtype='step', label='signal model0', normed=True)
            plt.hist(preds_bkg_train, 20, range=[0., 1.], histtype='step', label='bkg model1', normed=True)
            plt.yscale('log')
            plt.grid(True)
            plt.legend(["signal", "background"], loc="lower left")
            plt.savefig(os.path.join(self.output_path, "plots/consistency_sig_train.png"))

    def make_control_plots(self, prefix):
        """Plot signal vs background distributions of every input variable."""
        def make_plot(prefix, variable_name, signal, background):
            # Fix: Series.append returns a new Series; the original discarded
            # the result, so the percentile range covered the signal only.
            data = signal.append(background)
            if "/" in variable_name:
                variable_name = "_".join(variable_name.split("/")).replace(" ","")
            var_range = np.percentile(data, [2.5, 97.5])
            # list() keeps this working on Python 3, where map() is lazy.
            plt.hist(list(map(float, signal.values)), 100, range=var_range, histtype='step', label='signal', normed=True)
            plt.hist(list(map(float, background.values)), 100, range=var_range, histtype='step', label='background', normed=True)
            if data.ptp() > 1000.:
                plt.yscale('log')
            plt.legend(["signal", "background"], loc="upper right")
            plt.xlabel(variable_name)
            plt.ylabel('Normalised')
            plt.savefig(os.path.join(self.output_path, "plots/{:s}_{:s}.png".format(prefix, variable_name)))
            plt.close()

        for key in self.data_train.keys():
            make_plot("{}_{}".format(prefix, "train"), key, self.data_train[key][self.label_train == 1],
                      self.data_train[key][self.label_train == 0])
            make_plot("{}_{}".format(prefix, "eval"), key, self.data_eval[key][self.label_eval == 1],
                      self.data_eval[key][self.label_eval == 0])
class NNReader(object):
    """Attach neural-network scores as a new float branch to ROOT trees."""
    def __init__(self, **kwargs):
        # Required kwargs: run_dir, input_files, tree_name, model_path,
        # branches, branch_name. Files are opened in UPDATE mode so the new
        # branch can be written back.
        self.file_handles = [FileHandle(file_name=fn, open_option="UPDATE", run_dir=kwargs["run_dir"])
                             for fn in kwargs["input_files"]]
        self.tree_name = kwargs["tree_name"]
        # Models produced by NNTrainer (one per cross-validation split).
        self.model_train = load_model(os.path.join(os.path.abspath(kwargs["model_path"]), "model_train.h5"))
        self.model_eval = load_model(os.path.join(os.path.abspath(kwargs["model_path"]), "model_eval.h5"))
        self.converter = Root2NumpyConverter(kwargs["branches"])
        self.branch_name = kwargs["branch_name"]
    def run(self):
        """Attach the NN output branch to every input file."""
        for file_handle in self.file_handles:
            self.attach_NN_output(file_handle)
    def attach_NN_output(self, file_handle):
        """Score the tree with both models and write the result to a branch.

        Each entry is scored with the model chosen by its train_flag
        (cross-evaluation -- TODO confirm which model saw which split).
        Entries with more than one object get the sentinel value -1.
        """
        tree = file_handle.get_object_by_name(self.tree_name, "Nominal")
        data = self.converter.convert_to_array(tree)
        prediction0 = self.model_train.predict(data)
        prediction1 = self.model_eval.predict(data)
        # Single-float buffer backing the new branch ("<name>/F").
        bdt = array('f', [0.])
        branch = tree.Branch(self.branch_name, bdt, "{:s}/F".format(self.branch_name))
        total_entries = tree.GetEntries()
        multiple_triplets = 0
        for entry in range(total_entries):
            tree.GetEntry(entry)
            is_train = tree.train_flag == 0
            # NOTE(review): the offset assumes the prediction arrays are
            # aligned with single-object entries only -- verify against the
            # converter's selection.
            if not len(tree.object_pt) > 1:
                if not is_train:
                    bdt[0] = prediction1[entry-multiple_triplets]
                else:
                    bdt[0] = prediction0[entry-multiple_triplets]
            else:
                # Multi-object entry: no prediction available, mark with -1.
                bdt[0] = -1
                multiple_triplets += 1
            branch.Fill()
        tdir = file_handle.get_directory("Nominal")
        tdir.cd()
        tree.Write()
|
#!/usr/bin/env python3
import argparse
import glob
import json
import os
import re
from collections import defaultdict
from datetime import date, timedelta, datetime
OVERVIEW_COUNT = 10
# Common things ---------------------------------------------------------------
# See main at bottom
class ManualChange:
    """
    Apply a change to a range of menus in the v2 API. v1 is not supported.
    """

    def __init__(self, replacer, resto, start, end, all_days=False):
        """
        :param replacer: The function that will do the replacements. It will receive the path to the file and the
        original menu.
        :param start: The start date (inclusive).
        :param end: The end date (inclusive).
        :param resto: Which restaurant(s) to apply to.
        :param all_days: If the message should be added for all weekdays in the range. If false (the default), the
        changes will only be applied if there already is a menu for the day.
        """
        self.replacer = replacer
        self.start = start
        self.end = end
        # Normalise a single restaurant name into a one-element list.
        self.resto = [resto] if isinstance(resto, str) else resto
        assert isinstance(self.resto, list)
        self.all_days = all_days

    def is_applicable(self, menu_date):
        """Check if this change is applicable to the given date"""
        return self.start <= menu_date and menu_date <= self.end

    def date_range(self):
        """Return an iterator over the applicable range. Only weekdays are returned."""
        total_days = (self.end - self.start).days + 1
        for offset in range(total_days):
            day = self.start + timedelta(offset)
            # weekday() < 5 keeps Monday (0) through Friday (4).
            if day.weekday() < 5:
                yield day
# "Restjesmaand" (leftovers month), summer 2018: Sint-Jansvest no longer
# serves a daily menu, only leftovers.
def restjesmaand18_replacer(_path, original):
    """Replace the menu with a leftovers notice; resto stays open, no meals."""
    # original looks like {"date": "2018-06-14", "meals": [], "open": false, "vegetables": []}
    notice = ("Om voedseloverschotten op het einde van het academiejaar te beperken, "
              "kunnen we geen dagmenu presenteren. "
              "Ga langs en laat je verrassen door ons keukenpersoneel.")
    return dict(message=notice, date=original["date"], meals=[], open=True, vegetables=[])


# Easter holiday 2019
def paasvakantie19_general(_path, original):
    """Add the Easter-holiday cafetaria announcement (Dutch) and force open."""
    original['open'] = True
    original['message'] = ("Tijdens de paasvakantie zijn resto's Campus Sterre en Campus Merelbeke geopend als "
                           "cafetaria.")
    return original


def paasvakantie19_en(_path, original):
    """English variant of the Easter-holiday cafetaria announcement."""
    original['open'] = True
    original['message'] = 'During the Easter Holiday restos Campus Sterre and Campus Merelbeke operate as cafetaria.'
    return original


def paasvakantie19_brug(_path, original):
    """De Brug is only open at noon during the Easter holiday."""
    original['message'] = "Tijdens de paasvakantie is De Brug enkel 's middags geopend."
    return original
# Renovation works at De Brug: the resto is closed for the duration.
def werken_brug19_replacer(_path, original):
    """Close De Brug for renovation; Kantienberg takes over its role."""
    message = ('De Brug sluit van 20 mei tot 30 september 2019 voor verbouwingswerken. Tijdens de sluiting neemt resto '
               'Kantienberg de functies en het aanbod van de Brug over, zoals de avondopening.')
    return dict(message=message, date=original["date"], open=False)


def werken_brug19_replacer2(_path, original):
    """Extended closure of De Brug while the works are being finished."""
    # NOTE(review): "je's" below is missing a space in the source text; kept
    # verbatim because this message was published as-is.
    message = ("Resto De Brug en Cafetaria De Brug zijn nog even gesloten in afwachting van het voltooien van de"
               " werken. Tot dan kan je's middags en 's avonds terecht in Resto Kantienberg. Wij houden jullie op de"
               " hoogte!<br>'s Middags is Resto Sint-Jansvest tijdelijk een reguliere resto met een uitgebreid aanbod"
               " aan belegde broodjes. Enkel soep of broodjes nodig? Dan is Cafetaria campus Boekentoren (via"
               " Blandijnberg) zeer dichtbij.")
    return dict(message=message, date=original["date"], open=False)


def tijdelijke_sluiting_sint_jansvest(_path, original):
    """Sint-Jansvest temporarily closed for road works; keep any meals."""
    message = ("Resto Sint-Jansvest is tijdelijk gesloten wegens wegenwerken. Tijdens de werken kan u terecht in De "
               "Brug. ")
    return dict(message=message, date=original["date"], open=False, meals=original.get("meals", []))
def corona_sluiting_nl(_path, original):
    """Corona closure notice (Dutch), effective 16 March 2020."""
    message = ("De studentenrestaurants en cafetaria's sluiten vanaf maandag 16 maart 2020 de deuren. "
               "De UGent neemt die maatregel om verdere verspreiding van het coronavirus tot een minimum te beperken. "
               "De sluiting loopt zeker tot en met 7 juni 2020.")
    return dict(message=message, date=original["date"], open=False)


def corona_sluiting_en(_path, original):
    """Corona closure notice (English), effective 16 March 2020."""
    message = ("The student restaurants and cafeterias will be closed as from Monday 16 March 2020. "
               "Ghent University is taking this measure to minimize the further spreading of the coronavirus. "
               "The closure will certainly last until 7 June 2020.")
    return dict(message=message, date=original["date"], open=False)
def corona_heropening_nl(_path, original):
    """Takeaway-only reopening (Dutch) with a fixed, reduced menu."""
    def meal(kind, name, price=""):
        # Key order matches the serialised menu format.
        return {"kind": kind, "type": "main", "name": name, "price": price}

    message = ("Ter plaatse eten is momenteel niet mogelijk; enkel takeaway van een beperkt aanbod. De coronamaatregelen blijven van kracht! "
               "Resto Dunant, Coupure en Sterre en van cafetaria UZ Gent en Boekentoren zijn opnieuw open. "
               "Bij de start van het academiejaar volgen de andere locaties.")
    return dict(
        message=message,
        date=original["date"],
        open=True,
        meals=[
            meal("meat", "Spaghetti bolognese met kaas", "\u20ac 3,60"),
            meal("vegetarian", "Salad bowl: Caesar"),
            meal("vegetarian", "Salad bowl: Tomaat-Mozzarella"),
            meal("soup", "Dagsoep"),
        ],
        vegetables=[],
    )


def corona_heropening_en(_path, original):
    """Takeaway-only reopening (English) with a fixed, reduced menu."""
    def meal(kind, name, price=""):
        # Key order matches the serialised menu format.
        return {"kind": kind, "type": "main", "name": name, "price": price}

    message = ("The canteen is closed; only takeaway of a reduced offering is possible. The Corona measures remain active! "
               "Resto Dunant, Coupure & Sterre and cafetaria UZ Gent & Boekentoren are open. "
               "At the start of the academic year, the other locations will follow.")
    return dict(
        message=message,
        date=original["date"],
        open=True,
        meals=[
            meal("meat", "Spaghetti bolognese with cheese", "\u20ac 3,60"),
            meal("vegetarian", "Salad bowl: Caesar"),
            meal("vegetarian", "Salad bowl: Tomato-Mozzarella"),
            meal("soup", "Soup of the day"),
        ],
        vegetables=[],
    )


def corona_closed_for_now(_path, original):
    """Mark locations that stay closed until the start of the academic year."""
    message = ("Resto Dunant, Coupure en Sterre en van cafetaria UZ Gent en Boekentoren zijn opnieuw open. "
               "Bij de start van het academiejaar volgen de andere locaties.")
    return dict(message=message, date=original["date"], open=False)
def kantienberg_2020(_path, original):
    """Kantienberg stays closed for the whole 2020-2021 academic year."""
    return dict(
        message="Resto Kantienberg blijft gesloten tijdens academiejaar 2020-2021.",
        date=original["date"],
        open=False,
    )


def corona_2020_2021_nl(_path, original):
    """Add the Dutch takeaway-only corona notice to an existing menu."""
    original["message"] = ("Door de coronamaatregelen veranderen enkele zaken: ter plaatse eten is niet mogelijk "
                           "(enkel afhalen) en er is een beperkter aanbod.")
    return original


def corona_2020_2021_en(_path, original):
    """English variant of the takeaway-only corona notice."""
    original["message"] = ("Due to the corona measures, some changes are made: only takeaway is possible "
                           "and the offering is reduced.")
    return original


def corona_2020_2021_nl_red(_path, original):
    """Short Dutch notice: takeaway only, reduced offering."""
    original["message"] = "Enkel afhalen en een beperkter aanbod. De coronamaatregelen blijven van kracht!"
    return original


def corona_2020_2021_cold(_path, original):
    """Dutch notice: cafetaria offering and cold takeaway dishes only."""
    original["message"] = "Enkel cafetaria-aanbod en koude meeneemgerechten. De coronamaatregelen blijven van kracht!"
    return original


def corona_2020_2021_en_red(_path, original):
    """English notice for code red: open, takeaway only, reduced offering."""
    original["message"] = ("Due to the corona measures, some changes are made: only takeaway is possible "
                           "and the offering is reduced. "
                           "The restaurants and cafetaria's will remain open in code red.")
    return original
def exam_closure_sterre_2020(_path, original):
    """Sterre closed on specific January exam days."""
    original.update(message="Door examens zal de resto gesloten zijn op 4, 15, 18 en 26 januari.", open=False)
    return original


def exam_closure_dunant_2020(_path, original):
    """Dunant closed on specific January exam days."""
    original.update(message="Door examens zal de resto gesloten zijn op 4, 8, 15, 18, 22, 25 en 29 januari.", open=False)
    return original


def christmas(_path, original):
    """Closed on UGent holidays and the first week of the Christmas break."""
    original.update(
        message=("Naast de UGent-verlofdagen zijn de resto's ook gesloten tijdens de eerste week van de "
                 "kerstvakantie. "),
        open=False,
    )
    return original


def exam_closure_en_2020(_path, original):
    """English pointer to the January exam closures; menu state untouched."""
    original["message"] = ("Resto Sterre and Dunant are closed on some days in January due to exams. Check the site "
                           "for more details.")
    return original


def dies_natalis_2021(_path, original):
    """All restos closed on Dies Natalis (Dutch)."""
    original.update(message="De resto's zijn gesloten op Dies Natalis.", open=False)
    return original


def dies_natalis_2021_en(_path, original):
    """All restos closed on Dies Natalis (English)."""
    original.update(message="The restaurants are closed on Dies Natalis.", open=False)
    return original
def easter_2021_week1(_path, original):
    """Easter 2021, week 1: a few locations open as cafetaria only."""
    original.update(
        message=("In de paasvakantie zullen resto's Sterre, Ardoyen, De Brug en UZ Gent open zijn, "
                 "maar enkel als cafetaria. "),
        open=True,
    )
    return original


def easter_2021_week2(_path, original):
    """Easter 2021, week 2: Coupure joins the cafetaria-only locations."""
    original.update(
        message=("In de paasvakantie zullen resto's Sterre, Ardoyen, De Brug, UZ Gent en Coupure open zijn, "
                 "maar enkel als cafetaria. "),
        open=True,
    )
    return original


def summer_2021_1(_path, original):
    """Summer 2021, phase 1: sandwiches and salad bowls only, no seating."""
    original.update(
        message=("Cafetaria de Brug en resto's Ardoyen, Sterre en Merelbeke met een gewijzigd aanbod. Er zullen"
                 " dan enkel broodjes en salad bowls te verkrijgen zijn. De zitplaatsen kunnen nog niet gebruikt worden."),
        open=True,
    )
    return original


def summer_2021_2(_path, original):
    """Summer 2021, phase 2: more locations, still takeaway-style only."""
    original.update(
        message=("Cafetaria's de Brug en UZ Gent, en resto's Ardoyen, Sterre, Coupure en Merelbeke met een gewijzigd aanbod. Er zullen"
                 " dan enkel broodjes en salad bowls te verkrijgen zijn. De zitplaatsen kunnen nog niet gebruikt worden."),
        open=True,
    )
    return original


def brug_avond(_path, original):
    """De Brug is closed on Friday evening; menu state untouched."""
    original["message"] = "De Brug is vrijdagavond gesloten."
    return original


def november_12_2021(_path, original):
    """Everything closed on 12 November 2021 except cafetaria De Brug (Dutch)."""
    original.update(
        message="Vrijdag 12 november zullen alle resto's en cafetaria's gesloten zijn, behalve cafetaria De Brug.",
        open=False,
    )
    return original


def november_12_2021_en(_path, original):
    """Everything closed on 12 November 2021 except cafeteria De Brug (English)."""
    original.update(
        message="Friday, the 12th of November, all restos and cafeterias will be closed except for cafeteria De Brug.",
        open=False,
    )
    return original
def heymans_november_22_2021(_path, original):
    """Heymans exceptionally closed on 22-23 November (Dutch)."""
    original.update(message="Maandag 22 en dinsdag 23 november is Cafetaria Heymans uitzonderlijk gesloten.", open=True)
    return original


def heymans_november_22_2021_en(_path, original):
    """Heymans exceptionally closed on 22-23 November (English)."""
    # NOTE(review): "22th"/"23th" are kept verbatim from the published text.
    original.update(message="Monday, the 22th and Tuesday, the 23th of November, Cafetaria Heymans is exceptionally closed.", open=True)
    return original


def heymans_november_23_2021(_path, original):
    """Heymans reopens on 23 November (Dutch)."""
    original.update(message="Cafetaria Heymans terug open op 23 november.", open=True)
    return original


def heymans_november_23_2021_en(_path, original):
    """Heymans reopens on 23 November (English)."""
    original.update(message="Cafetaria Heymans open again on 23th of November.", open=True)
    return original


def heymans_november_24_26_2021(_path, original):
    """Heymans closed 24-26 November, UZ Gent stays open (Dutch)."""
    original.update(message="Cafetaria Heymans is gesloten op 24, 25 en 26 november. Cafetaria UZ Gent is open.", open=True)
    return original


def heymans_november_24_26_2021_en(_path, original):
    """Heymans closed 24-26 November, UZ Gent stays open (English)."""
    original.update(message="Cafetaria Heymans will be closed on 24, 25 and 26 November. Cafeteria UZ Gent is open.", open=True)
    return original


def christmas_2021(_path, original):
    """Christmas 2021 closure overview (Dutch)."""
    original.update(
        message="De Brug Avond, cafetaria’s Boekentoren, Ledeganck en Heymans gesloten vanaf 20 december. Alle resto’s en cafetaria’s gesloten op 23 en 24 december en op 3 januari.",
        open=True,
    )
    return original


def christmas_2021_en(_path, original):
    """Christmas 2021 closure overview (English)."""
    original.update(
        message="De Brug Evening, cafeterias Boekentoren, Ledeganck and Heymans closed as of December 20. All restos and cafeterias closed on December 23 and 24 and on January 3.",
        open=True,
    )
    return original
def newyear_2022(_path, original):
    """Reduced service 4-7 January 2022 (Dutch); warm meals only in De Brug."""
    original.update(
        message="Van 4 t.e.m. 7 januari zijn enkel Resto De Brug, Resto Sterre, Resto Coupure, Resto Ardoyen en Cafetaria UZ Gent open. Enkel in Resto De Brug zijn er warme maaltijden.",
        open=True,
    )
    return original


def newyear_2022_en(_path, original):
    """Reduced service 4-7 January 2022 (English)."""
    original.update(
        message="From 4 to 7 January only Resto De Brug, Resto Sterre, Resto Coupure, Resto Ardoyen and Cafetaria UZ Gent will be open. Only in Resto De Brug there are warm meals.",
        open=True,
    )
    return original


def closures_january_2022(_path, original):
    """January 2022 closures (Dutch); locations stay open overall."""
    original.update(
        message="Cafetaria’s Boekentoren en Ledeganck gesloten, geen warme maaltijden meer in resto’s Dunant en Merelbeke.",
        open=True,
    )
    return original


def closures_january_2022_en(_path, original):
    """January 2022 closures (English)."""
    original.update(
        message="Cafeterias Boekentoren and Ledeganck closed, no more warm meals in restaurants Dunant and Merelbeke.",
        open=True,
    )
    return original


def paasvakantie2022(_path, original):
    """Easter 2022 service changes (Dutch); open state untouched."""
    original["message"] = ("In de paasvakantie wijzigt de dienstverlening grondig. "
                           "Warme maaltijden enkel in De Brug, uitgezonderd de sluiting op 8 en 15 april. "
                           "Bekijk de website voor alle details over alle locaties.")
    return original


def easter2022(_path, original):
    """Easter 2022 service changes (English); open state untouched."""
    original["message"] = ("In the easter recess, the service is heavily modified. "
                           "Hot meals only in the Brug, except the closure on April 8th and April 15th. "
                           "Check the website for more details on all locations.")
    return original


def zomer_2022_1(_path, original):
    """Summer 2022 overview (Dutch); open state untouched."""
    original["message"] = ("In juli, augustus en september sluiten verschillende resto's of doen ze enkel dienst als cafetaria. "
                           "Kijk op de website voor alle details.")
    return original


def summer_2022_1(_path, original):
    """Summer 2022 overview (English); open state untouched."""
    original["message"] = ("In July, August and September, multiple restaurants close or only act as a cafetaria. "
                           "Check the website for more details.")
    return original


def zomer_2022_2(_path, original):
    """Full closure 18-29 July 2022 (Dutch)."""
    original.update(message="Vanaf 18 juli tot en met 29 juli zijn alle resto's en cafetaria's gesloten.", open=False)
    return original


def summer_2022_2(_path, original):
    """Full closure 18-29 July 2022 (English)."""
    original.update(message="From July 18th until July 29th, all restaurants and cafeterias are closed.", open=False)
    return original
def close_time_nl(_path, original):
    """Append (or set) the team-building closure note (Dutch) and close."""
    note = "Op 20 september zijn alle resto’s en cafetaria’s gesloten door onze teambuilding."
    if "message" in original:
        original["message"] = original["message"] + " " + note
    else:
        original["message"] = note
    original["open"] = False
    return original


def close_time_en(_path, original):
    """Append (or set) the team-building closure note (English) and close."""
    note = "All restaurants and cafeterias are closed on 20 September, due to our team building."
    if "message" in original:
        original["message"] = original["message"] + " " + note
    else:
        original["message"] = note
    original["open"] = False
    return original


def close_ardoyen_nl(_path, original):
    """Append (or set) the Ardoyen FSVM2 opening-event note (Dutch)."""
    note = "Op 16 september is Resto Ardoyen gesloten wegens het openingsevent van het FSVM2 onderzoeksgebouw."
    if "message" in original:
        original["message"] = original["message"] + " " + note
    else:
        original["message"] = note
    return original


def close_ardoyen_en(_path, original):
    """Append (or set) the Ardoyen FSVM2 opening-event note (English)."""
    note = "Resto Ardoyen is closed on 16 September, due to the opening event of the FSVM2 research building."
    if "message" in original:
        original["message"] = original["message"] + " " + note
    else:
        original["message"] = note
    return original


def no_more_soup_nl(_path, original):
    """Temporary soup shortage notice (Dutch); open state untouched."""
    original["message"] = ("Door ernstige productieproblemen bij de leverancier is er tijdelijk geen soep meer te "
                           "verkrijgen. We werken hard aan een oplossing en ten laatste 7 november zal er opnieuw "
                           "soep zijn. Hou onze website en de TV-schermen in de gaten voor de meest recente update "
                           "hierover. ")
    return original


def no_more_soup_en(_path, original):
    """Temporary soup shortage notice (English); open state untouched."""
    original["message"] = ("Due to serious production problems at the purveyor, soup will temporarily no longer be "
                           "available. We are working hard to resolve this issue. Soup should be available again "
                           "November 7th at the latest. Watch our website and tv screens for the most up-to-date "
                           "information. ")
    return original
def create_changes(root_path):
    """Build the list of every manual change ever applied to the v2 menus.

    :param root_path: Root folder of the v2 output. Currently unused, but the
        parameter is kept so existing callers keep working.
    :return: List of ManualChange instances, oldest first.
    """
    return [
        # Restjesmaand 2018
        ManualChange(
            replacer=restjesmaand18_replacer,
            resto="nl-sintjansvest",
            start=date(2018, 6, 1),
            end=date(2018, 6, 30),
        ),
        # Dingen voor de paasvakantie 19
        ManualChange(
            replacer=paasvakantie19_general,
            resto="nl",
            start=date(2019, 4, 8),
            end=date(2019, 4, 19)
        ),
        ManualChange(
            replacer=paasvakantie19_en,
            resto="en",
            start=date(2019, 4, 8),
            end=date(2019, 4, 19)
        ),
        ManualChange(
            replacer=paasvakantie19_brug,
            resto="nl-debrug",
            start=date(2019, 4, 8),
            end=date(2019, 4, 19)
        ),
        # Werken aan De Brug from 20/05/2019 - 30/09/2019
        ManualChange(
            replacer=werken_brug19_replacer,
            resto="nl-debrug",
            start=date(2019, 5, 20),
            end=date(2019, 9, 29),
            all_days=True
        ),
        # Er is nog meer vertraging
        ManualChange(
            replacer=werken_brug19_replacer2,
            resto="nl-debrug",
            start=date(2019, 9, 30),
            end=date(2019, 11, 11),
            all_days=True
        ),
        ManualChange(
            replacer=tijdelijke_sluiting_sint_jansvest,
            resto="nl-sintjansvest",
            start=date(2019, 12, 16),
            end=date(2020, 1, 10),
            all_days=True,
        ),
        # Corona
        ManualChange(
            replacer=corona_sluiting_nl,
            resto=["nl", "nl-sintjansvest", "nl-debrug", "nl-heymans", "nl-kantienberg"],
            start=date(2020, 3, 16),
            end=date(2020, 6, 7),
            all_days=True
        ),
        ManualChange(
            replacer=corona_sluiting_en,
            resto="en",
            start=date(2020, 3, 16),
            end=date(2020, 6, 7),
            all_days=True
        ),
        ManualChange(
            replacer=corona_heropening_nl,
            resto="nl",
            start=date(2020, 9, 7),
            end=date(2020, 9, 20),
            all_days=True
        ),
        ManualChange(
            replacer=corona_heropening_en,
            resto="en",
            start=date(2020, 9, 7),
            end=date(2020, 9, 20),
            all_days=True
        ),
        ManualChange(
            replacer=corona_closed_for_now,
            resto=["nl-debrug", "nl-heymans"],
            start=date(2020, 9, 7),
            end=date(2020, 9, 20),
            all_days=True
        ),
        ManualChange(
            replacer=kantienberg_2020,
            resto="nl-kantienberg",
            start=date(2020, 9, 7),
            end=date(2021, 7, 1),
            all_days=True
        ),
        ManualChange(
            replacer=corona_2020_2021_en,
            resto="en",
            start=date(2020, 9, 21),
            end=date(2020, 10, 18)
        ),
        ManualChange(
            replacer=corona_2020_2021_nl,
            resto=["nl", "nl-debrug", "nl-heymans"],
            start=date(2020, 9, 21),
            end=date(2020, 10, 18)
        ),
        ManualChange(
            replacer=corona_2020_2021_en_red,
            resto="en",
            start=date(2020, 10, 19),
            end=date(2020, 12, 19)
        ),
        ManualChange(
            replacer=corona_2020_2021_nl_red,
            resto=["nl-debrug", "nl-heymans", "nl-sterre", "nl-ardoyen"],
            start=date(2020, 10, 19),
            end=date(2020, 12, 19)
        ),
        ManualChange(
            replacer=corona_2020_2021_cold,
            resto=["nl-coupure", "nl-dunant", "nl-merelbeke"],
            start=date(2020, 11, 28),
            end=date(2020, 12, 31)
        ),
        ManualChange(
            replacer=christmas,
            resto=["nl-debrug", "nl-heymans", "nl-dunant", "nl-coupure", "nl-sterre", "nl-ardoyen", "nl-merelbeke"],
            start=date(2020, 12, 21),
            end=date(2020, 12, 25),
            all_days=True
        ),
        # Dunant is closed on individual exam days in January 2021.
        ManualChange(
            replacer=exam_closure_dunant_2020,
            resto="nl-dunant",
            start=date(2021, 1, 4),
            end=date(2021, 1, 4),
            all_days=True
        ),
        ManualChange(
            replacer=exam_closure_dunant_2020,
            resto="nl-dunant",
            start=date(2021, 1, 8),
            end=date(2021, 1, 8),
            all_days=True
        ),
        ManualChange(
            replacer=exam_closure_dunant_2020,
            resto="nl-dunant",
            start=date(2021, 1, 15),
            end=date(2021, 1, 15),
            all_days=True
        ),
        ManualChange(
            replacer=exam_closure_dunant_2020,
            resto="nl-dunant",
            start=date(2021, 1, 18),
            end=date(2021, 1, 18),
            all_days=True
        ),
        ManualChange(
            replacer=exam_closure_dunant_2020,
            resto="nl-dunant",
            start=date(2021, 1, 22),
            end=date(2021, 1, 22),
            all_days=True
        ),
        ManualChange(
            replacer=exam_closure_dunant_2020,
            resto="nl-dunant",
            start=date(2021, 1, 25),
            end=date(2021, 1, 25),
            all_days=True
        ),
        ManualChange(
            replacer=exam_closure_dunant_2020,
            resto="nl-dunant",
            start=date(2021, 1, 29),
            end=date(2021, 1, 29),
            all_days=True
        ),
        # NOTE(review): the next entry also covers 5 January, which the
        # closure message does not mention, and it overlaps the entry after
        # it (both include 4 January) — looks like a copy-paste slip; kept
        # as-is because it was already applied historically.
        ManualChange(
            replacer=exam_closure_sterre_2020,
            resto="nl-sterre",
            start=date(2021, 1, 4),
            end=date(2021, 1, 5),
            all_days=True
        ),
        ManualChange(
            replacer=exam_closure_sterre_2020,
            resto="nl-sterre",
            start=date(2021, 1, 4),
            end=date(2021, 1, 4),
            all_days=True
        ),
        ManualChange(
            replacer=exam_closure_sterre_2020,
            resto="nl-sterre",
            start=date(2021, 1, 15),
            end=date(2021, 1, 15),
            all_days=True
        ),
        ManualChange(
            replacer=exam_closure_sterre_2020,
            resto="nl-sterre",
            start=date(2021, 1, 18),
            end=date(2021, 1, 18),
            all_days=True
        ),
        ManualChange(
            replacer=exam_closure_sterre_2020,
            resto="nl-sterre",
            start=date(2021, 1, 26),
            end=date(2021, 1, 26),
            all_days=True
        ),
        ManualChange(
            replacer=exam_closure_en_2020,
            resto="en",
            start=date(2021, 1, 4),
            end=date(2021, 1, 29),
            all_days=False
        ),
        ManualChange(
            replacer=dies_natalis_2021,
            resto=["nl-debrug", "nl-heymans", "nl-dunant", "nl-coupure", "nl-sterre", "nl-ardoyen", "nl-merelbeke"],
            start=date(2021, 3, 19),
            end=date(2021, 3, 19),
            all_days=True
        ),
        ManualChange(
            replacer=dies_natalis_2021_en,
            resto="en",
            start=date(2021, 3, 19),
            end=date(2021, 3, 19),
            all_days=True
        ),
        ManualChange(
            replacer=easter_2021_week1,
            resto=["nl-debrug", "nl-heymans", "nl-sterre", "nl-ardoyen"],
            start=date(2021, 4, 5),
            end=date(2021, 4, 9),
            all_days=True
        ),
        ManualChange(
            replacer=easter_2021_week2,
            resto=["nl-debrug", "nl-heymans", "nl-sterre", "nl-ardoyen", "nl-coupure"],
            start=date(2021, 4, 12),
            end=date(2021, 4, 16),
            all_days=True
        ),
        ManualChange(
            replacer=summer_2021_1,
            resto=["nl-debrug", "nl-sterre", "nl-ardoyen", "nl-merelbeke"],
            start=date(2021, 8, 9),
            # BUG FIX: this used to read end=date(2021, 4, 16), which lies
            # before the start date, so the change could never match any
            # day. The intended window is the week before summer_2021_2
            # takes over on 16 August.
            end=date(2021, 8, 13),
            all_days=True
        ),
        ManualChange(
            replacer=summer_2021_2,
            resto=["nl-sterre", "nl-merelbeke", "nl-coupure", "nl-heymans"],
            start=date(2021, 8, 16),
            end=date(2021, 9, 13),
            all_days=True
        ),
        ManualChange(
            replacer=summer_2021_2,
            resto="nl-debrug",
            start=date(2021, 8, 16),
            end=date(2021, 9, 1),
            all_days=True
        ),
        ManualChange(
            replacer=summer_2021_2,
            resto="nl-ardoyen",
            start=date(2021, 8, 16),
            end=date(2021, 8, 25),
            all_days=True
        ),
        # One change per Friday between 22 November and 31 December 2021.
        *[ManualChange(
            replacer=brug_avond,
            resto="nl",
            start=date(2021, 11, 22) + timedelta(days=x),
            end=date(2021, 11, 22) + timedelta(days=x)
        ) for x in range((date(2021, 12, 31) - date(2021, 11, 22)).days + 1) if
            (date(2021, 11, 22) + timedelta(days=x)).weekday() == 4],
        ManualChange(
            replacer=november_12_2021,
            resto=["nl"],
            start=date(2021, 11, 12),
            end=date(2021, 11, 12),
            all_days=True
        ),
        ManualChange(
            replacer=november_12_2021_en,
            resto=["en"],
            start=date(2021, 11, 12),
            end=date(2021, 11, 12),
            all_days=True
        ),
        ManualChange(
            replacer=heymans_november_22_2021,
            resto=["nl"],
            start=date(2021, 11, 22),
            end=date(2021, 11, 22),
            all_days=True
        ),
        ManualChange(
            replacer=heymans_november_22_2021_en,
            resto=["en"],
            start=date(2021, 11, 22),
            end=date(2021, 11, 22),
            all_days=True
        ),
        ManualChange(
            replacer=heymans_november_23_2021,
            resto=["nl"],
            start=date(2021, 11, 23),
            end=date(2021, 11, 23),
            all_days=True
        ),
        ManualChange(
            replacer=heymans_november_23_2021_en,
            resto=["en"],
            start=date(2021, 11, 23),
            end=date(2021, 11, 23),
            all_days=True
        ),
        ManualChange(
            replacer=heymans_november_24_26_2021,
            resto=["nl"],
            start=date(2021, 11, 24),
            end=date(2021, 11, 26),
            all_days=True
        ),
        ManualChange(
            replacer=heymans_november_24_26_2021_en,
            resto=["en"],
            start=date(2021, 11, 24),
            end=date(2021, 11, 26),
            all_days=True
        ),
        ManualChange(
            replacer=christmas_2021,
            resto=["nl"],
            start=date(2021, 12, 20),
            end=date(2022, 1, 3),
            all_days=True
        ),
        ManualChange(
            replacer=christmas_2021_en,
            resto=["en"],
            start=date(2021, 12, 20),
            end=date(2022, 1, 3),
            all_days=True
        ),
        ManualChange(
            replacer=newyear_2022,
            resto=["nl"],
            start=date(2022, 1, 4),
            end=date(2022, 1, 7),
            all_days=True
        ),
        ManualChange(
            replacer=newyear_2022_en,
            resto=["en"],
            start=date(2022, 1, 4),
            end=date(2022, 1, 7),
            all_days=True
        ),
        ManualChange(
            replacer=closures_january_2022,
            resto=["nl"],
            start=date(2022, 1, 17),
            end=date(2022, 1, 28),
            all_days=True
        ),
        ManualChange(
            replacer=closures_january_2022_en,
            resto=["en"],
            start=date(2022, 1, 17),
            end=date(2022, 1, 28),
            all_days=True
        ),
        ManualChange(
            replacer=paasvakantie2022,
            resto=["nl"],
            start=date(2022, 4, 4),
            end=date(2022, 4, 17),
            all_days=True
        ),
        ManualChange(
            replacer=easter2022,
            resto=["en"],
            start=date(2022, 4, 4),
            end=date(2022, 4, 17),
            all_days=True
        ),
        ManualChange(
            replacer=zomer_2022_1,
            resto=["nl"],
            start=date(2022, 6, 27),
            end=date(2022, 7, 15),
            all_days=True
        ),
        ManualChange(
            replacer=summer_2022_1,
            resto=["en"],
            start=date(2022, 6, 27),
            end=date(2022, 7, 15),
            all_days=True
        ),
        ManualChange(
            replacer=zomer_2022_2,
            resto=["nl"],
            start=date(2022, 7, 18),
            end=date(2022, 7, 29),
            all_days=True
        ),
        ManualChange(
            replacer=summer_2022_2,
            resto=["en"],
            start=date(2022, 7, 18),
            end=date(2022, 7, 29),
            all_days=True
        ),
        ManualChange(
            replacer=zomer_2022_1,
            resto=["nl"],
            start=date(2022, 8, 1),
            end=date(2022, 9, 16),
            all_days=True
        ),
        ManualChange(
            replacer=summer_2022_1,
            resto=["en"],
            start=date(2022, 8, 1),
            end=date(2022, 9, 16),
            all_days=True
        ),
        ManualChange(
            replacer=close_time_nl,
            resto=["nl"],
            start=date(2022, 9, 14),
            end=date(2022, 9, 20),
            all_days=True
        ),
        ManualChange(
            replacer=close_time_en,
            resto=["en"],
            start=date(2022, 9, 14),
            end=date(2022, 9, 20),
            all_days=True
        ),
        ManualChange(
            replacer=close_ardoyen_nl,
            resto=["nl"],
            start=date(2022, 9, 15),
            end=date(2022, 9, 16),
            all_days=True
        ),
        ManualChange(
            replacer=close_ardoyen_en,
            resto=["en"],
            start=date(2022, 9, 15),
            end=date(2022, 9, 16),
            all_days=True
        ),
        ManualChange(
            replacer=no_more_soup_nl,
            resto=["nl"],
            start=date(2022, 10, 26),
            end=date(2022, 11, 7),
            all_days=True
        ),
        ManualChange(
            replacer=no_more_soup_en,
            resto=["en"],
            start=date(2022, 10, 26),
            end=date(2022, 11, 7),
            all_days=True
        )
    ]
# Actually do things ----------------------------------------------------------
def apply_existing_menus_only(output, manual_change, dates):
    """Apply the change to only existing menus.

    :param output: Root folder of the v2 API output.
    :param manual_change: The ManualChange to apply.
    :param dates: Mapping resto -> ISO date string -> updated menu content,
        updated in place so overviews can be rebuilt afterwards.
    """
    print(f"Matching existing menus from {manual_change.resto} between {manual_change.start} to {manual_change.end}")
    print("====================================================================")
    # Compile once; the pattern is the same for every resto and file.
    file_pattern = re.compile(r'.*/(\d+)/(\d+)/(\d+)\.json$')
    for resto in manual_change.resto:
        files = glob.glob(f"{output}/menu/{resto}/*/*/*.json")
        for path in files:
            # Check if this file applies or not. Normalise Windows path
            # separators so the pattern can match.
            m = file_pattern.search(path.replace("\\", "/"))
            if m is None:
                # Path does not follow the year/month/day layout; previously
                # this crashed with AttributeError on m.group — skip instead.
                continue
            file_date = date(int(m.group(1)), int(m.group(2)), int(m.group(3)))
            if not manual_change.is_applicable(file_date):
                continue
            with open(path, 'r') as f:
                overview = json.load(f)
            _new_content = manual_change.replacer(path, overview)
            dates[resto][_new_content["date"]] = _new_content
            with open(path, 'w') as f:
                f.write(json.dumps(_new_content))
def apply_all_menus(output, manual_change, dates):
    """Apply the change to all dates in the applicable range. If no menu exist for a day, it will be created.

    :param output: Root folder of the v2 API output.
    :param manual_change: The ManualChange to apply (must have all_days=True).
    :param dates: Mapping resto -> ISO date string -> updated menu content,
        updated in place so overviews can be rebuilt afterwards.
    """
    print(f"Matching all menus from {manual_change.resto} between {manual_change.start} to {manual_change.end}")
    print("====================================================================")
    for applicable_date in manual_change.date_range():
        # Get existing file if it exists
        for resto in manual_change.resto:
            path = f"{output}/menu/{resto}/{applicable_date.year}/{applicable_date.month}/{applicable_date.day}.json"
            try:
                with open(path, 'r') as f:
                    menu = json.load(f)
            except FileNotFoundError:
                # Only a genuinely missing file gets a default closed menu.
                # (The old `except IOError` also swallowed e.g. permission
                # errors, silently replacing real menus with an empty one.)
                os.makedirs(os.path.dirname(path), exist_ok=True)
                menu = {'open': False, 'date': applicable_date.strftime('%Y-%m-%d'), 'meals': [], 'vegetables': []}
            # Apply the changes
            _new_content = manual_change.replacer(path, menu)
            dates[resto][_new_content["date"]] = _new_content
            with open(path, 'w') as f:
                f.write(json.dumps(_new_content))
def main(output):
    """Apply all manual changes to *output* and rebuild the overview files.

    :param output: Root folder of the v2 API output.
    """
    to_apply = create_changes(output)
    # Maps resto -> ISO date string -> updated menu, filled in by the
    # apply_* helpers below and consumed by the overview rebuild.
    dates = defaultdict(dict)
    for manual_change in to_apply:
        if manual_change.all_days:
            apply_all_menus(output, manual_change, dates)
        else:
            apply_existing_menus_only(output, manual_change, dates)
    # NOTE(review): this loop runs once per manual change, so the same
    # overview file may be rebuilt several times; each pass re-reads the
    # file it just wrote, so the final result is still consistent.
    for manual_change in to_apply:
        print("Rebuilding overviews")
        for resto in manual_change.resto:
            match_glob = f"menu/{resto}/overview.json"
            print(match_glob)
            overviews = glob.glob(f"{output}/{match_glob}")
            # For each overview that should be rebuild
            for path in overviews:
                print(f"Rebuilding {path}")
                new_overview = []
                with open(path, 'r') as f:
                    overview = json.loads(f.read())
                last_day = None
                # If the date is modified, replace it
                for day in overview:
                    if day["date"] in dates[resto]:
                        print(f"Updating {day['date']}")
                        new_overview.append(dates[resto][day["date"]])
                    else:
                        print(f"Keeping {day['date']}")
                        new_overview.append(day)
                    last_day = day["date"]
                # We want to provide at least ten days in the future.
                to_add = max(OVERVIEW_COUNT - len(overview), 0)
                if last_day:
                    last_day = datetime.strptime(last_day, '%Y-%m-%d').date()
                # Top the overview up with freshly created menus that lie
                # after the last listed day (or after today if the overview
                # was empty), up to OVERVIEW_COUNT entries in total.
                # NOTE(review): iteration follows dict insertion order, not
                # chronological order — appended days may end up unsorted;
                # confirm consumers tolerate that.
                for day in dates[resto]:
                    dday = datetime.strptime(day, '%Y-%m-%d').date()
                    if ((last_day and dday <= last_day) or (last_day is None and dday < date.today())) or to_add <= 0:
                        continue
                    new_overview.append(dates[resto][day])
                    to_add -= 1
                with open(path, 'w') as f:
                    f.write(json.dumps(new_overview))
                print("Wrote updated overview")
if __name__ == '__main__':
    # CLI entry point: apply the manual corrections to the given output folder.
    argument_parser = argparse.ArgumentParser(description='Apply manual corrections to scraped menu')
    argument_parser.add_argument('output', help='Folder of v2 output.')
    parsed_args = argument_parser.parse_args()
    main(parsed_args.output)
Change end date of no more soup message
#!/usr/bin/env python3
import argparse
import glob
import json
import os
import re
from collections import defaultdict
from datetime import date, timedelta, datetime
OVERVIEW_COUNT = 10
# Common things ---------------------------------------------------------------
# See main at bottom
class ManualChange:
    """
    Apply a change to a range of menus in the v2 API. v1 is not supported.
    """

    def __init__(self, replacer, resto, start, end, all_days=False):
        """
        :param replacer: Function performing the replacement; it is called
            with the file path and the original menu.
        :param start: First date the change applies to (inclusive).
        :param end: Last date the change applies to (inclusive).
        :param resto: Restaurant name or list of restaurant names.
        :param all_days: When True, the change covers every weekday in the
            range; when False (the default) it only touches days for which
            a menu already exists.
        """
        self.replacer = replacer
        self.start = start
        self.end = end
        # Normalise a single restaurant name to a one-element list.
        self.resto = [resto] if isinstance(resto, str) else resto
        assert isinstance(self.resto, list)
        self.all_days = all_days

    def is_applicable(self, menu_date):
        """Check if this change is applicable to the given date"""
        return not (menu_date < self.start or menu_date > self.end)

    def date_range(self):
        """Return an iterator over the applicable range. Only weekdays are returned."""
        current = self.start
        while current <= self.end:
            if current.weekday() < 5:
                yield current
            current += timedelta(days=1)
# Restjesmaand Zomer 18
# Sint-Jansvest die geen menu meer serveert, alleen overschotten.
def restjesmaand18_replacer(_path, original):
# original: {"date": "2018-06-14", "meals": [], "open": false, "vegetables": []}
name = ("Om voedseloverschotten op het einde van het academiejaar te beperken, "
"kunnen we geen dagmenu presenteren. "
"Ga langs en laat je verrassen door ons keukenpersoneel.")
return {
"message": name,
"date": original["date"],
"meals": [],
"open": True,
"vegetables": [],
}
# Paasvakantie 2019
def paasvakantie19_general(_path, original):
original['message'] = ("Tijdens de paasvakantie zijn resto's Campus Sterre en Campus Merelbeke geopend als "
"cafetaria.")
original['open'] = True
return original
def paasvakantie19_en(_path, original):
original['message'] = 'During the Easter Holiday restos Campus Sterre and Campus Merelbeke operate as cafetaria.'
original['open'] = True
return original
def paasvakantie19_brug(_path, original):
original['message'] = "Tijdens de paasvakantie is De Brug enkel 's middags geopend."
return original
# Werken in De Brug waardoor de resto gesloten is.
def werken_brug19_replacer(_path, original):
message = ('De Brug sluit van 20 mei tot 30 september 2019 voor verbouwingswerken. Tijdens de sluiting neemt resto '
'Kantienberg de functies en het aanbod van de Brug over, zoals de avondopening.')
return {
"message": message,
"date": original["date"],
"open": False
}
def werken_brug19_replacer2(_path, original):
message = ("Resto De Brug en Cafetaria De Brug zijn nog even gesloten in afwachting van het voltooien van de"
" werken. Tot dan kan je's middags en 's avonds terecht in Resto Kantienberg. Wij houden jullie op de"
" hoogte!<br>'s Middags is Resto Sint-Jansvest tijdelijk een reguliere resto met een uitgebreid aanbod"
" aan belegde broodjes. Enkel soep of broodjes nodig? Dan is Cafetaria campus Boekentoren (via"
" Blandijnberg) zeer dichtbij.")
return {
"message": message,
"date": original["date"],
"open": False
}
def tijdelijke_sluiting_sint_jansvest(_path, original):
message = "Resto Sint-Jansvest is tijdelijk gesloten wegens wegenwerken. Tijdens de werken kan u terecht in De " \
"Brug. "
return {
"message": message,
"date": original["date"],
"open": False,
"meals": original.get("meals", [])
}
def corona_sluiting_nl(_path, original):
message = "De studentenrestaurants en cafetaria's sluiten vanaf maandag 16 maart 2020 de deuren. " \
"De UGent neemt die maatregel om verdere verspreiding van het coronavirus tot een minimum te beperken. " \
"De sluiting loopt zeker tot en met 7 juni 2020."
return {
"message": message,
"date": original["date"],
"open": False
}
def corona_sluiting_en(_path, original):
message = "The student restaurants and cafeterias will be closed as from Monday 16 March 2020. " \
"Ghent University is taking this measure to minimize the further spreading of the coronavirus. " \
"The closure will certainly last until 7 June 2020."
return {
"message": message,
"date": original["date"],
"open": False
}
def corona_heropening_nl(_path, original):
message = "Ter plaatse eten is momenteel niet mogelijk; enkel takeaway van een beperkt aanbod. De coronamaatregelen blijven van kracht! " \
"Resto Dunant, Coupure en Sterre en van cafetaria UZ Gent en Boekentoren zijn opnieuw open. " \
"Bij de start van het academiejaar volgen de andere locaties."
return {
"message": message,
"date": original["date"],
"open": True,
"meals": [{
"kind": "meat",
"type": "main",
"name": "Spaghetti bolognese met kaas",
"price": "\u20ac 3,60"
}, {
"kind": "vegetarian",
"type": "main",
"name": "Salad bowl: Caesar",
"price": ""
}, {
"kind": "vegetarian",
"type": "main",
"name": "Salad bowl: Tomaat-Mozzarella",
"price": ""
}, {
"kind": "soup",
"type": "main",
"name": "Dagsoep",
"price": ""
}],
"vegetables": []
}
def corona_heropening_en(_path, original):
message = "The canteen is closed; only takeaway of a reduced offering is possible. The Corona measures remain active! " \
"Resto Dunant, Coupure & Sterre and cafetaria UZ Gent & Boekentoren are open. " \
"At the start of the academic year, the other locations will follow."
return {
"message": message,
"date": original["date"],
"open": True,
"meals": [{
"kind": "meat",
"type": "main",
"name": "Spaghetti bolognese with cheese",
"price": "\u20ac 3,60"
}, {
"kind": "vegetarian",
"type": "main",
"name": "Salad bowl: Caesar",
"price": ""
}, {
"kind": "vegetarian",
"type": "main",
"name": "Salad bowl: Tomato-Mozzarella",
"price": ""
}, {
"kind": "soup",
"type": "main",
"name": "Soup of the day",
"price": ""
}],
"vegetables": []
}
def corona_closed_for_now(_path, original):
message = "Resto Dunant, Coupure en Sterre en van cafetaria UZ Gent en Boekentoren zijn opnieuw open. " \
"Bij de start van het academiejaar volgen de andere locaties."
return {
"message": message,
"date": original["date"],
"open": False
}
def kantienberg_2020(_path, original):
return {
"message": "Resto Kantienberg blijft gesloten tijdens academiejaar 2020-2021.",
"date": original["date"],
"open": False
}
def corona_2020_2021_nl(_path, original):
message = "Door de coronamaatregelen veranderen enkele zaken: ter plaatse eten is niet mogelijk " \
"(enkel afhalen) en er is een beperkter aanbod."
original["message"] = message
return original
def corona_2020_2021_en(_path, original):
message = "Due to the corona measures, some changes are made: only takeaway is possible " \
"and the offering is reduced."
original["message"] = message
return original
def corona_2020_2021_nl_red(_path, original):
message = "Enkel afhalen en een beperkter aanbod. De coronamaatregelen blijven van kracht!"
original["message"] = message
return original
def corona_2020_2021_cold(_path, original):
message = "Enkel cafetaria-aanbod en koude meeneemgerechten. De coronamaatregelen blijven van kracht!"
original["message"] = message
return original
def corona_2020_2021_en_red(_path, original):
message = "Due to the corona measures, some changes are made: only takeaway is possible " \
"and the offering is reduced. " \
"The restaurants and cafetaria's will remain open in code red."
original["message"] = message
return original
def exam_closure_sterre_2020(_path, original):
message = "Door examens zal de resto gesloten zijn op 4, 15, 18 en 26 januari."
original["message"] = message
original["open"] = False
return original
def exam_closure_dunant_2020(_path, original):
message = "Door examens zal de resto gesloten zijn op 4, 8, 15, 18, 22, 25 en 29 januari."
original["message"] = message
original["open"] = False
return original
def christmas(_path, original):
original["message"] = "Naast de UGent-verlofdagen zijn de resto's ook gesloten tijdens de eerste week van de " \
"kerstvakantie. "
original["open"] = False
return original
def exam_closure_en_2020(_path, original):
original["message"] = "Resto Sterre and Dunant are closed on some days in January due to exams. Check the site " \
"for more details."
return original
def dies_natalis_2021(_path, original):
original["message"] = "De resto's zijn gesloten op Dies Natalis."
original["open"] = False
return original
def dies_natalis_2021_en(_path, original):
original["message"] = "The restaurants are closed on Dies Natalis."
original["open"] = False
return original
def easter_2021_week1(_path, original):
original["message"] = "In de paasvakantie zullen resto's Sterre, Ardoyen, De Brug en UZ Gent open zijn, " \
"maar enkel als cafetaria. "
original["open"] = True
return original
def easter_2021_week2(_path, original):
original["message"] = "In de paasvakantie zullen resto's Sterre, Ardoyen, De Brug, UZ Gent en Coupure open zijn, " \
"maar enkel als cafetaria. "
original["open"] = True
return original
def summer_2021_1(_path, original):
original["message"] = "Cafetaria de Brug en resto's Ardoyen, Sterre en Merelbeke met een gewijzigd aanbod. Er zullen" \
" dan enkel broodjes en salad bowls te verkrijgen zijn. De zitplaatsen kunnen nog niet gebruikt worden."
original["open"] = True
return original
def summer_2021_2(_path, original):
original["message"] = "Cafetaria's de Brug en UZ Gent, en resto's Ardoyen, Sterre, Coupure en Merelbeke met een gewijzigd aanbod. Er zullen" \
" dan enkel broodjes en salad bowls te verkrijgen zijn. De zitplaatsen kunnen nog niet gebruikt worden."
original["open"] = True
return original
def brug_avond(_path, original):
original["message"] = "De Brug is vrijdagavond gesloten."
return original
def november_12_2021(_path, original):
original["message"] = "Vrijdag 12 november zullen alle resto's en cafetaria's gesloten zijn, behalve cafetaria De Brug."
original["open"] = False
return original
def november_12_2021_en(_path, original):
original["message"] = "Friday, the 12th of November, all restos and cafeterias will be closed except for cafeteria De Brug."
original["open"] = False
return original
def heymans_november_22_2021(_path, original):
original["message"] = "Maandag 22 en dinsdag 23 november is Cafetaria Heymans uitzonderlijk gesloten."
original["open"] = True
return original
def heymans_november_22_2021_en(_path, original):
    """Heymans exceptionally closed on 22-23 November 2021 (EN).

    Fixes the ordinal typos in the user-facing message: "22th"/"23th"
    -> "22nd"/"23rd".
    """
    original["message"] = "Monday, the 22nd and Tuesday, the 23rd of November, Cafetaria Heymans is exceptionally closed."
    original["open"] = True
    return original
def heymans_november_23_2021(_path, original):
original["message"] = "Cafetaria Heymans terug open op 23 november."
original["open"] = True
return original
def heymans_november_23_2021_en(_path, original):
    """Heymans open again on 23 November 2021 (EN).

    Fixes the ordinal typo in the user-facing message: "23th" -> "23rd".
    """
    original["message"] = "Cafetaria Heymans open again on 23rd of November."
    original["open"] = True
    return original
def heymans_november_24_26_2021(_path, original):
original["message"] = "Cafetaria Heymans is gesloten op 24, 25 en 26 november. Cafetaria UZ Gent is open."
original["open"] = True
return original
def heymans_november_24_26_2021_en(_path, original):
original["message"] = "Cafetaria Heymans will be closed on 24, 25 and 26 November. Cafeteria UZ Gent is open."
original["open"] = True
return original
def christmas_2021(_path, original):
original["message"] = "De Brug Avond, cafetaria’s Boekentoren, Ledeganck en Heymans gesloten vanaf 20 december. Alle resto’s en cafetaria’s gesloten op 23 en 24 december en op 3 januari."
original["open"] = True
return original
def christmas_2021_en(_path, original):
original["message"] = "De Brug Evening, cafeterias Boekentoren, Ledeganck and Heymans closed as of December 20. All restos and cafeterias closed on December 23 and 24 and on January 3."
original["open"] = True
return original
def newyear_2022(_path, original):
original["message"] = "Van 4 t.e.m. 7 januari zijn enkel Resto De Brug, Resto Sterre, Resto Coupure, Resto Ardoyen en Cafetaria UZ Gent open. Enkel in Resto De Brug zijn er warme maaltijden."
original["open"] = True
return original
def newyear_2022_en(_path, original):
original["message"] = "From 4 to 7 January only Resto De Brug, Resto Sterre, Resto Coupure, Resto Ardoyen and Cafetaria UZ Gent will be open. Only in Resto De Brug there are warm meals."
original["open"] = True
return original
def closures_january_2022(_path, original):
original["message"] = "Cafetaria’s Boekentoren en Ledeganck gesloten, geen warme maaltijden meer in resto’s Dunant en Merelbeke."
original["open"] = True
return original
def closures_january_2022_en(_path, original):
original["message"] = "Cafeterias Boekentoren and Ledeganck closed, no more warm meals in restaurants Dunant and Merelbeke."
original["open"] = True
return original
def paasvakantie2022(_path, original):
original["message"] = "In de paasvakantie wijzigt de dienstverlening grondig. " \
"Warme maaltijden enkel in De Brug, uitgezonderd de sluiting op 8 en 15 april. " \
"Bekijk de website voor alle details over alle locaties."
return original
def easter2022(_path, original):
original["message"] = "In the easter recess, the service is heavily modified. " \
"Hot meals only in the Brug, except the closure on April 8th and April 15th. " \
"Check the website for more details on all locations."
return original
def zomer_2022_1(_path, original):
original["message"] = "In juli, augustus en september sluiten verschillende resto's of doen ze enkel dienst als cafetaria. " \
"Kijk op de website voor alle details."
return original
def summer_2022_1(_path, original):
original["message"] = "In July, August and September, multiple restaurants close or only act as a cafetaria. " \
"Check the website for more details."
return original
def zomer_2022_2(_path, original):
original["message"] = "Vanaf 18 juli tot en met 29 juli zijn alle resto's en cafetaria's gesloten."
original["open"] = False
return original
def summer_2022_2(_path, original):
original["message"] = "From July 18th until July 29th, all restaurants and cafeterias are closed."
original["open"] = False
return original
def close_time_nl(_path, original):
if "message" in original:
original["message"] += " Op 20 september zijn alle resto’s en cafetaria’s gesloten door onze teambuilding."
else:
original["message"] = "Op 20 september zijn alle resto’s en cafetaria’s gesloten door onze teambuilding."
original["open"] = False
return original
def close_time_en(_path, original):
if "message" in original:
original["message"] += " All restaurants and cafeterias are closed on 20 September, due to our team building."
else:
original["message"] = "All restaurants and cafeterias are closed on 20 September, due to our team building."
original["open"] = False
return original
def close_ardoyen_nl(_path, original):
if "message" in original:
original["message"] += " Op 16 september is Resto Ardoyen gesloten wegens het openingsevent van het FSVM2 onderzoeksgebouw."
else:
original["message"] = "Op 16 september is Resto Ardoyen gesloten wegens het openingsevent van het FSVM2 onderzoeksgebouw."
return original
def close_ardoyen_en(_path, original):
if "message" in original:
original["message"] += " Resto Ardoyen is closed on 16 September, due to the opening event of the FSVM2 research building."
else:
original["message"] = "Resto Ardoyen is closed on 16 September, due to the opening event of the FSVM2 research building."
return original
def no_more_soup_nl(_path, original):
original["message"] = "Door ernstige productieproblemen bij de leverancier is er tijdelijk geen soep meer te " \
"verkrijgen. We werken hard aan een oplossing en ten laatste 7 november zal er opnieuw " \
"soep zijn. Hou onze website en de TV-schermen in de gaten voor de meest recente update " \
"hierover. "
return original
def no_more_soup_en(_path, original):
original["message"] = "Due to serious production problems at the purveyor, soup will temporarily no longer be " \
"available. We are working hard to resolve this issue. Soup should be available again " \
"November 7th at the latest. Watch our website and tv screens for the most up-to-date " \
"information. "
return original
def create_changes(root_path):
    """Return the list of ManualChange objects to apply to the scraped menus.

    Each entry patches the menus of one or more restos between ``start`` and
    ``end`` (inclusive) using its ``replacer``; entries with ``all_days=True``
    also create menus for days that were never scraped (see apply_all_menus).

    NOTE(review): ``root_path`` is not referenced by any entry below; it is
    kept for call-site compatibility.
    """
    return [
        # "Restjesmaand" (leftovers month) 2018
        ManualChange(
            replacer=restjesmaand18_replacer,
            resto="nl-sintjansvest",
            start=date(2018, 6, 1),
            end=date(2018, 6, 30),
        ),
        # Easter holiday 2019 notices
        ManualChange(
            replacer=paasvakantie19_general,
            resto="nl",
            start=date(2019, 4, 8),
            end=date(2019, 4, 19)
        ),
        ManualChange(
            replacer=paasvakantie19_en,
            resto="en",
            start=date(2019, 4, 8),
            end=date(2019, 4, 19)
        ),
        ManualChange(
            replacer=paasvakantie19_brug,
            resto="nl-debrug",
            start=date(2019, 4, 8),
            end=date(2019, 4, 19)
        ),
        # Construction works at De Brug from 20/05/2019 - 30/09/2019
        ManualChange(
            replacer=werken_brug19_replacer,
            resto="nl-debrug",
            start=date(2019, 5, 20),
            end=date(2019, 9, 29),
            all_days=True
        ),
        # The works were delayed even further
        ManualChange(
            replacer=werken_brug19_replacer2,
            resto="nl-debrug",
            start=date(2019, 9, 30),
            end=date(2019, 11, 11),
            all_days=True
        ),
        # Temporary closure of Sint-Jansvest over the 2019-2020 holidays
        ManualChange(
            replacer=tijdelijke_sluiting_sint_jansvest,
            resto="nl-sintjansvest",
            start=date(2019, 12, 16),
            end=date(2020, 1, 10),
            all_days=True,
        ),
        # Corona
        ManualChange(
            replacer=corona_sluiting_nl,
            resto=["nl", "nl-sintjansvest", "nl-debrug", "nl-heymans", "nl-kantienberg"],
            start=date(2020, 3, 16),
            end=date(2020, 6, 7),
            all_days=True
        ),
        ManualChange(
            replacer=corona_sluiting_en,
            resto="en",
            start=date(2020, 3, 16),
            end=date(2020, 6, 7),
            all_days=True
        ),
        ManualChange(
            replacer=corona_heropening_nl,
            resto="nl",
            start=date(2020, 9, 7),
            end=date(2020, 9, 20),
            all_days=True
        ),
        ManualChange(
            replacer=corona_heropening_en,
            resto="en",
            start=date(2020, 9, 7),
            end=date(2020, 9, 20),
            all_days=True
        ),
        ManualChange(
            replacer=corona_closed_for_now,
            resto=["nl-debrug", "nl-heymans"],
            start=date(2020, 9, 7),
            end=date(2020, 9, 20),
            all_days=True
        ),
        ManualChange(
            replacer=kantienberg_2020,
            resto="nl-kantienberg",
            start=date(2020, 9, 7),
            end=date(2021, 7, 1),
            all_days=True
        ),
        ManualChange(
            replacer=corona_2020_2021_en,
            resto="en",
            start=date(2020, 9, 21),
            end=date(2020, 10, 18)
        ),
        ManualChange(
            replacer=corona_2020_2021_nl,
            resto=["nl", "nl-debrug", "nl-heymans"],
            start=date(2020, 9, 21),
            end=date(2020, 10, 18)
        ),
        ManualChange(
            replacer=corona_2020_2021_en_red,
            resto="en",
            start=date(2020, 10, 19),
            end=date(2020, 12, 19)
        ),
        ManualChange(
            replacer=corona_2020_2021_nl_red,
            resto=["nl-debrug", "nl-heymans", "nl-sterre", "nl-ardoyen"],
            start=date(2020, 10, 19),
            end=date(2020, 12, 19)
        ),
        ManualChange(
            replacer=corona_2020_2021_cold,
            resto=["nl-coupure", "nl-dunant", "nl-merelbeke"],
            start=date(2020, 11, 28),
            end=date(2020, 12, 31)
        ),
        ManualChange(
            replacer=christmas,
            resto=["nl-debrug", "nl-heymans", "nl-dunant", "nl-coupure", "nl-sterre", "nl-ardoyen", "nl-merelbeke"],
            start=date(2020, 12, 21),
            end=date(2020, 12, 25),
            all_days=True
        ),
        # Individual exam-period closure days, January 2021
        ManualChange(
            replacer=exam_closure_dunant_2020,
            resto="nl-dunant",
            start=date(2021, 1, 4),
            end=date(2021, 1, 4),
            all_days=True
        ),
        ManualChange(
            replacer=exam_closure_dunant_2020,
            resto="nl-dunant",
            start=date(2021, 1, 8),
            end=date(2021, 1, 8),
            all_days=True
        ),
        ManualChange(
            replacer=exam_closure_dunant_2020,
            resto="nl-dunant",
            start=date(2021, 1, 15),
            end=date(2021, 1, 15),
            all_days=True
        ),
        ManualChange(
            replacer=exam_closure_dunant_2020,
            resto="nl-dunant",
            start=date(2021, 1, 18),
            end=date(2021, 1, 18),
            all_days=True
        ),
        ManualChange(
            replacer=exam_closure_dunant_2020,
            resto="nl-dunant",
            start=date(2021, 1, 22),
            end=date(2021, 1, 22),
            all_days=True
        ),
        ManualChange(
            replacer=exam_closure_dunant_2020,
            resto="nl-dunant",
            start=date(2021, 1, 25),
            end=date(2021, 1, 25),
            all_days=True
        ),
        ManualChange(
            replacer=exam_closure_dunant_2020,
            resto="nl-dunant",
            start=date(2021, 1, 29),
            end=date(2021, 1, 29),
            all_days=True
        ),
        ManualChange(
            replacer=exam_closure_sterre_2020,
            resto="nl-sterre",
            start=date(2021, 1, 4),
            end=date(2021, 1, 5),
            all_days=True
        ),
        # NOTE(review): this entry overlaps the previous one (4 January is
        # already covered by the 4-5 January range) -- confirm it is intended.
        ManualChange(
            replacer=exam_closure_sterre_2020,
            resto="nl-sterre",
            start=date(2021, 1, 4),
            end=date(2021, 1, 4),
            all_days=True
        ),
        ManualChange(
            replacer=exam_closure_sterre_2020,
            resto="nl-sterre",
            start=date(2021, 1, 15),
            end=date(2021, 1, 15),
            all_days=True
        ),
        ManualChange(
            replacer=exam_closure_sterre_2020,
            resto="nl-sterre",
            start=date(2021, 1, 18),
            end=date(2021, 1, 18),
            all_days=True
        ),
        ManualChange(
            replacer=exam_closure_sterre_2020,
            resto="nl-sterre",
            start=date(2021, 1, 26),
            end=date(2021, 1, 26),
            all_days=True
        ),
        ManualChange(
            replacer=exam_closure_en_2020,
            resto="en",
            start=date(2021, 1, 4),
            end=date(2021, 1, 29),
            all_days=False
        ),
        ManualChange(
            replacer=dies_natalis_2021,
            resto=["nl-debrug", "nl-heymans", "nl-dunant", "nl-coupure", "nl-sterre", "nl-ardoyen", "nl-merelbeke"],
            start=date(2021, 3, 19),
            end=date(2021, 3, 19),
            all_days=True
        ),
        ManualChange(
            replacer=dies_natalis_2021_en,
            resto="en",
            start=date(2021, 3, 19),
            end=date(2021, 3, 19),
            all_days=True
        ),
        ManualChange(
            replacer=easter_2021_week1,
            resto=["nl-debrug", "nl-heymans", "nl-sterre", "nl-ardoyen"],
            start=date(2021, 4, 5),
            end=date(2021, 4, 9),
            all_days=True
        ),
        ManualChange(
            replacer=easter_2021_week2,
            resto=["nl-debrug", "nl-heymans", "nl-sterre", "nl-ardoyen", "nl-coupure"],
            start=date(2021, 4, 12),
            end=date(2021, 4, 16),
            all_days=True
        ),
        # NOTE(review): end (16 April) precedes start (9 August); this looks
        # like a copy-paste slip from the entry above and the range probably
        # never matches anything -- confirm the intended end date.
        ManualChange(
            replacer=summer_2021_1,
            resto=["nl-debrug", "nl-sterre", "nl-ardoyen", "nl-merelbeke"],
            start=date(2021, 8, 9),
            end=date(2021, 4, 16),
            all_days=True
        ),
        ManualChange(
            replacer=summer_2021_2,
            resto=["nl-sterre", "nl-merelbeke", "nl-coupure", "nl-heymans"],
            start=date(2021, 8, 16),
            end=date(2021, 9, 13),
            all_days=True
        ),
        ManualChange(
            replacer=summer_2021_2,
            resto="nl-debrug",
            start=date(2021, 8, 16),
            end=date(2021, 9, 1),
            all_days=True
        ),
        ManualChange(
            replacer=summer_2021_2,
            resto="nl-ardoyen",
            start=date(2021, 8, 16),
            end=date(2021, 8, 25),
            all_days=True
        ),
        # One change per Friday (weekday() == 4) from 22 Nov through 31 Dec 2021
        *[ManualChange(
            replacer=brug_avond,
            resto="nl",
            start=date(2021, 11, 22) + timedelta(days=x),
            end=date(2021, 11, 22) + timedelta(days=x)
        ) for x in range((date(2021, 12, 31) - date(2021, 11, 22)).days + 1) if
            (date(2021, 11, 22) + timedelta(days=x)).weekday() == 4],
        ManualChange(
            replacer=november_12_2021,
            resto=["nl"],
            start=date(2021, 11, 12),
            end=date(2021, 11, 12),
            all_days=True
        ),
        ManualChange(
            replacer=november_12_2021_en,
            resto=["en"],
            start=date(2021, 11, 12),
            end=date(2021, 11, 12),
            all_days=True
        ),
        ManualChange(
            replacer=heymans_november_22_2021,
            resto=["nl"],
            start=date(2021, 11, 22),
            end=date(2021, 11, 22),
            all_days=True
        ),
        ManualChange(
            replacer=heymans_november_22_2021_en,
            resto=["en"],
            start=date(2021, 11, 22),
            end=date(2021, 11, 22),
            all_days=True
        ),
        ManualChange(
            replacer=heymans_november_23_2021,
            resto=["nl"],
            start=date(2021, 11, 23),
            end=date(2021, 11, 23),
            all_days=True
        ),
        ManualChange(
            replacer=heymans_november_23_2021_en,
            resto=["en"],
            start=date(2021, 11, 23),
            end=date(2021, 11, 23),
            all_days=True
        ),
        ManualChange(
            replacer=heymans_november_24_26_2021,
            resto=["nl"],
            start=date(2021, 11, 24),
            end=date(2021, 11, 26),
            all_days=True
        ),
        ManualChange(
            replacer=heymans_november_24_26_2021_en,
            resto=["en"],
            start=date(2021, 11, 24),
            end=date(2021, 11, 26),
            all_days=True
        ),
        ManualChange(
            replacer=christmas_2021,
            resto=["nl"],
            start=date(2021, 12, 20),
            end=date(2022, 1, 3),
            all_days=True
        ),
        ManualChange(
            replacer=christmas_2021_en,
            resto=["en"],
            start=date(2021, 12, 20),
            end=date(2022, 1, 3),
            all_days=True
        ),
        ManualChange(
            replacer=newyear_2022,
            resto=["nl"],
            start=date(2022, 1, 4),
            end=date(2022, 1, 7),
            all_days=True
        ),
        ManualChange(
            replacer=newyear_2022_en,
            resto=["en"],
            start=date(2022, 1, 4),
            end=date(2022, 1, 7),
            all_days=True
        ),
        ManualChange(
            replacer=closures_january_2022,
            resto=["nl"],
            start=date(2022, 1, 17),
            end=date(2022, 1, 28),
            all_days=True
        ),
        ManualChange(
            replacer=closures_january_2022_en,
            resto=["en"],
            start=date(2022, 1, 17),
            end=date(2022, 1, 28),
            all_days=True
        ),
        ManualChange(
            replacer=paasvakantie2022,
            resto=["nl"],
            start=date(2022, 4, 4),
            end=date(2022, 4, 17),
            all_days=True
        ),
        ManualChange(
            replacer=easter2022,
            resto=["en"],
            start=date(2022, 4, 4),
            end=date(2022, 4, 17),
            all_days=True
        ),
        # Summer 2022, in two regimes with a different notice each
        ManualChange(
            replacer=zomer_2022_1,
            resto=["nl"],
            start=date(2022, 6, 27),
            end=date(2022, 7, 15),
            all_days=True
        ),
        ManualChange(
            replacer=summer_2022_1,
            resto=["en"],
            start=date(2022, 6, 27),
            end=date(2022, 7, 15),
            all_days=True
        ),
        ManualChange(
            replacer=zomer_2022_2,
            resto=["nl"],
            start=date(2022, 7, 18),
            end=date(2022, 7, 29),
            all_days=True
        ),
        ManualChange(
            replacer=summer_2022_2,
            resto=["en"],
            start=date(2022, 7, 18),
            end=date(2022, 7, 29),
            all_days=True
        ),
        ManualChange(
            replacer=zomer_2022_1,
            resto=["nl"],
            start=date(2022, 8, 1),
            end=date(2022, 9, 16),
            all_days=True
        ),
        ManualChange(
            replacer=summer_2022_1,
            resto=["en"],
            start=date(2022, 8, 1),
            end=date(2022, 9, 16),
            all_days=True
        ),
        # Team-building closure, September 2022
        ManualChange(
            replacer=close_time_nl,
            resto=["nl"],
            start=date(2022, 9, 14),
            end=date(2022, 9, 20),
            all_days=True
        ),
        ManualChange(
            replacer=close_time_en,
            resto=["en"],
            start=date(2022, 9, 14),
            end=date(2022, 9, 20),
            all_days=True
        ),
        ManualChange(
            replacer=close_ardoyen_nl,
            resto=["nl"],
            start=date(2022, 9, 15),
            end=date(2022, 9, 16),
            all_days=True
        ),
        ManualChange(
            replacer=close_ardoyen_en,
            resto=["en"],
            start=date(2022, 9, 15),
            end=date(2022, 9, 16),
            all_days=True
        ),
        # Soup shortage, autumn 2022
        ManualChange(
            replacer=no_more_soup_nl,
            resto=["nl"],
            start=date(2022, 10, 26),
            end=date(2022, 11, 2),
            all_days=True
        ),
        ManualChange(
            replacer=no_more_soup_en,
            resto=["en"],
            start=date(2022, 10, 26),
            end=date(2022, 11, 2),
            all_days=True
        )
    ]
# Actually do things ----------------------------------------------------------
def apply_existing_menus_only(output, manual_change, dates):
    """Apply a manual change to menus that already exist on disk.

    Scans ``<output>/menu/<resto>/<year>/<month>/<day>.json`` for every resto
    of *manual_change*, runs the change's replacer over each menu whose date
    is applicable, writes the result back, and records the new menu in
    *dates* so the overview files can be rebuilt afterwards.

    :param output: Root folder of the v2 output.
    :param manual_change: Object with ``resto``, ``start``, ``end``,
        ``is_applicable(date)`` and ``replacer(path, menu)``.
    :param dates: Mapping resto -> {menu date string -> menu dict}; updated
        in place.
    """
    print(f"Matching existing menus from {manual_change.resto} between {manual_change.start} to {manual_change.end}")
    print("====================================================================")
    # The pattern is loop-invariant, so compile it once instead of per resto.
    file_pattern = re.compile(r'.*/(\d+)/(\d+)/(\d+)\.json$')
    for resto in manual_change.resto:
        for path in glob.glob(f"{output}/menu/{resto}/*/*/*.json"):
            # Normalise Windows separators so the regex matches either way.
            m = file_pattern.search(path.replace("\\", "/"))
            if m is None:
                # Not a <year>/<month>/<day>.json path. The original code
                # crashed here with AttributeError; skip such files instead.
                continue
            file_date = date(int(m.group(1)), int(m.group(2)), int(m.group(3)))
            if not manual_change.is_applicable(file_date):
                continue
            with open(path, 'r') as f:
                overview = json.loads(f.read())
            _new_content = manual_change.replacer(path, overview)
            dates[resto][_new_content["date"]] = _new_content
            with open(path, 'w') as f:
                f.write(json.dumps(_new_content))
def apply_all_menus(output, manual_change, dates):
    """Apply the change to every date in the applicable range.

    Days for which no menu file exists yet get a fresh, closed, empty menu
    before the replacer runs.
    """
    print(f"Matching all menus from {manual_change.resto} between {manual_change.start} to {manual_change.end}")
    print("====================================================================")
    for day in manual_change.date_range():
        for resto in manual_change.resto:
            path = f"{output}/menu/{resto}/{day.year}/{day.month}/{day.day}.json"
            try:
                with open(path, 'r') as handle:
                    menu = json.load(handle)
            except IOError:
                # Nothing was scraped for this day: start from a closed stub.
                os.makedirs(os.path.dirname(path), exist_ok=True)
                menu = {'open': False, 'date': day.strftime('%Y-%m-%d'), 'meals': [], 'vegetables': []}
            updated = manual_change.replacer(path, menu)
            dates[resto][updated["date"]] = updated
            with open(path, 'w+') as handle:
                handle.write(json.dumps(updated))
def main(output):
    """Apply all manual changes under *output* and rebuild the overview files.

    First every ManualChange is applied to the per-day menu files (creating
    missing days when the change has all_days=True); every touched menu is
    collected in ``dates``. Then each affected resto's overview.json is
    rewritten: existing entries are swapped for their patched version, and
    newly created future days are appended up to OVERVIEW_COUNT entries.
    """
    to_apply = create_changes(output)
    # dates: resto -> {menu date string -> patched menu dict}
    dates = defaultdict(dict)
    for manual_change in to_apply:
        if manual_change.all_days:
            apply_all_menus(output, manual_change, dates)
        else:
            apply_existing_menus_only(output, manual_change, dates)
    for manual_change in to_apply:
        print("Rebuilding overviews")
        for resto in manual_change.resto:
            match_glob = f"menu/{resto}/overview.json"
            print(match_glob)
            overviews = glob.glob(f"{output}/{match_glob}")
            # For each overview that should be rebuild
            for path in overviews:
                print(f"Rebuilding {path}")
                new_overview = []
                with open(path, 'r') as f:
                    overview = json.loads(f.read())
                last_day = None
                # If the date is modified, replace it
                for day in overview:
                    if day["date"] in dates[resto]:
                        print(f"Updating {day['date']}")
                        new_overview.append(dates[resto][day["date"]])
                    else:
                        print(f"Keeping {day['date']}")
                        new_overview.append(day)
                    # Tracks the last (presumably latest) date already listed.
                    last_day = day["date"]
                # We want to provide at least ten days in the future.
                to_add = max(OVERVIEW_COUNT - len(overview), 0)
                if last_day:
                    last_day = datetime.strptime(last_day, '%Y-%m-%d').date()
                # Append newly created menus that fall after the existing
                # overview (or after today when the overview was empty),
                # until the overview reaches OVERVIEW_COUNT entries.
                for day in dates[resto]:
                    dday = datetime.strptime(day, '%Y-%m-%d').date()
                    if ((last_day and dday <= last_day) or (last_day is None and dday < date.today())) or to_add <= 0:
                        continue
                    new_overview.append(dates[resto][day])
                    to_add -= 1
                with open(path, 'w') as f:
                    f.write(json.dumps(new_overview))
                print("Wrote updated overview")
if __name__ == '__main__':
    # Command-line entry point: takes the output folder and applies the fixes.
    arg_parser = argparse.ArgumentParser(description='Apply manual corrections to scraped menu')
    arg_parser.add_argument('output', help='Folder of v2 output.')
    parsed = arg_parser.parse_args()
    main(parsed.output)
|
# -*- coding: utf-8 -*-
# This file is part of the pymfony package.
#
# (c) Alexandre Quercia <alquerci@email.com>
#
# For the full copyright and license information, please view the LICENSE
# file that was distributed with this source code.
from __future__ import absolute_import;
import os;
from time import time;
import re;
from pymfony.component.system.oop import interface;
from pymfony.component.system.types import Array;
from pymfony.component.system.reflection import ReflectionObject;
from pymfony.component.system.exception import LogicException;
from pymfony.component.system.exception import InvalidArgumentException;
from pymfony.component.system.exception import RuntimeException;
from pymfony.component.config.loader import LoaderResolver;
from pymfony.component.config.loader import DelegatingLoader;
from pymfony.component.dependency.interface import ContainerInterface;
from pymfony.component.dependency.interface import ContainerAwareInterface;
from pymfony.component.dependency import ContainerBuilder;
from pymfony.component.dependency.parameterbag import ParameterBag;
from pymfony.component.dependency.loader import IniFileLoader;
from pymfony.component.dependency.loader import JsonFileLoader;
from pymfony.component.dependency.compilerpass import CheckDefinitionValidityPass;
from pymfony.component.dependency.compilerpass import ResolveReferencesToAliasesPass;
from pymfony.component.dependency.compilerpass import ResolveInvalidReferencesPass;
from pymfony.component.dependency.compilerpass import AnalyzeServiceReferencesPass;
from pymfony.component.dependency.compilerpass import CheckCircularReferencesPass;
from pymfony.component.dependency.compilerpass import CheckReferenceValidityPass;
from pymfony.component.dependency.compilerpass import RemovePrivateAliasesPass;
from pymfony.component.dependency.compilerpass import RemoveAbstractDefinitionsPass;
from pymfony.component.dependency.compilerpass import ReplaceAliasByActualDefinitionPass;
from pymfony.component.dependency.compilerpass import RepeatedPass;
from pymfony.component.dependency.compilerpass import InlineServiceDefinitionsPass;
from pymfony.component.dependency.compilerpass import RemoveUnusedDefinitionsPass;
from pymfony.component.dependency.compilerpass import CheckExceptionOnInvalidReferenceBehaviorPass;
from pymfony.component.dependency.compilerpass import ResolveDefinitionTemplatesPass;
from pymfony.component.dependency.compilerpass import ResolveParameterPlaceHoldersPass;
from pymfony.component.kernel.bundle import BundleInterface;
from pymfony.component.kernel.config import FileLocator;
from pymfony.component.kernel.config import FileResourceLocatorInterface;
from pymfony.component.kernel.dependency import MergeExtensionConfigurationPass;
from pymfony.component.kernel.debug import ExceptionHandler;
"""
"""
@interface
class KernelInterface(FileResourceLocatorInterface):
    """Contract for application kernels.

    A kernel registers bundles, boots them, and exposes the service
    container that is built from their configuration.
    """

    def registerContainerConfiguration(self, loader):
        """Load the container configuration.

        @param loader: LoaderInterface A LoaderInterface instance
        """
        pass

    def boot(self):
        """Boot the current kernel."""
        pass

    def shutdown(self):
        """Shut the kernel down."""
        pass

    def getName(self):
        """Get the name of the kernel.

        @return: string The kernel name
        """
        pass

    def getEnvironment(self):
        """Get the environment.

        @return: string The current environment
        """
        pass

    def isDebug(self):
        """Check whether debug mode is enabled.

        @return: Boolean true if debug mode is enabled, false otherwise
        """
        pass

    def getContainer(self):
        """Get the current container.

        @return: ContainerInterface A ContainerInterface instance
        """
        pass

    def getStartTime(self):
        """Get the request start time (not available if debug is disabled).

        @return: float The request start timestamp
        """
        pass

    def getNamespace(self):
        """Get the Kernel namespace.

        @return: string The Bundle namespace
        """
        pass

    def registerBundles(self):
        """Return the list of bundles to register.

        @return: BundleInterface[] A list of bundle instances.
        """
        pass

    def getBundles(self):
        """Get the registered bundle instances.

        @return: BundleInterface{} A dict of registered bundle instances
        """
        pass

    def isClassInActiveBundle(self, className):
        """Check whether a given class name belongs to an active bundle.

        @param className: string A class name
        @return: Boolean true if the class belongs to an active bundle,
            false otherwise
        """
        pass

    def getBundle(self, name, first=True):
        """Return a bundle and optionally its descendants by its name.

        @param name: string Bundle name
        @param first: Boolean Whether to return the first bundle only or
            together with its descendants
        @return: BundleInterface|BundleInterface[] A BundleInterface
            instance, or a list of BundleInterface instances if first is
            false
        @raise InvalidArgumentException: when the bundle is not enabled
        """
        pass

    def getCharset(self):
        """Get the charset of the application.

        @return: string The charset
        """
        pass

    def getLogDir(self):
        """Get the log directory.

        @return: string The log directory
        """
        pass

    def getCacheDir(self):
        """Get the cache directory.

        @return: string The cache directory
        """
        pass

    def getRootDir(self):
        """Get the application root dir.

        @return: string The application root dir
        """
        pass
class Kernel(KernelInterface):
    """Base kernel implementation: manages bundles and builds the container."""

    VERSION = '2.2.0-RC3';
    # NOTE(review): VERSION_ID does not follow the usual
    # MAJOR*10000 + MINOR*100 + RELEASE encoding (2.2.0 would be '20200');
    # confirm before relying on it for version comparisons.
    VERSION_ID = '20100';
    MAJOR_VERSION = '2';
    MINOR_VERSION = '2';
    RELEASE_VERSION = '0';
    EXTRA_VERSION = 'RC3';

    def __init__(self, environment, debug):
        """Constructor.

        @param environment: string The environment name
        @param debug: Boolean Whether to enable debug mode
        """
        self._environment = environment;
        self._debug = bool(debug);
        self._name = None;
        self._rootDir = None;
        self._bundles = dict();
        self._bundleMap = dict();
        self._container = None;
        self._extension = None;
        self._booted = False;
        self._rootDir = self.getRootDir();
        self._name = self.getName();
        self._version = self.getVersion();
        if self._debug:
            # Only recorded in debug mode; getStartTime() returns -1 otherwise.
            self._startTime = time();
        self.init();

    def init(self):
        """Hook called at the end of __init__; installs the debug handler."""
        if self._debug:
            ExceptionHandler.register(self._debug);

    def __clone__(self):
        """Reset the clone to an un-booted state with its own start time."""
        if self._debug:
            self._startTime = time();
        self._booted = False;
        self._container = None;

    def getKernelParameters(self):
        """Return the kernel parameters injected into the container.

        @return: dict The kernel parameters, merged with getEnvParameters()
        """
        bundles = dict();
        for name, bundle in self._bundles.items():
            bundles[name] = ReflectionObject(bundle).getName();
        parameters = {
            'kernel.root_dir': self._rootDir,
            'kernel.logs_dir': self.getLogDir(),
            'kernel.cache_dir': self.getCacheDir(),
            'kernel.environment': self._environment,
            'kernel.debug': self._debug,
            'kernel.name': self._name,
            'kernel.bundles': bundles,
            'kernel.charset': self.getCharset(),
            'kernel.version': self.getVersion(),
        };
        parameters.update(self.getEnvParameters());
        return parameters;

    def getEnvParameters(self):
        """Return parameters defined as environment variables.

        Only variables whose name starts with "<NAME>__" (kernel name
        upper-cased, dashes removed) are kept; "__" becomes "." and the key
        is lower-cased.

        @return: dict The parameters found in the environment
        """
        parameters = dict();
        # BUGFIX: str.replace returns a new string; the original discarded
        # its result, so dashes were never actually stripped from the
        # prefix. Also hoisted the loop-invariant prefix out of the loop.
        prefix = (self.getName().upper() + "__").replace("-", "");
        for key, value in os.environ.items():
            key = str(key);
            if key.startswith(prefix):
                name = key.replace("__", ".").lower();
                parameters[name] = value;
        return parameters;

    def boot(self):
        """Boot the kernel: initialize bundles and container, boot bundles."""
        if self._booted:
            return;
        # initialize the bundles, then the container built from them
        self._initializeBundles();
        self._initializeContainer();
        for bundle in self.getBundles().values():
            assert isinstance(bundle, ContainerAwareInterface);
            bundle.setContainer(self._container);
            bundle.boot();
        self._booted = True;

    def _initializeContainer(self):
        """Initializes the service container."""
        self._container = self.buildContainer();
        self._container.set('kernel', self);

    def _initializeBundles(self):
        """Initializes the data structures related to the bundle management.

        - the bundles property maps a bundle name to the bundle instance,
        - the bundleMap property maps a bundle name to the bundle inheritance
          hierarchy (most derived bundle first).

        @raise LogicException: if two bundles share a common name
        @raise LogicException: if a bundle tries to extend a non-registered
            bundle
        @raise LogicException: if a bundle tries to extend itself
        @raise LogicException: if two bundles extend the same ancestor
        """
        # init bundles
        self._bundles = dict();
        topMostBundles = dict();
        directChildren = dict();
        for bundle in self.registerBundles():
            assert isinstance(bundle, BundleInterface);
            name = bundle.getName();
            if name in self._bundles:
                raise LogicException(
                    'Trying to register two bundles with the same name "{0}"'
                    ''.format(name)
                );
            self._bundles[name] = bundle;
            parentName = bundle.getParent();
            if parentName:
                if parentName in directChildren:
                    raise LogicException(
                        'Bundle "{0}" is directly extended by two bundles '
                        '"{1}" and "{2}".'
                        ''.format(parentName, name, directChildren[parentName])
                    );
                if parentName == name:
                    raise LogicException(
                        'Bundle "{0}" can not extend itself.'.format(name)
                    );
                directChildren[parentName] = name;
            else:
                topMostBundles[name] = bundle;
        # look for orphans: children whose declared parent was never registered
        diff = Array.diff(
            list(directChildren.keys()),
            list(self._bundles.keys()),
        );
        if diff:
            raise LogicException(
                'Bundle "{0}" extends bundle "{1}", which is not registered.'
                ''.format(directChildren[diff[0]], diff[0])
            );
        # inheritance: walk each root-to-leaf chain, most derived bundle first
        self._bundleMap = dict();
        for name, bundle in topMostBundles.items():
            bundleMap = [bundle];
            hierarchy = [name];
            while name in directChildren:
                name = directChildren[name];
                bundleMap.insert(0, self._bundles[name]);
                hierarchy.append(name);
            for name in hierarchy:
                self._bundleMap[name] = list(bundleMap);
                bundleMap.pop();

    def buildContainer(self):
        """Build the service container from all registered bundles.

        Creates the cache/log directories if needed, registers every
        bundle's extension, configures the compiler passes, merges the
        application-level configuration and compiles the container.

        @return: ContainerBuilder The compiled container
        @raise RuntimeException: when a cache/log directory cannot be
            created or written
        """
        resources = {
            'cache': self.getCacheDir(),
            'logs': self.getLogDir(),
        };
        for name, path in resources.items():
            if not os.path.isdir(path):
                try:
                    os.makedirs(path, 0o777);
                except Exception:
                    raise RuntimeException(
                        "Unable to create the {0} directory ({1})\n"
                        "".format(name, path)
                    );
            elif not os.access(path, os.W_OK):
                raise RuntimeException(
                    "Unable to write in the {0} directory ({1})\n"
                    "".format(name, path)
                );
        container = self.getContainerBuilder();
        extensions = list();
        container.addObjectResource(self);
        for bundle in self._bundles.values():
            extension = bundle.getContainerExtension();
            if extension:
                container.registerExtension(extension);
                extensions.append(extension.getAlias());
            if self._debug:
                container.addObjectResource(bundle);
        for bundle in self._bundles.values():
            bundle.build(container);
        # NOTE(review): the kernel is registered as an object resource a
        # second time here (also done above); confirm whether this
        # duplication is intentional before removing it.
        container.addObjectResource(self);
        # ensure these extensions are implicitly loaded
        container.getCompilerPassConfig().setMergePass(
            MergeExtensionConfigurationPass(extensions)
        );
        container.getCompilerPassConfig().setOptimizationPasses([
            ResolveDefinitionTemplatesPass(),
            ResolveParameterPlaceHoldersPass(),
            CheckDefinitionValidityPass(),
            ResolveReferencesToAliasesPass(),
            ResolveInvalidReferencesPass(),
            AnalyzeServiceReferencesPass(),
            CheckCircularReferencesPass(),
            CheckReferenceValidityPass(),
        ]);
        container.getCompilerPassConfig().setRemovingPasses([
            RemovePrivateAliasesPass(),
            RemoveAbstractDefinitionsPass(),
            ReplaceAliasByActualDefinitionPass(),
            RepeatedPass([
                AnalyzeServiceReferencesPass(),
                InlineServiceDefinitionsPass(),
                AnalyzeServiceReferencesPass(),
                RemoveUnusedDefinitionsPass(),
            ]),
            CheckExceptionOnInvalidReferenceBehaviorPass(),
        ]);
        cont = self.registerContainerConfiguration(
            self.getContainerLoader(container)
        );
        if cont is not None:
            container.merge(cont);
        container.compile();
        return container;

    def getNamespace(self):
        """Return the module name of the concrete kernel class."""
        return str(type(self).__module__);

    def getContainerLoader(self, container):
        """Return a loader able to read INI and JSON container config files.

        @param container: ContainerInterface The container to populate
        @return: DelegatingLoader
        """
        assert isinstance(container, ContainerInterface);
        locator = FileLocator(self);
        resolver = LoaderResolver([
            IniFileLoader(container, locator),
            JsonFileLoader(container, locator),
        ]);
        return DelegatingLoader(resolver);

    def getContainerBuilder(self):
        """Return a fresh ContainerBuilder seeded with the kernel parameters."""
        return ContainerBuilder(ParameterBag(self.getKernelParameters()));

    def shutdown(self):
        """Shut down every bundle and drop the container."""
        if not self._booted:
            return;
        self._booted = False;
        for bundle in self.getBundles().values():
            assert isinstance(bundle, BundleInterface);
            bundle.shutdown();
            bundle.setContainer(None);
        self._container = None;

    def getBundles(self):
        """Return the dict of registered bundles, keyed by name."""
        return self._bundles;

    def isClassInActiveBundle(self, className):
        """Return True when className lives in a registered bundle namespace."""
        for bundle in self._bundles.values():
            assert isinstance(bundle, BundleInterface);
            if 0 == str(className).find(bundle.getNamespace()):
                return True;
        return False;

    def getBundle(self, name, first=True):
        """Return a bundle (or its whole hierarchy) by name.

        @param name: string Bundle name
        @param first: Boolean Return only the most derived bundle when True
        @raise InvalidArgumentException: when the bundle is not registered
        """
        if name not in self._bundleMap:
            raise InvalidArgumentException(
                'Bundle "{0}" does not exist or it is not enabled. Maybe you '
                'forgot to add it in the registerBundles() method of your {1} '
                'file?'.format(name, ReflectionObject(self).getFileName())
            );
        if first is True:
            return self._bundleMap[name][0];
        return self._bundleMap[name];

    def getName(self):
        """Return the kernel name, derived from the root directory name."""
        if self._name is None:
            self._name = re.sub(r"[^a-zA-Z0-9_]+", "", os.path.basename(self._rootDir));
        return self._name;

    def getVersion(self):
        """Return a human-readable version string including the environment."""
        return self.VERSION+' - '+self.getEnvironment()+('/debug' if self.isDebug() else '');

    def getEnvironment(self):
        """Return the environment name."""
        return self._environment;

    def getContainer(self):
        """Return the current service container (None before boot)."""
        return self._container;

    def getStartTime(self):
        """Return the boot start timestamp, or -1 when debug is disabled."""
        if self._debug:
            return self._startTime;
        else:
            return -1;

    def isDebug(self):
        """Return whether debug mode is enabled."""
        return self._debug;

    def locateResource(self, name, directory=None, first=True):
        """Returns the file path for a given resource.

        A Resource can be a file or a directory.
        The resource name must follow the following pattern:
            @BundleName/path/to/a/file.something
        where package is the name of the package
        and the remaining part is the relative path in the package.

        If directory is passed, and the first segment of the path is
        Resources, this method will look for a file named:
            directory/BundleName/path/without/Resources

        If BundleName is empty the application root directory is used:
            %kernel.root_dir%/path/to/a/file.something

        @param name: string A resource name to locate
        @param directory: string A directory where to look for the resource
            first
        @param first: Boolean Whether to return the first path
            or paths for all matching bundles
        @return: string|array The absolute path of the resource
            or an array if first is false
        @raise InvalidArgumentException: if the file cannot be found or
            the name is not valid
        @raise RuntimeException: if the name contains invalid/unsafe
            characters
        """
        name = str(name);
        isResource = False;
        if not name.startswith("@"):
            raise InvalidArgumentException(
                'A resource name must start with @ ("{0}" given).'
                "".format(name)
            )
        if ".." in name:
            raise RuntimeException(
                'File name "{0}" contains invalid characters (..).'
                "".format(name)
            );
        bundleName = name[1:];
        # BUGFIX: path must default to the empty string; it was previously
        # left unbound when the name contained no "/" (e.g. "@Bundle"),
        # which made the os.path.join calls below raise NameError.
        path = "";
        if "/" in bundleName:
            bundleName, path = bundleName.split("/", 1);
        if path.startswith("Resources") and directory:
            isResource = True;
            # len("Resources/") == 10: keep the part after that segment
            overridePath = path[10:];
        resourceBundle = None;
        files = [];
        if bundleName:
            bundles = self.getBundle(bundleName, False);
            for bundle in bundles:
                if isResource:
                    filename = os.path.join(
                        directory,
                        bundle.getName(),
                        overridePath
                    );
                    if os.path.exists(filename):
                        if resourceBundle:
                            raise RuntimeException(
                                '"{0}" resource is hidden by a resource from '
                                'the "{1}" derived bundle. Create a "{2}" '
                                'file to override the bundle resource.'
                                ''.format(
                                    filename,
                                    resourceBundle,
                                    directory+'/'+bundles[0].getName()+'/'+overridePath
                                ));
                        if first:
                            return filename;
                        files.append(filename);
                filename = os.path.join(bundle.getPath(), path);
                if os.path.exists(filename):
                    if first and not isResource:
                        return filename;
                    files.append(filename);
                    resourceBundle = bundle.getName();
        else:
            # check in root_dir when bundle name is empty
            if isResource:
                filename = os.path.join(directory, overridePath);
            else:
                filename = os.path.join(self._rootDir, path);
            if os.path.exists(filename):
                if first and not isResource:
                    return filename;
                files.append(filename);
        if files:
            if first and isResource:
                return files[0];
            else:
                return files;
        raise InvalidArgumentException(
            'Unable to find file "{0}".'.format(name)
        );

    def getRootDir(self):
        """Return (and cache) the directory containing the kernel class file."""
        if self._rootDir is None:
            r = ReflectionObject(self);
            self._rootDir = os.path.dirname(r.getFileName()).replace('\\', '/');
        return self._rootDir;

    def getCacheDir(self):
        """Return the per-environment cache directory."""
        return self._rootDir+'/cache/'+self._environment;

    def getLogDir(self):
        """Return the per-environment log directory."""
        return self._rootDir+'/logs/'+self._environment;

    def getCharset(self):
        """Return the application charset."""
        return 'UTF-8';

    def getConsoleKernel(self):
        """Return the 'console_kernel' service, booting the kernel if needed."""
        if not self._booted:
            self.boot();
        return self._container.get('console_kernel');
# bumped Symfony version to 2.2.0-RC4-DEV
# -*- coding: utf-8 -*-
# This file is part of the pymfony package.
#
# (c) Alexandre Quercia <alquerci@email.com>
#
# For the full copyright and license information, please view the LICENSE
# file that was distributed with this source code.
from __future__ import absolute_import;
import os;
from time import time;
import re;
from pymfony.component.system.oop import interface;
from pymfony.component.system.types import Array;
from pymfony.component.system.reflection import ReflectionObject;
from pymfony.component.system.exception import LogicException;
from pymfony.component.system.exception import InvalidArgumentException;
from pymfony.component.system.exception import RuntimeException;
from pymfony.component.config.loader import LoaderResolver;
from pymfony.component.config.loader import DelegatingLoader;
from pymfony.component.dependency.interface import ContainerInterface;
from pymfony.component.dependency.interface import ContainerAwareInterface;
from pymfony.component.dependency import ContainerBuilder;
from pymfony.component.dependency.parameterbag import ParameterBag;
from pymfony.component.dependency.loader import IniFileLoader;
from pymfony.component.dependency.loader import JsonFileLoader;
from pymfony.component.dependency.compilerpass import CheckDefinitionValidityPass;
from pymfony.component.dependency.compilerpass import ResolveReferencesToAliasesPass;
from pymfony.component.dependency.compilerpass import ResolveInvalidReferencesPass;
from pymfony.component.dependency.compilerpass import AnalyzeServiceReferencesPass;
from pymfony.component.dependency.compilerpass import CheckCircularReferencesPass;
from pymfony.component.dependency.compilerpass import CheckReferenceValidityPass;
from pymfony.component.dependency.compilerpass import RemovePrivateAliasesPass;
from pymfony.component.dependency.compilerpass import RemoveAbstractDefinitionsPass;
from pymfony.component.dependency.compilerpass import ReplaceAliasByActualDefinitionPass;
from pymfony.component.dependency.compilerpass import RepeatedPass;
from pymfony.component.dependency.compilerpass import InlineServiceDefinitionsPass;
from pymfony.component.dependency.compilerpass import RemoveUnusedDefinitionsPass;
from pymfony.component.dependency.compilerpass import CheckExceptionOnInvalidReferenceBehaviorPass;
from pymfony.component.dependency.compilerpass import ResolveDefinitionTemplatesPass;
from pymfony.component.dependency.compilerpass import ResolveParameterPlaceHoldersPass;
from pymfony.component.kernel.bundle import BundleInterface;
from pymfony.component.kernel.config import FileLocator;
from pymfony.component.kernel.config import FileResourceLocatorInterface;
from pymfony.component.kernel.dependency import MergeExtensionConfigurationPass;
from pymfony.component.kernel.debug import ExceptionHandler;
"""
"""
@interface
class KernelInterface(FileResourceLocatorInterface):
    """Contract for a kernel: the object that registers bundles, boots the
    application, and exposes the service container and the environment
    (name, debug flag, directories, charset) to the rest of the framework.
    """

    def registerContainerConfiguration(self, loader):
        """Loads the container configuration.

        @param loader: LoaderInterface A LoaderInterface instance
        """
        pass;

    def boot(self):
        """Boots the current kernel."""
        pass;

    def shutdown(self):
        """Shutdowns the kernel."""
        pass;

    def getName(self):
        """Gets the name of the kernel.

        @return: string The kernel name
        """
        pass;

    def getEnvironment(self):
        """Gets the environment.

        @return: string The current environment
        """
        pass;

    def isDebug(self):
        """Checks if debug mode is enabled.

        @return: Boolean true if debug mode is enabled, false otherwise
        """
        pass;

    def getContainer(self):
        """Gets the current container.

        @return: ContainerInterface A ContainerInterface instance
        """
        pass;

    def getStartTime(self):
        """Gets the request start time (not available if debug is disabled).

        @return: float The request start timestamp
        """
        pass;

    def getNamespace(self):
        """Gets the Kernel namespace.

        @return: string The Bundle namespace
        """
        pass;

    def registerBundles(self):
        """Returns a list of bundles to registers.

        @return: BundleInterface[] An list of bundle instances.
        """
        pass;

    def getBundles(self):
        """Gets the registered bundle instances.

        @return: BundleInterface{} An dict of registered bundle instances
        """
        pass;

    def isClassInActiveBundle(self, className):
        """Checks if a given class name belongs to an active bundle.

        @param className: string A class name

        @return: Boolean true if the class belongs to an active bundle,
            false otherwise
        """
        pass;

    def getBundle(self, name, first=True):
        """Returns a bundle and optionally its descendants by its name.

        @param name: string Bundle name
        @param first: Boolean Whether to return the first bundle only or
            together with its descendants

        @return: BundleInterface|BundleInterface[] A BundleInterface instance
            or an list of BundleInterface instances if $first is false

        @raise InvalidArgumentException: when the bundle is not enabled
        """
        pass;

    def getCharset(self):
        """Gets the charset of the application.

        @return: string The charset
        """
        pass;

    def getLogDir(self):
        """Gets the log directory.

        @return: string The log directory
        """
        pass;

    def getCacheDir(self):
        """Gets the cache directory.

        @return: string The cache directory
        """
        pass;

    def getRootDir(self):
        """Gets the application root dir.

        @return: string The application root dir
        """
        pass;
class Kernel(KernelInterface):
    """Base implementation of KernelInterface.

    Registers and boots bundles, builds and compiles the dependency
    injection container, and resolves "@Bundle/..." resource paths.
    """

    VERSION = '2.2.0-RC4';
    VERSION_ID = '20100';
    MAJOR_VERSION = '2';
    MINOR_VERSION = '2';
    RELEASE_VERSION = '0';
    EXTRA_VERSION = 'RC4';

    def __init__(self, environment, debug):
        """Constructor.

        @param environment: string The environment name
        @param debug: Boolean Whether to enable debug mode
        """
        self._environment = environment;
        self._debug = bool(debug);
        self._name = None;
        self._rootDir = None;
        self._bundles = dict();    # bundle name -> BundleInterface
        self._bundleMap = dict();  # bundle name -> hierarchy (most derived first)
        self._container = None;
        self._extension = None;
        self._booted = False;
        self._rootDir = self.getRootDir();
        self._name = self.getName();
        self._version = self.getVersion();
        if self._debug:
            # Only tracked in debug mode (see getStartTime()).
            self._startTime = time();
        self.init();

    def init(self):
        """Initialization hook; installs the debug exception handler when
        debug mode is enabled."""
        if self._debug:
            ExceptionHandler.register(self._debug);

    def __clone__(self):
        # pymfony clone hook (not a Python protocol): a cloned kernel is
        # reset to the un-booted state.
        if self._debug:
            self._startTime = time();
        self._booted = False;
        self._container = None;

    def getKernelParameters(self):
        """Returns the kernel parameters exposed to the container.

        @return: dict A dict of kernel.* parameters
        """
        bundles = dict();
        for name, bundle in self._bundles.items():
            bundles[name] = ReflectionObject(bundle).getName();
        parameters = {
            'kernel.root_dir': self._rootDir,
            'kernel.logs_dir': self.getLogDir(),
            'kernel.cache_dir': self.getCacheDir(),
            'kernel.environment': self._environment,
            'kernel.debug': self._debug,
            'kernel.name': self._name,
            'kernel.bundles': bundles,
            'kernel.charset': self.getCharset(),
            'kernel.version': self.getVersion(),
        };
        parameters.update(self.getEnvParameters());
        return parameters;

    def getEnvParameters(self):
        """Returns parameters taken from environment variables whose name
        starts with "<KERNELNAME>__" (double underscores become dots, the
        whole name is lower-cased).

        @return: dict A dict of parameter name -> value
        """
        parameters = dict();
        # BUGFIX: str.replace() returns a new string; the original call
        # discarded its result, so dashes were never stripped from the
        # prefix. Also hoisted out of the loop (it does not depend on key).
        prefix = self.getName().upper() + "__";
        prefix = prefix.replace("-", "");
        for key, value in os.environ.items():
            key = str(key);
            if key.startswith(prefix):
                # NOTE(review): the prefix is kept inside the parameter
                # name ("MYAPP__DB__HOST" -> "myapp.db.host"); confirm that
                # is intended before stripping it.
                name = key.replace("__", ".").lower();
                parameters[name] = value;
        return parameters;

    def boot(self):
        """Boots the current kernel (idempotent)."""
        if self._booted:
            return;
        # init bundles
        self._initializeBundles();
        # init container
        self._initializeContainer();
        for bundle in self.getBundles().values():
            assert isinstance(bundle, ContainerAwareInterface);
            bundle.setContainer(self._container);
            bundle.boot();
        self._booted = True;

    def _initializeContainer(self):
        """Initializes the service container."""
        self._container = self.buildContainer();
        self._container.set('kernel', self);

    def _initializeBundles(self):
        """Initializes the data structures related to the bundle management.

        - the bundles property maps a bundle name to the bundle instance,
        - the bundleMap property maps a bundle name to the bundle inheritance
          hierarchy (most derived bundle first).

        @raise LogicException: if two bundles share a common name
        @raise LogicException: if a bundle tries to extend a non-registered
            bundle
        @raise LogicException: if a bundle tries to extend itself
        @raise LogicException: if two bundles extend the same ancestor
        """
        # init bundles
        self._bundles = dict();
        topMostBundles = dict();   # bundles that declare no parent
        directChildren = dict();   # parent name -> (single) child name
        for bundle in self.registerBundles():
            assert isinstance(bundle, BundleInterface);
            name = bundle.getName();
            if name in self._bundles.keys():
                raise LogicException(
                    'Trying to register two bundles with the same name "{0}"'
                    ''.format(name)
                );
            self._bundles[name] = bundle;
            parentName = bundle.getParent();
            if parentName:
                if parentName in directChildren.keys():
                    raise LogicException(
                        'Bundle "{0}" is directly extended by two bundles '
                        '"{1}" and "{2}".'
                        ''.format(parentName, name, directChildren[parentName])
                    );
                if parentName == name:
                    raise LogicException(
                        'Bundle "{0}" can not extend itself.'.format(name)
                    );
                directChildren[parentName] = name;
            else:
                topMostBundles[name] = bundle;

        # look for orphans: parents that were never registered as bundles
        diff = Array.diff(
            list(directChildren.keys()),
            list(self._bundles.keys()),
        );
        if diff:
            raise LogicException(
                'Bundle "{0}" extends bundle "{1}", which is not registered.'
                ''.format(directChildren[diff[0]], diff[0])
            );

        # inheritance: walk every top-most bundle down to its most derived
        # child and record, for each name, the chain starting at that name.
        self._bundleMap = dict();
        for name, bundle in topMostBundles.items():
            bundleMap = [bundle];
            hierarchy = [name];
            while name in directChildren.keys():
                name = directChildren[name];
                bundleMap.insert(0, self._bundles[name]);
                hierarchy.append(name);
            for name in hierarchy:
                self._bundleMap[name] = list(bundleMap);
                bundleMap.pop();

    def buildContainer(self):
        """Builds and compiles the service container.

        @return: ContainerBuilder The compiled service container

        @raise RuntimeException: if the cache or logs directory cannot be
            created or written to
        """
        resources = {
            'cache': self.getCacheDir(),
            'logs': self.getLogDir(),
        };
        for name, path in resources.items():
            if not os.path.isdir(path):
                try:
                    os.makedirs(path, 0o777);
                except Exception:
                    raise RuntimeException(
                        "Unable to create the {0} directory ({1})\n"
                        "".format(name, path)
                    );
            elif not os.access(path, os.W_OK):
                raise RuntimeException(
                    "Unable to write in the {0} directory ({1})\n"
                    "".format(name, path)
                );

        container = self.getContainerBuilder();
        extensions = list();
        container.addObjectResource(self);

        # register every bundle extension so its alias may be auto-loaded
        for bundle in self._bundles.values():
            extension = bundle.getContainerExtension();
            if extension:
                container.registerExtension(extension);
                extensions.append(extension.getAlias());
            if self._debug:
                container.addObjectResource(bundle);
        for bundle in self._bundles.values():
            bundle.build(container);
        container.addObjectResource(self);

        # ensure these extensions are implicitly loaded
        container.getCompilerPassConfig().setMergePass(
            MergeExtensionConfigurationPass(extensions)
        );
        container.getCompilerPassConfig().setOptimizationPasses([
            ResolveDefinitionTemplatesPass(),
            ResolveParameterPlaceHoldersPass(),
            CheckDefinitionValidityPass(),
            ResolveReferencesToAliasesPass(),
            ResolveInvalidReferencesPass(),
            AnalyzeServiceReferencesPass(),
            CheckCircularReferencesPass(),
            CheckReferenceValidityPass(),
        ]);
        container.getCompilerPassConfig().setRemovingPasses([
            RemovePrivateAliasesPass(),
            RemoveAbstractDefinitionsPass(),
            ReplaceAliasByActualDefinitionPass(),
            RepeatedPass([
                AnalyzeServiceReferencesPass(),
                InlineServiceDefinitionsPass(),
                AnalyzeServiceReferencesPass(),
                RemoveUnusedDefinitionsPass(),
            ]),
            CheckExceptionOnInvalidReferenceBehaviorPass(),
        ]);

        cont = self.registerContainerConfiguration(
            self.getContainerLoader(container)
        );
        if cont is not None:
            container.merge(cont);
        container.compile();
        return container;

    def getNamespace(self):
        """Gets the Kernel namespace (the defining module name).

        @return: string The Kernel namespace
        """
        return str(type(self).__module__);

    def getContainerLoader(self, container):
        """Returns a loader able to load container configuration files.

        @param container: ContainerInterface The service container

        @return: DelegatingLoader The loader
        """
        assert isinstance(container, ContainerInterface);
        locator = FileLocator(self);
        resolver = LoaderResolver([
            IniFileLoader(container, locator),
            JsonFileLoader(container, locator),
        ]);
        return DelegatingLoader(resolver);

    def getContainerBuilder(self):
        """Returns a fresh ContainerBuilder seeded with the kernel
        parameters.

        @return: ContainerBuilder
        """
        return ContainerBuilder(ParameterBag(self.getKernelParameters()));

    def shutdown(self):
        """Shutdowns the kernel (idempotent)."""
        if not self._booted:
            return;
        self._booted = False;
        for bundle in self.getBundles().values():
            assert isinstance(bundle, BundleInterface);
            bundle.shutdown();
            bundle.setContainer(None);
        self._container = None;

    def getBundles(self):
        """Gets the registered bundle instances.

        @return: BundleInterface{} A dict of registered bundle instances
        """
        return self._bundles;

    def isClassInActiveBundle(self, className):
        """Checks if a given class name belongs to an active bundle.

        @param className: string A class name

        @return: Boolean true if the class belongs to an active bundle,
            false otherwise
        """
        for bundle in self._bundles.values():
            assert isinstance(bundle, BundleInterface);
            # equivalent to the previous `0 == str(...).find(...)` check
            if str(className).startswith(bundle.getNamespace()):
                return True;
        return False;

    def getBundle(self, name, first=True):
        """Returns a bundle and optionally its descendants by its name.

        @param name: string Bundle name
        @param first: Boolean Whether to return the first bundle only or
            together with its descendants

        @return: BundleInterface|BundleInterface[] A BundleInterface instance
            or a list of BundleInterface instances if first is false

        @raise InvalidArgumentException: when the bundle is not enabled
        """
        if name not in self._bundleMap:
            raise InvalidArgumentException(
                'Bundle "{0}" does not exist or it is not enabled. Maybe you '
                'forgot to add it in the registerBundles() method of your {1} '
                'file?'.format(name, ReflectionObject(self).getFileName())
            );
        if first is True:
            return self._bundleMap[name][0];
        return self._bundleMap[name];

    def getName(self):
        """Gets the kernel name, derived from the root directory base name
        with non-word characters stripped.

        @return: string The kernel name
        """
        if self._name is None:
            self._name = re.sub(r"[^a-zA-Z0-9_]+", "", os.path.basename(self._rootDir));
        return self._name;

    def getVersion(self):
        """Gets the full version string (version - environment[/debug]).

        @return: string
        """
        return self.VERSION+' - '+self.getEnvironment()+('/debug' if self.isDebug() else '');

    def getEnvironment(self):
        """Gets the environment.

        @return: string The current environment
        """
        return self._environment;

    def getContainer(self):
        """Gets the current container.

        @return: ContainerInterface A ContainerInterface instance
        """
        return self._container;

    def getStartTime(self):
        """Gets the request start time, or -1 when debug is disabled.

        @return: float The request start timestamp
        """
        if self._debug:
            return self._startTime;
        else:
            return -1;

    def isDebug(self):
        """Checks if debug mode is enabled.

        @return: Boolean true if debug mode is enabled, false otherwise
        """
        return self._debug;

    def locateResource(self, name, directory=None, first=True):
        """Returns the file path for a given resource.

        A Resource can be a file or a directory. The resource name must
        follow the following pattern:

            @BundleName/path/to/a/file.something

        where package is the name of the package and the remaining part is
        the relative path in the package.

        If directory is passed, and the first segment of the path is
        Resources, this method will look for a file named:

            directory/BundleName/path/without/Resources

        If BundleName is empty the application root directory is used:

            %kernel.root_dir%/path/to/a/file.something

        @param name: string A resource name to locate
        @param directory: string A directory where to look for the resource
            first
        @param first: Boolean Whether to return the first path or paths for
            all matching bundles

        @return: string|list The absolute path of the resource or a list if
            first is false

        @raise InvalidArgumentException: if the file cannot be found or the
            name is not valid
        @raise RuntimeException: if the name contains invalid/unsafe
            characters
        """
        name = str(name);
        isResource = False;
        if not name.startswith("@"):
            raise InvalidArgumentException(
                'A resource name must start with @ ("{0}" given).'
                "".format(name)
            );
        if ".." in name:
            raise RuntimeException(
                'File name "{0}" contains invalid characters (..).'
                "".format(name)
            );
        bundleName = name[1:];
        # BUGFIX: default the in-bundle path so a bare "@BundleName" does
        # not crash with a NameError below (path used to stay undefined
        # when the name contained no "/").
        path = "";
        if "/" in bundleName:
            bundleName, path = bundleName.split("/", 1);
            if path.startswith("Resources") and directory:
                isResource = True;
                overridePath = path[10:];  # strip the leading "Resources/"
        resourceBundle = None;
        files = [];

        if bundleName:
            bundles = self.getBundle(bundleName, False);
            for bundle in bundles:
                if isResource:
                    # prefer an override file under directory/BundleName/
                    filename = os.path.join(
                        directory,
                        bundle.getName(),
                        overridePath
                    );
                    if os.path.exists(filename):
                        if resourceBundle:
                            raise RuntimeException(
                                '"{0}" resource is hidden by a resource from '
                                'the "{1}" derived bundle. Create a "{2}" '
                                'file to override the bundle resource.'
                                ''.format(
                                    filename,
                                    resourceBundle,
                                    directory+'/'+bundles[0].getName()+'/'+overridePath
                                ));
                        if first:
                            return filename;
                        files.append(filename);
                filename = os.path.join(bundle.getPath(), path);
                if os.path.exists(filename):
                    if first and not isResource:
                        return filename;
                    files.append(filename);
                    resourceBundle = bundle.getName();
        else:
            # check in root_dir when bundle name is empty
            if isResource:
                filename = os.path.join(directory, overridePath);
            else:
                filename = os.path.join(self._rootDir, path);
            if os.path.exists(filename):
                if first and not isResource:
                    return filename;
                files.append(filename);

        if files:
            if first and isResource:
                return files[0];
            else:
                return files;
        raise InvalidArgumentException(
            'Unable to find file "{0}".'.format(name)
        );

    def getRootDir(self):
        """Gets the application root dir (directory of the kernel class
        file).

        @return: string The application root dir
        """
        if self._rootDir is None:
            r = ReflectionObject(self);
            self._rootDir = os.path.dirname(r.getFileName()).replace('\\', '/');
        return self._rootDir;

    def getCacheDir(self):
        """Gets the cache directory.

        @return: string The cache directory
        """
        return self._rootDir+'/cache/'+self._environment;

    def getLogDir(self):
        """Gets the log directory.

        @return: string The log directory
        """
        return self._rootDir+'/logs/'+self._environment;

    def getCharset(self):
        """Gets the charset of the application.

        @return: string The charset
        """
        return 'UTF-8';

    def getConsoleKernel(self):
        """Returns the 'console_kernel' service, booting the kernel first
        if necessary."""
        if not self._booted:
            self.boot();
        return self._container.get('console_kernel');
|
[fix] readd safeguard
|
minor fix
|
# Copyright 2020 Creu Blanca
# @author: Enric Tobella
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo.addons.component.core import Component
class AccountMoveL10nEsFacturaeListener(Component):
    """Listener that queues Factura-e (FACe) generation whenever an
    eligible customer invoice or refund is posted."""

    _name = "account.move.l10n.es.facturae.listener"
    _inherit = "base.event.listener"
    _apply_on = ["account.move"]

    def _get_backend(self, record):
        # The single FACe backend shipped by this module.
        return self.env.ref("l10n_es_facturae_face.face_backend")

    def _get_exchange_record_vals(self, record):
        # Values linking the exchange record back to the originating move.
        return {
            "model": record._name,
            "res_id": record.id,
        }

    def on_post_account_move(self, records):
        exchange_type = "l10n_es_facturae"
        for move in records:
            if move.move_type not in ("out_invoice", "out_refund"):
                continue
            recipient = move.partner_id
            if not (recipient.facturae and recipient.l10n_es_facturae_sending_code):
                continue
            backend = self._get_backend(move)
            if not backend:
                continue
            # Validate now so the user sees the error immediately instead
            # of a silent failure inside the queue job.
            move.validate_facturae_fields()
            if move._has_exchange_record(exchange_type, backend):
                continue
            exchange_record = backend.create_record(
                exchange_type, self._get_exchange_record_vals(move)
            )
            backend._delay_action(exchange_record).exchange_generate(exchange_record)

    def on_generate_account_edi(self, records):
        return self.on_post_account_move(records)
[IMP] l10n_es_facturae_face: Use last edi interface
# Copyright 2020 Creu Blanca
# @author: Enric Tobella
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo.addons.component.core import Component
class AccountMoveL10nEsFacturaeListener(Component):
    """Queue Factura-e generation on the FACe backend whenever an eligible
    customer invoice or refund is posted."""

    _name = "account.move.l10n.es.facturae.listener"
    _inherit = "base.event.listener"
    _apply_on = ["account.move"]

    def _get_backend(self, record):
        # The FACe backend configured by this module.
        return self.env.ref("l10n_es_facturae_face.face_backend")

    def _get_exchange_record_vals(self, record):
        # Values linking the exchange record back to the originating move.
        return {
            "model": record._name,
            "res_id": record.id,
        }

    def _must_skip(self, move):
        """Return True when `move` must not produce a Factura-e exchange."""
        if move.move_type not in ("out_invoice", "out_refund"):
            return True
        partner = move.partner_id
        return not partner.facturae or not partner.l10n_es_facturae_sending_code

    def on_post_account_move(self, records):
        exchange_type = "l10n_es_facturae"
        for move in records:
            if self._must_skip(move):
                continue
            backend = self._get_backend(move)
            if not backend:
                continue
            # Validate now to surface errors to the user; inside the queue
            # job they would fail silently.
            move.validate_facturae_fields()
            if move._has_exchange_record(exchange_type, backend):
                continue
            exchange_record = backend.create_record(
                exchange_type, self._get_exchange_record_vals(move)
            )
            exchange_record.with_delay().action_exchange_generate()

    def on_generate_account_edi(self, records):
        return self.on_post_account_move(records)
|
"""
.. py:module:: fnl.stat.textclass
:synopsis: Tools for developing a text classifier.
.. moduleauthor:: Florian Leitner <florian.leitner@gmail.com>
.. License: GNU Affero GPL v3 (http://www.gnu.org/licenses/agpl.html)
"""
from collections import namedtuple
from itertools import chain
from functools import partial
import numpy as np
from sklearn import metrics
from sklearn.cross_validation import StratifiedKFold
from sklearn.grid_search import GridSearchCV
# Note: the minority label (always first, i.e., at index 0)
# should be used as the positive label to ensure
# precision and recall produce meaningful results
# and that the F-score is robust.
# (name, scoring function) pairs evaluated per cross-validation fold.
METRICS = [
    ('Accuracy', metrics.accuracy_score),
    ('Precision', partial(metrics.precision_score, pos_label=0)),
    ('Recall', partial(metrics.recall_score, pos_label=0)),
    ('F1-score', partial(metrics.f1_score, pos_label=0)),
    ('MCC score', metrics.matthews_corrcoef),
]

# A scoring function that is robust against class-imbalances.
Scorer = metrics.make_scorer(metrics.matthews_corrcoef)

# A less restrictive stop-word list
# (compared to the built-in scikit-learn list).
STOP_WORDS = {
    'a',
    'about',
    'again',
    'all',
    'also',
    'an',
    'and',
    'any',
    'are',
    'as',
    'at',
    'be',
    'because',
    'been',
    'before',
    'being',
    'between',
    'both',
    'but',
    'by',
    'can',
    'could',
    'did',
    'do',
    'does',
    'during',
    'each',
    'for',
    'from',
    'further',
    'had',
    'has',
    'have',
    'having',
    'here',
    'how',
    'however',
    'i',
    'if',
    'in',
    'into',
    'is',
    'it',
    'its',
    'itself',
    'most',
    'no',
    'nor',
    'not',
    'of',
    'on',
    'or',
    'our',
    'should',
    'so',
    'some',
    'such',
    'than',
    'that',
    'the',
    'their',
    'theirs',
    'them',
    'then',
    'there',
    'therefor',
    'therefore',
    'these',
    'they',
    'this',
    'those',
    'through',
    'thus',
    'to',
    'very',
    'was',
    'we',
    'were',
    'what',
    'when',
    'which',
    'while',
    'with',
    'would',
}

# Contrary to the scikit-learn built in list,
# also add capitalized versions of all words
# to filter case-sensitive texts, too.
STOP_WORDS.update(w.capitalize() for w in list(STOP_WORDS))
STOP_WORDS = frozenset(STOP_WORDS)

# Words that are often classified as gene names.
# Tokens in this set are emitted verbatim by the IOB reader(s) below
# instead of being masked with their entity tag.
UNMASK = frozenset({
    #'-',
    #'.',
    'Ab',
    'anti',
    'antibody',
    'antibodies',
    'binding',
    'ChIP',
    'Chromatin',
    'construct',
    'constructs',
    'enhancer',
    'element',
    'elements',
    'exon',
    'factor',
    'family',
    'Fig',
    'fragment',
    'gene',
    'genes',
    'GFP',
    'human',
    'islets',
    'isoform',
    'isoforms',
    'kb',
    'luciferase',
    'mouse',
    'motif',
    'mutant',
    'mutants',
    'mRNA',
    'proximal',
    'promoter',
    'promoters',
    'protein',
    'proteins',
    'rat',
    'reporter',
    'region',
    'regions',
    'repressor',
    'sequence',
    'sequences',
    'shRNA',
    'shRNAs',
    'siRNA',
    'siRNAs',
    'silencer',
    'site',
    'sites',
    'Table',
    'transcription',
})

# Reporting setup as chosen by the user.
# parameters/top/worst/fn/fp/classification toggle the various printouts;
# folds is the number of cross-validation folds.
Report = namedtuple('Report',
                    'parameters top worst fn fp classification folds')
class Data(object):
    """
    The data object is a container for all data relevant to the classifiers.
    """

    def __init__(self, *files, column=None, decap=False):
        """
        Create a new data object with the following attributes:

        * instances - list of raw text instances
        * labels - array of instance labels in same order as raw text
        * features - matrix of feature vectors per text instance
        * names - array of feature names in same order as features

        Both features and names are undefined until extracted
        using some Vectorizer.

        Use `decap=True` to lower-case the first letter of each sentence.
        """
        try:
            if column is None:
                # plain-text mode: one instance per line; raw text and
                # instances are the same objects
                self.instances = [f.readlines() for f in files]
                self.raw = self.instances
            else:
                # NER/IOB token mode: parse token files, keeping both the
                # raw and the entity-masked version of each line
                read = ReadNERInput(column)
                self.raw, self.instances = zip(*[read(f) for f in files])
        except UnicodeDecodeError as e:
            import sys
            print('decoding error:', e.reason, 'in input file')
            sys.exit(1)

        if decap:
            # lower-case the first character of every instance in place
            for group in self.instances:
                for i in range(len(group)):
                    s = group[i]
                    group[i] = "{}{}".format(s[0].lower(), s[1:])

        # ensure the minority label(s) come first (evaluation!)
        # NOTE: sorted() is stable, so instances and raw stay aligned even
        # when two input groups have equal sizes.
        self.instances = sorted(self.instances, key=len)
        self.classes = len(self.instances)
        self.raw = sorted(self.raw, key=len)
        # label i is assigned to every instance of the i-th (sorted) group
        self.labels = np.concatenate([
            (np.zeros(len(data), dtype=np.uint8) + i)
            for i, data in enumerate(self.instances)
        ])
        self.instances = list(chain.from_iterable(self.instances))
        self.raw = list(chain.from_iterable(self.raw))
        self.features = None
        self.names = None

    def extract(self, vectorizer):
        """Extract the features from the instances using a Vectorizer."""
        self.features = vectorizer.fit_transform(self.instances, self.labels)
        self.names = np.asarray(vectorizer.get_feature_names())
        return self

    def transform(self, method):
        """Transform the features with a selection or transformation method."""
        self.features = method.fit_transform(self.features, self.labels)
        return self

    @property
    def n_features(self):
        """The number of features."""
        return self.features.shape[1]

    @property
    def n_instances(self):
        """The (total) number of instances."""
        return self.features.shape[0]

    @property
    def sizes(self):
        """Number of instances per class."""
        counter = {}
        for l in self.labels:
            try:
                counter[l] += 1
            except KeyError:
                counter[l] = 1
        return [counter[l] for l in sorted(counter.keys())]
class Line(object):
    """Accumulates tokens for one output line while reading IOB input.

    `buffer` holds the masked tokens, `raw` the original tokens (with
    entity spans wrapped in <type>...</type> markers), and `entities`
    maps each entity type to the set of entity strings seen on the line.
    """

    def __init__(self):
        self.buffer = []            # masked token stream
        self.raw = []               # original tokens plus entity markers
        self.entities = {}          # entity type -> set of entity strings
        self._entity = []           # tokens of the currently open entity
        self._entity_type = None
        self._filter_fig = False

    def hasContent(self):
        return bool(self.buffer)

    def parsingEntity(self):
        return self._entity_type is not None

    def openEntity(self, name, token):
        if self._entity_type != name:
            # a different (or no) entity was open: close it, start a new one
            self.closeEntity()
            self.raw.append('<{}>'.format(name))
            self._entity_type = name
            self.entities.setdefault(name, set())
        self._entity.append(token)

    def closeEntity(self):
        if self._entity_type:
            kind = self._entity_type
            self.raw.append('</{}>'.format(kind))
            self.entities[kind].add(' '.join(self._entity))
            self._entity = []
            self._entity_type = None
        self.stopFilteringFigure()

    def filteringFigure(self):
        return self._filter_fig

    def startFilteringFigure(self):
        self._filter_fig = True

    def stopFilteringFigure(self):
        self._filter_fig = False

    def append(self, token, raw=None):
        self.buffer.append(token)
        self.raw.append(raw if raw else token)
def ReadNERInput(word_col=2):
    """
    Generate a function to read NER/IOB token files
    (with the token word in column `word_col`, 3rd by default).
    """
    def read(stream, tag_col=-1):
        """Read NER/IOB token files (with the NER tag in last column).

        Returns (raw, lines): the raw content lines and the lines with
        tagged tokens masked by their entity tag plus per-type entity
        counts appended at the end of each line.
        """
        data = Line()
        lines = []
        raw = []
        for line in stream:
            content = line.strip()
            if not content:
                # blank line: flush the accumulated sentence, if any
                if data.hasContent():
                    data.closeEntity()
                    # append "<type>-<count>" stats for every entity type
                    entity_counts = ' '.join(
                        '{}-{}'.format(e_type, len(data.entities[e_type]))
                        for e_type in data.entities
                    )
                    lines.append('{} {}'.format(' '.join(data.buffer),
                                                entity_counts))
                    raw.append(' '.join(data.raw))
                    data = Line()
            else:
                items = content.split('\t')
                tag = items[tag_col]
                token = items[word_col]
                if tag == 'O':
                    data.closeEntity()
                    data.append(token)
                elif tag.startswith('B-') or \
                        tag.startswith('I-'):
                    if token in UNMASK:
                        # frequent false-positive gene words stay unmasked;
                        # "Fig" additionally suppresses the rest of the span
                        data.closeEntity()
                        if token == 'Fig':
                            data.startFilteringFigure()
                        else:
                            data.stopFilteringFigure()
                        data.append(token)
                    elif data.filteringFigure():
                        data.append(token)
                    elif items[word_col] in ('.', '-'):
                        # punctuation inside an open entity is dropped
                        if not data.parsingEntity():
                            data.buffer.append(token)
                            data.raw.append(token)
                    else:
                        data.openEntity(tag[2:], token)
                        data.append(tag, raw=token)
                else:
                    raise ValueError('unknown IOB tag "%s" for "%s"' %
                                     (tag, token))
        # NOTE(review): a final sentence without a trailing blank line is
        # never flushed -- confirm input files always end with one.
        return raw, lines
    return read
def GridSearch(data, pipeline, parameters, report):
    """Run an exhaustive grid search over the `parameters` of a `pipeline`
    and print the best score and the winning parameter values."""
    searcher = GridSearchCV(pipeline, parameters, scoring=Scorer,
                            cv=report.folds, refit=False, n_jobs=4,
                            verbose=1)
    searcher.fit(data.instances, data.labels)
    print("best score:", searcher.best_score_)
    for key in searcher.best_params_:
        print('{}:\t{}'.format(key, repr(searcher.best_params_[key])))
def Predict(data, pipeline):
    """Predict labels for `data` with a fitted sklearn `pipeline` and print
    each (stripped) raw instance together with its predicted label."""
    predicted = pipeline.predict(data.instances)
    for index, label in enumerate(predicted):
        print(data.raw[index].strip(), label, sep='\t')
def Classify(data, classifier, report):
    """
    Classify `data` using some sklearn `classifier`,
    producing output as given by `report`.
    """
    results = {}
    # one score array per metric, with one slot per cross-validation fold
    scores = {n: np.zeros(report.folds) for n, f in METRICS}
    results[classifier.__class__.__name__] = scores
    cross_val = StratifiedKFold(data.labels, n_folds=report.folds)

    for step, (train, test) in enumerate(cross_val):
        classifier.fit(data.features[train], data.labels[train])
        targets = data.labels[test]
        predictions = classifier.predict(data.features[test])
        for measure, scoring_function in METRICS:
            if data.classes > 2 and measure == 'MCC score':
                # MCC is only computed for binary problems here
                scores[measure][step] = 0.0
            else:
                scores[measure][step] = scoring_function(targets, predictions)
        if report.fn or report.fp:
            # per-fold listing of misclassified instances
            PrintErrors(test, predictions, targets, data, report)
        if report.classification:
            print(metrics.classification_report(targets, predictions))

    if (report.top or report.worst):
        # feature significance from the last fitted fold's coefficients
        PrintFeatures(classifier, data, report)

    if report.top or report.worst or report.classification or \
            report.fn or report.fp or report.parameters:
        # separate the verbose output from the final result table
        print()

    EvaluationReport(results)
def PrintErrors(test, predictions, targets, data, report):
    """Print the raw text of false-negative (target label 0) and
    false-positive instances, as enabled by `report.fn` / `report.fp`."""
    for idx, prediction in enumerate(predictions):
        truth = targets[idx]
        if prediction == truth:
            continue
        if truth == 0:
            if report.fn:
                print("FN:", data.raw[test[idx]])
        elif report.fp:
            print("FP:", data.raw[test[idx]])
def PrintFeatures(classifier, data, report):
    """Print the `report.top` most and `report.worst` least significant
    feature names per coefficient group of a linear classifier."""
    for group, weights in enumerate(classifier.coef_):
        order = np.argsort(weights)
        best = order[-report.top:] if report.top else []
        worst = order[:report.worst] if report.worst else []
        print('group {2} features (top-worst): "{0}", ... "{1}"'.format(
            '", "'.join(data.names[best]),
            '", "'.join(data.names[worst]), group + 1,
        ))
def EvaluationReport(results):
    """Print a tab-separated result table: one column per classifier, one
    row per metric, cells formatted as "mean+/-2*std" (percentages)."""
    names = sorted(results)
    head = '{:<10s}'
    fmt = "{:>2.1f}+/-{:.2f}"
    print('MEASURE \t{}'.format('\t'.join(head.format(n) for n in names)))
    for measure, _ in METRICS:
        row = [head.format(measure)]
        row.extend(
            fmt.format(100 * results[n][measure].mean(),
                       200 * results[n][measure].std())
            for n in names
        )
        print('\t'.join(row))
def PrintParams(klass, report):
    """Print a banner with the classifier's class name (when any reporting
    option is active) and, if requested, all of its parameters."""
    if (report.top or report.worst or report.classification or
            report.fn or report.fp or report.parameters):
        banner = "= {} =".format(klass.__class__.__name__)
        bar = '=' * len(banner)
        print(bar)
        print(banner)
        print(bar)
    if report.parameters:
        print("\n".join(
            "{}: {}".format(key, value)
            for key, value in klass.get_params().items()
        ))
added confidence score output to classification results
"""
.. py:module:: fnl.stat.textclass
:synopsis: Tools for developing a text classifier.
.. moduleauthor:: Florian Leitner <florian.leitner@gmail.com>
.. License: GNU Affero GPL v3 (http://www.gnu.org/licenses/agpl.html)
"""
from collections import namedtuple
from itertools import chain
from functools import partial
import numpy as np
from sklearn import metrics
from sklearn.cross_validation import StratifiedKFold
from sklearn.grid_search import GridSearchCV
# Note: the minority label (always first, i.e., at index 0)
# should be used as the positive label to ensure
# precision and recall produce meaningful results
# and that the F-score is robust.
METRICS = [
('Accuracy', metrics.accuracy_score),
('Precision', partial(metrics.precision_score, pos_label=0)),
('Recall', partial(metrics.recall_score, pos_label=0)),
('F1-score', partial(metrics.f1_score, pos_label=0)),
('MCC score', metrics.matthews_corrcoef),
]
# A scoring function that is robust against class-imbalances.
Scorer = metrics.make_scorer(metrics.matthews_corrcoef)
# A less restrictive stop-word list
# (compared to the built-in scikit-learn list).
STOP_WORDS = {
'a',
'about',
'again',
'all',
'also',
'an',
'and',
'any',
'are',
'as',
'at',
'be',
'because',
'been',
'before',
'being',
'between',
'both',
'but',
'by',
'can',
'could',
'did',
'do',
'does',
'during',
'each',
'for',
'from',
'further',
'had',
'has',
'have',
'having',
'here',
'how',
'however',
'i',
'if',
'in',
'into',
'is',
'it',
'its',
'itself',
'most',
'no',
'nor',
'not',
'of',
'on',
'or',
'our',
'should',
'so',
'some',
'such',
'than',
'that',
'the',
'their',
'theirs',
'them',
'then',
'there',
'therefor',
'therefore',
'these',
'they',
'this',
'those',
'through',
'thus',
'to',
'very',
'was',
'we',
'were',
'what',
'when',
'which',
'while',
'with',
'would',
}
# Contrary to the scikit-learn built in list,
# also add capitalized versions of all words
# to filter case-sensitive texts, too.
STOP_WORDS.update(w.capitalize() for w in list(STOP_WORDS))
STOP_WORDS = frozenset(STOP_WORDS)
# Words that are often classified as gene names.
UNMASK = frozenset({
#'-',
#'.',
'Ab',
'anti',
'antibody',
'antibodies',
'binding',
'ChIP',
'Chromatin',
'construct',
'constructs',
'enhancer',
'element',
'elements',
'exon',
'factor',
'family',
'Fig',
'fragment',
'gene',
'genes',
'GFP',
'human',
'islets',
'isoform',
'isoforms',
'kb',
'luciferase',
'mouse',
'motif',
'mutant',
'mutants',
'mRNA',
'proximal',
'promoter',
'promoters',
'protein',
'proteins',
'rat',
'reporter',
'region',
'regions',
'repressor',
'sequence',
'sequences',
'shRNA',
'shRNAs',
'siRNA',
'siRNAs',
'silencer',
'site',
'sites',
'Table',
'transcription',
})
# Reporting setup as chosen by the user.
Report = namedtuple('Report',
'parameters top worst fn fp classification folds')
class Data:
    """
    The data object is a container for all data relevant to the classifiers.
    """

    def __init__(self, *files, column=None, decap=False):
        """
        Create a new data object with the following attributes:

        * instances - list of raw text instances
        * labels - array of instance labels in same order as raw text
        * features - matrix of feature vectors per text instance
        * names - array of feature names in same order as features

        Both features and names are undefined until extracted
        using some Vectorizer.

        Use `decap=True` to lower-case the first letter of each sentence.
        """
        try:
            if column is None:
                # plain-text mode: one instance per line; raw text and
                # instances are the same objects
                self.instances = [f.readlines() for f in files]
                self.raw = self.instances
            else:
                # IOB token mode: parse token files, keeping the raw and
                # the entity-masked version of each line
                read = TokenReader(word_col=column)
                self.raw, self.instances = zip(*[read(f) for f in files])
        except UnicodeDecodeError as e:
            import sys
            print('decoding error:', e.reason, 'in input file')
            sys.exit(1)

        if decap:
            # lower-case the first character of every instance in place
            for group in self.instances:
                for i in range(len(group)):
                    s = group[i]
                    group[i] = "{}{}".format(s[0].lower(), s[1:])

        # ensure the minority label(s) come first (evaluation!)
        # NOTE: sorted() is stable, so instances and raw stay aligned even
        # when two input groups have equal sizes.
        self.instances = sorted(self.instances, key=len)
        self.classes = len(self.instances)
        self.raw = sorted(self.raw, key=len)
        # label i is assigned to every instance of the i-th (sorted) group
        self.labels = np.concatenate([
            (np.zeros(len(data), dtype=np.uint8) + i)
            for i, data in enumerate(self.instances)
        ])
        self.instances = list(chain.from_iterable(self.instances))
        self.raw = list(chain.from_iterable(self.raw))
        self.features = None
        self.names = None

    def extract(self, vectorizer):
        """Extract the features from the instances using a Vectorizer."""
        self.features = vectorizer.fit_transform(self.instances, self.labels)
        self.names = np.asarray(vectorizer.get_feature_names())
        return self

    def transform(self, method):
        """Transform the features with a selection or transformation method."""
        self.features = method.fit_transform(self.features, self.labels)
        return self

    @property
    def n_features(self):
        """The number of features."""
        return self.features.shape[1]

    @property
    def n_instances(self):
        """The (total) number of instances."""
        return self.features.shape[0]

    @property
    def sizes(self):
        """Number of instances per class."""
        counter = {}
        for l in self.labels:
            try:
                counter[l] += 1
            except KeyError:
                counter[l] = 1
        return [counter[l] for l in sorted(counter.keys())]
class Line:
    """
    A line being assembled by the `TokenReader`: masked tokens in
    `buffer`, original tokens (with <type>...</type> entity markers) in
    `raw`, and the entity strings per type in `entities`.
    """

    def __init__(self):
        self.buffer = []            # masked token stream
        self.raw = []               # original tokens plus entity markers
        self.entities = {}          # entity type -> set of entity strings
        self._entity = []           # tokens of the currently open entity
        self._entity_type = None
        self._filter_fig = False

    def hasContent(self):
        return len(self.buffer) > 0

    def parsingEntity(self):
        return self._entity_type is not None

    def openEntity(self, name, token):
        if self._entity_type != name:
            # another (or no) entity was open: close it and start a new one
            self.closeEntity()
            self.raw.append('<{}>'.format(name))
            self._entity_type = name
            if name not in self.entities:
                self.entities[name] = set()
        self._entity.append(token)

    def closeEntity(self):
        if self._entity_type:
            entity_type = self._entity_type
            self.raw.append('</{}>'.format(entity_type))
            self.entities[entity_type].add(' '.join(self._entity))
            self._entity = []
            self._entity_type = None
        self.stopFilteringFigure()

    def filteringFigure(self):
        return self._filter_fig

    def startFilteringFigure(self):
        self._filter_fig = True

    def stopFilteringFigure(self):
        self._filter_fig = False

    def append(self, token, raw=None):
        self.buffer.append(token)
        self.raw.append(raw if raw else token)
class TokenReader:
    """
    A functor to read IOB entity token files,
    masking tagged tokens with their tags.
    **Special Case**: For word tokens of the value "Fig"
    that have been tagged with an I tag,
    the entire (IB+) tag is ignored.

    NOTE(review): relies on the module-level `UNMASK` collection and the
    `Line` class, both defined elsewhere in this file.
    """
    def __init__(self, word_col=2, tag_col=-1):
        """
        Set the actual token to column `word_col`, 3rd by default.
        Set the IOB-tag to column `tag_col`, last by default.
        """
        self.word_col = word_col
        self.tag_col = tag_col
        self._lines = None  # masked lines, rebuilt on every __call__
        self._raw = None  # raw (marked-up) lines, rebuilt on every __call__
    def __call__(self, stream):
        """
        Parse an input stream of tagged tokens.
        Return two lists:
        one with the raw (content) lines using only the "word" tokens and
        one where all tagged tokens have been replaced with their (I/B-) tags.
        """
        data = Line()
        self._lines = []
        self._raw = []
        for line in stream:
            content = line.strip()
            if not content:
                # A blank line terminates the current sentence/line.
                if data.hasContent():
                    data = self._composeLine(data)
            else:
                # One tab-separated token record per non-blank line.
                items = content.split('\t')
                TokenReader._parseTokenTag(data, items[self.word_col],
                                           items[self.tag_col])
        return self._raw, self._lines
    @staticmethod
    def _parseTokenTag(data, token, tag):
        # Dispatch on the IOB tag of a single token.
        if tag == 'O':
            # Outside any entity: close whatever was open.
            data.closeEntity()
            data.append(token)
        elif tag.startswith('B-') or \
             tag.startswith('I-'):
            if token in UNMASK:
                # Tokens in UNMASK are emitted verbatim, never masked;
                # "Fig" additionally starts the figure-filtering state.
                data.closeEntity()
                if token == 'Fig':
                    data.startFilteringFigure()
                else:
                    data.stopFilteringFigure()
                data.append(token)
            elif data.filteringFigure():
                # Inside a "Fig ..." reference: ignore the entity tag.
                data.append(token)
            elif token in ('.', '-'):
                # NOTE(review): outside an entity the token lands in
                # `buffer` twice (once directly, once via append()) —
                # looks suspicious; confirm this double-append is intended.
                if not data.parsingEntity():
                    data.buffer.append(token)
                data.append(token)
            else:
                # Mask the token with its tag name (tag sans "B-"/"I-").
                data.openEntity(tag[2:], token)
                data.append(tag, raw=token)
        else:
            raise ValueError('unknown IOB tag "%s" for "%s"' % (tag, token))
    def _composeLine(self, data):
        # Flush one finished Line into the result lists; return a fresh one.
        data.closeEntity()
        # Per-tag mention counts are appended to the masked line.
        entity_counts = ' '.join(
            '{}-{}'.format(e_type, len(data.entities[e_type]))
            for e_type in data.entities
        )
        self._lines.append('{} {}'.format(' '.join(data.buffer), entity_counts))
        self._raw.append(' '.join(data.raw))
        return Line()
def GridSearch(data, pipeline, parameters, report):
    """Do a grid search for the `parameters` of a `pipeline`.

    Uses `report.folds`-fold cross-validation with the module-level
    `Scorer`; prints the best score and the best parameter settings.
    """
    grid = GridSearchCV(pipeline, parameters, scoring=Scorer,
                        cv=report.folds, refit=False, n_jobs=4, verbose=1)
    grid.fit(data.instances, data.labels)
    print("best score:", grid.best_score_)
    for name, value in grid.best_params_.items():
        print('{}:\t{}'.format(name, repr(value)))
def Predict(data, pipeline, sep='\t'):
    """
    Predict and print the labels for `data` using a sklearn `pipeline`.
    In addition, a confidence value for each label is printed.
    The lines, the label, and the confidence value are separated by `sep`.
    """
    labels = pipeline.predict(data.instances)
    # Find an appropriate confidence score method given the predictor,
    # checked in order of preference.
    if hasattr(pipeline, "decision_function"):
        scorer = pipeline.decision_function
    elif hasattr(pipeline, "predict_log_proba"):
        scorer = pipeline.predict_log_proba
    elif hasattr(pipeline, "predict_proba"):
        scorer = pipeline.predict_proba
    else:
        # No known method; default to a "100%" confidence.
        # (def instead of an assigned lambda: same behavior, PEP 8 idiom.)
        def scorer(X):
            return [1.0] * len(X)
    scores = scorer(data.instances)
    for i, (label, score) in enumerate(zip(labels, scores)):
        # For multi-label problems, get the score of the final label.
        score = score[label] if isinstance(score, np.ndarray) else score
        print(data.raw[i].strip(), label, score, sep=sep)
def Classify(data, classifier, report):
    """
    Classify `data` using some sklearn `classifier`,
    producing output as given by `report`.

    Runs `report.folds`-fold stratified cross-validation, collects one
    score per fold for every metric in the module-level `METRICS`, and
    finishes with an aggregated `EvaluationReport`.
    """
    results = {}
    # One score array per metric name, one slot per CV fold.
    scores = {n: np.zeros(report.folds) for n, f in METRICS}
    results[classifier.__class__.__name__] = scores
    # NOTE(review): pre-0.18 scikit-learn API (StratifiedKFold(y, n_folds=...)).
    cross_val = StratifiedKFold(data.labels, n_folds=report.folds)
    for step, (train, test) in enumerate(cross_val):
        classifier.fit(data.features[train], data.labels[train])
        targets = data.labels[test]
        predictions = classifier.predict(data.features[test])
        for measure, scoring_function in METRICS:
            # presumably MCC is only computed for binary problems here;
            # multi-class folds record a 0.0 placeholder — confirm intent.
            if data.classes > 2 and measure == 'MCC score':
                scores[measure][step] = 0.0
            else:
                scores[measure][step] = scoring_function(targets, predictions)
        # Optional per-fold diagnostics, controlled by the report flags.
        if report.fn or report.fp:
            PrintErrors(test, predictions, targets, data, report)
        if report.classification:
            print(metrics.classification_report(targets, predictions))
        if (report.top or report.worst):
            PrintFeatures(classifier, data, report)
        if report.top or report.worst or report.classification or \
           report.fn or report.fp or report.parameters:
            print()
    EvaluationReport(results)
def PrintErrors(test, predictions, targets, data, report):
    """Print the raw text of misclassified instances.

    `test` holds the original indices of the current fold, so the raw
    line of each error can be looked up in `data.raw`.
    NOTE(review): an instance with true label 0 is reported as "FN" —
    confirm label 0 really denotes the positive class here.
    """
    for idx, (predicted, expected) in enumerate(zip(predictions, targets)):
        if predicted == expected:
            continue
        if expected == 0 and report.fn:
            print("FN:", data.raw[test[idx]])
        elif expected != 0 and report.fp:
            print("FP:", data.raw[test[idx]])
def PrintFeatures(classifier, data, report):
    """Print the highest/lowest-weighted feature names per coefficient row.

    Requires a fitted linear classifier (`coef_`) and `data.names` as a
    numpy array so index arrays can be used for the lookup.
    """
    for row, weights in enumerate(classifier.coef_):
        order = np.argsort(weights)
        best = order[-report.top:] if report.top else []
        worst = order[:report.worst] if report.worst else []
        print('group {2} features (top-worst): "{0}", ... "{1}"'.format(
            '", "'.join(data.names[best]),
            '", "'.join(data.names[worst]), row + 1,
        ))
def EvaluationReport(results):
    """Print a tab-separated table of scores: one row per metric in the
    module-level `METRICS`, one column per classifier in `results`.

    Cells show the mean as a percentage; the spread after "+/-" is
    `200 * std`, i.e. two standard deviations expressed as a percentage.
    """
    names = sorted(results.keys())
    head = '{:<10s}'
    fmt = "{:>2.1f}+/-{:.2f}"
    print('MEASURE \t{}'.format('\t'.join(
        head.format(n) for n in names
    )))
    for measure, _scorer in METRICS:
        row = [head.format(measure)]
        for n in names:
            values = results[n][measure]
            row.append(fmt.format(100 * values.mean(), 200 * values.std()))
        print('\t'.join(row))
def PrintParams(klass, report):
    """Print a banner for a classifier and, if requested, its parameters.

    The banner is emitted whenever any verbose report flag is set;
    the `get_params()` listing only when `report.parameters` is set.
    """
    verbose = (report.top or report.worst or report.classification or
               report.fn or report.fp or report.parameters)
    if verbose:
        banner = "= {} =".format(klass.__class__.__name__)
        bar = '=' * len(banner)
        print(bar)
        print(banner)
        print(bar)
    if report.parameters:
        lines = ("{}: {}".format(key, val)
                 for key, val in klass.get_params().items())
        print("\n".join(lines))
|
# -*- coding: utf-8 -*-
# See LICENSE file for copyright and license details
import table
class Generator(object):
    """Emit C source text for the declarations collected in a `table`.

    NOTE(review): ``self.table`` is never created in ``__init__``; the
    caller is expected to assign it before ``generate()`` is invoked.
    Fix over the original: the misspelled private names
    ``_increnent_indent``/``_decrenent_indent``/
    ``_generate_expression_dependancies`` are spelled correctly; all
    call sites are inside this class.
    """

    def __init__(self):
        # Current nesting depth of the emitted C code.
        self.indent_level = 0

    def _indent(self):
        """Whitespace prefix for the current nesting level."""
        return self.indent_level * '    '

    def _increment_indent(self):
        """Enter one nesting level."""
        self.indent_level += 1

    def _decrement_indent(self):
        """Leave one nesting level."""
        self.indent_level -= 1

    def _generate_function_parameters(self, parameter_list):
        """Render a C parameter list; an empty list renders as 'void'."""
        if len(parameter_list) == 0:
            return 'void'
        out = ''
        is_first = True
        for parameter in parameter_list:
            if is_first:
                is_first = False
            else:
                out += ', '
            out += parameter.type.value + ' ' + parameter.name
        return out

    def _generate_function_header(self, name, interface):
        """Render ``void <name>(...)``.

        Every generated function returns void in C; a declared return
        type is delivered through a leading ``__result`` out-pointer.
        """
        out = ''
        out += 'void ' + name + '('
        if interface.return_type:
            out += interface.return_type.value + '* __result, '
        out += self._generate_function_parameters(interface.parameter_list)
        out += ')'
        return out

    def _generate_expression_dependencies(self, function, expression):
        """Emit the code of nested function-call arguments before their use.

        NOTE(review): indexes into ``self.table.declaration_list[-1]``
        (the *last* declaration) rather than ``function`` — confirm.
        """
        out = ''
        for argument in expression.argument_id_list:
            if isinstance(argument, table.LinkToFunctionCall):
                last_declaration = self.table.declaration_list[-1]
                out += self._generate_expression(
                    function, last_declaration.expression_list[argument.id])
        return out

    def _generate_argument(self, function, argument):
        """Render one call argument: a number constant or a temp variable."""
        out = ''
        if isinstance(argument, table.LinkToNumberConstant):
            out += str(function.constant_list[argument.id].value)
        elif isinstance(argument, table.LinkToFunctionCall):
            result_id = function.expression_list[argument.id].result_id.id
            out += str(function.variable_list[result_id].name)
        else:
            raise Exception("Not Implemented")
        return out

    def _generate_function_call_expression_arguments(
            self, function, expression):
        """Render the argument list; the result slot is passed by pointer."""
        out = ''
        # out var. passed by pointer
        # TODO: check in symtable if there are any return value
        out += '&' + function.variable_list[expression.result_id.id].name
        for argument in expression.argument_id_list:
            out += ', ' + self._generate_argument(function, argument)
        return out

    def _generate_function_call_expression(self, function, expression):
        """Render one call statement, preceded by its dependencies."""
        out = ''
        out += self._generate_expression_dependencies(function, expression)
        out += self._indent()
        out += expression.name
        out += '('
        out += self._generate_function_call_expression_arguments(
            function, expression)
        out += ');\n'
        return out

    def _generate_expression(self, function, expression):
        ''' Generate evaluation code. '''
        out = ''
        if isinstance(expression, table.FunctionCallExpression):
            out += self._generate_function_call_expression(
                function, expression)
        else:
            raise Exception("Not Implemented")
        return out

    def _generate_variable_declaration_statement(self, function, statement):
        """Render ``var = <expression result variable>;``."""
        out = ''
        expression = function.expression_list[statement.expression_id.id]
        out += self._generate_expression(function, expression)
        expression_id = statement.expression_id.id
        result_id = function.expression_list[expression_id].result_id.id
        out += self._indent()
        out += function.variable_list[statement.variable_id].name
        out += ' = ' + function.variable_list[result_id].name + ';\n'
        return out

    def _generate_function_call_statement(self, function, statement):
        """Render a bare call statement (the result variable is unused)."""
        out = ''
        expression = function.expression_list[statement.expression_id.id]
        out += self._generate_expression(function, expression)
        return out

    def _generate_if_statement(self, function, statement):
        """Render ``if (<condition result>) { ... }``."""
        out = ''
        expression = function.expression_list[statement.expression_id.id]
        assert isinstance(expression, table.FunctionCallExpression)
        out += self._generate_expression(function, expression)
        out += self._indent() + 'if ('
        out += function.variable_list[expression.result_id.id].name
        out += ') {\n'
        block = function.block_list[statement.if_branch_id]
        self._increment_indent()
        out += self._generate_block(function, block)
        self._decrement_indent()
        out += self._indent() + '}' + '\n'
        return out

    def _generate_return_statement(self, function, statement):
        """Render ``return <arg>;`` evaluating a call dependency first."""
        out = ''
        if isinstance(statement.expression_id, table.LinkToFunctionCall):
            expression = function.expression_list[statement.expression_id.id]
            out += self._generate_expression(function, expression)
        out += self._indent()
        out += 'return '
        out += self._generate_argument(function, statement.expression_id)
        out += ';' + '\n'
        return out

    def _generate_statement(self, function, statement):
        """Dispatch on the statement type."""
        out = ''
        if isinstance(statement, table.VariableDeclarationStatement):
            out += self._generate_variable_declaration_statement(
                function, statement)
        elif isinstance(statement, table.FunctionCallStatement):
            out += self._generate_function_call_statement(function, statement)
        elif isinstance(statement, table.IfStatement):
            out += self._generate_if_statement(function, statement)
        elif isinstance(statement, table.ReturnStatement):
            out += self._generate_return_statement(function, statement)
        else:
            raise Exception("Not Implemented")
        return out

    def _generate_block(self, function, block):
        """Render every statement of one block."""
        out = ''
        for statement in block:
            out += self._generate_statement(function, statement)
        return out

    def _generate_local_variables(self, function):
        """Declare every local (incl. temporaries) at function start."""
        out = ''
        for variable in function.variable_list:
            out += self._indent()
            out += variable.type + ' ' + variable.name
            out += ';' + '\n'
        return out

    def _generate_function(self, function):
        """Render a complete function definition."""
        out = ''
        out += self._generate_function_header(
            function.name, function.interface)
        out += ' {\n'
        self._increment_indent()
        out += self._generate_local_variables(function)
        out += '\n'
        out += self._generate_block(function, function.block_list[0])
        self._decrement_indent()
        out += '}\n'
        return out

    def _generate_forward_declarations(self):
        """Render a prototype for every function declaration."""
        out = ''
        for declaration in self.table.declaration_list:
            if isinstance(declaration, table.Function):
                out += self._generate_function_header(
                    declaration.name, declaration.interface)
                out += ';\n'
        return out

    def generate(self):
        """Render the whole translation unit and return it as a string."""
        out = ''
        if self.table.import_list is not None:
            for import_node in self.table.import_list:
                out += '// import: ' + import_node + '\n'
            out += '\n'
        out += self._generate_forward_declarations()
        out += '\n'
        for declaration in self.table.declaration_list:
            assert isinstance(declaration, table.Function)
            out += self._generate_function(declaration)
            out += '\n'
        return out
# vim: set tabstop=4 shiftwidth=4 softtabstop=4 expandtab:
Generator: Added _generate_imports()
# -*- coding: utf-8 -*-
# See LICENSE file for copyright and license details
import table
class Generator(object):
    """Emit C source text for the declarations collected in a `table`.

    NOTE(review): ``self.table`` is never created in ``__init__``; the
    caller is expected to assign it before ``generate()`` is invoked.
    Fix over the original: the misspelled private names
    ``_increnent_indent``/``_decrenent_indent``/
    ``_generate_expression_dependancies`` are spelled correctly; all
    call sites are inside this class.
    """

    def __init__(self):
        # Current nesting depth of the emitted C code.
        self.indent_level = 0

    def _indent(self):
        """Whitespace prefix for the current nesting level."""
        return self.indent_level * '    '

    def _increment_indent(self):
        """Enter one nesting level."""
        self.indent_level += 1

    def _decrement_indent(self):
        """Leave one nesting level."""
        self.indent_level -= 1

    def _generate_function_parameters(self, parameter_list):
        """Render a C parameter list; an empty list renders as 'void'."""
        if len(parameter_list) == 0:
            return 'void'
        out = ''
        is_first = True
        for parameter in parameter_list:
            if is_first:
                is_first = False
            else:
                out += ', '
            out += parameter.type.value + ' ' + parameter.name
        return out

    def _generate_function_header(self, name, interface):
        """Render ``void <name>(...)``.

        Every generated function returns void in C; a declared return
        type is delivered through a leading ``__result`` out-pointer.
        """
        out = ''
        out += 'void ' + name + '('
        if interface.return_type:
            out += interface.return_type.value + '* __result, '
        out += self._generate_function_parameters(interface.parameter_list)
        out += ')'
        return out

    def _generate_expression_dependencies(self, function, expression):
        """Emit the code of nested function-call arguments before their use.

        NOTE(review): indexes into ``self.table.declaration_list[-1]``
        (the *last* declaration) rather than ``function`` — confirm.
        """
        out = ''
        for argument in expression.argument_id_list:
            if isinstance(argument, table.LinkToFunctionCall):
                last_declaration = self.table.declaration_list[-1]
                out += self._generate_expression(
                    function, last_declaration.expression_list[argument.id])
        return out

    def _generate_argument(self, function, argument):
        """Render one call argument: a number constant or a temp variable."""
        out = ''
        if isinstance(argument, table.LinkToNumberConstant):
            out += str(function.constant_list[argument.id].value)
        elif isinstance(argument, table.LinkToFunctionCall):
            result_id = function.expression_list[argument.id].result_id.id
            out += str(function.variable_list[result_id].name)
        else:
            raise Exception("Not Implemented")
        return out

    def _generate_function_call_expression_arguments(
            self, function, expression):
        """Render the argument list; the result slot is passed by pointer."""
        out = ''
        # out var. passed by pointer
        # TODO: check in symtable if there are any return value
        out += '&' + function.variable_list[expression.result_id.id].name
        for argument in expression.argument_id_list:
            out += ', ' + self._generate_argument(function, argument)
        return out

    def _generate_function_call_expression(self, function, expression):
        """Render one call statement, preceded by its dependencies."""
        out = ''
        out += self._generate_expression_dependencies(function, expression)
        out += self._indent()
        out += expression.name
        out += '('
        out += self._generate_function_call_expression_arguments(
            function, expression)
        out += ');\n'
        return out

    def _generate_expression(self, function, expression):
        ''' Generate evaluation code. '''
        out = ''
        if isinstance(expression, table.FunctionCallExpression):
            out += self._generate_function_call_expression(
                function, expression)
        else:
            raise Exception("Not Implemented")
        return out

    def _generate_variable_declaration_statement(self, function, statement):
        """Render ``var = <expression result variable>;``."""
        out = ''
        expression = function.expression_list[statement.expression_id.id]
        out += self._generate_expression(function, expression)
        expression_id = statement.expression_id.id
        result_id = function.expression_list[expression_id].result_id.id
        out += self._indent()
        out += function.variable_list[statement.variable_id].name
        out += ' = ' + function.variable_list[result_id].name + ';\n'
        return out

    def _generate_function_call_statement(self, function, statement):
        """Render a bare call statement (the result variable is unused)."""
        out = ''
        expression = function.expression_list[statement.expression_id.id]
        out += self._generate_expression(function, expression)
        return out

    def _generate_if_statement(self, function, statement):
        """Render ``if (<condition result>) { ... }``."""
        out = ''
        expression = function.expression_list[statement.expression_id.id]
        assert isinstance(expression, table.FunctionCallExpression)
        out += self._generate_expression(function, expression)
        out += self._indent() + 'if ('
        out += function.variable_list[expression.result_id.id].name
        out += ') {\n'
        block = function.block_list[statement.if_branch_id]
        self._increment_indent()
        out += self._generate_block(function, block)
        self._decrement_indent()
        out += self._indent() + '}' + '\n'
        return out

    def _generate_return_statement(self, function, statement):
        """Render ``return <arg>;`` evaluating a call dependency first."""
        out = ''
        if isinstance(statement.expression_id, table.LinkToFunctionCall):
            expression = function.expression_list[statement.expression_id.id]
            out += self._generate_expression(function, expression)
        out += self._indent()
        out += 'return '
        out += self._generate_argument(function, statement.expression_id)
        out += ';' + '\n'
        return out

    def _generate_statement(self, function, statement):
        """Dispatch on the statement type."""
        out = ''
        if isinstance(statement, table.VariableDeclarationStatement):
            out += self._generate_variable_declaration_statement(
                function, statement)
        elif isinstance(statement, table.FunctionCallStatement):
            out += self._generate_function_call_statement(function, statement)
        elif isinstance(statement, table.IfStatement):
            out += self._generate_if_statement(function, statement)
        elif isinstance(statement, table.ReturnStatement):
            out += self._generate_return_statement(function, statement)
        else:
            raise Exception("Not Implemented")
        return out

    def _generate_block(self, function, block):
        """Render every statement of one block."""
        out = ''
        for statement in block:
            out += self._generate_statement(function, statement)
        return out

    def _generate_local_variables(self, function):
        """Declare every local (incl. temporaries) at function start."""
        out = ''
        for variable in function.variable_list:
            out += self._indent()
            out += variable.type + ' ' + variable.name
            out += ';' + '\n'
        return out

    def _generate_function(self, function):
        """Render a complete function definition."""
        out = ''
        out += self._generate_function_header(
            function.name, function.interface)
        out += ' {\n'
        self._increment_indent()
        out += self._generate_local_variables(function)
        out += '\n'
        out += self._generate_block(function, function.block_list[0])
        self._decrement_indent()
        out += '}\n'
        return out

    def _generate_forward_declarations(self):
        """Render a prototype for every function declaration."""
        out = ''
        for declaration in self.table.declaration_list:
            if isinstance(declaration, table.Function):
                out += self._generate_function_header(
                    declaration.name, declaration.interface)
                out += ';\n'
        return out

    def _generate_imports(self):
        """Render one ``// import:`` comment line per imported module."""
        if self.table.import_list is None:
            return ''
        out = ''
        for import_node in self.table.import_list:
            out += '// import: ' + import_node + '\n'
        return out

    def generate(self):
        """Render the whole translation unit and return it as a string."""
        out = ''
        out += self._generate_imports()
        out += '\n'
        out += self._generate_forward_declarations()
        out += '\n'
        for declaration in self.table.declaration_list:
            assert isinstance(declaration, table.Function)
            out += self._generate_function(declaration)
            out += '\n'
        return out
# vim: set tabstop=4 shiftwidth=4 softtabstop=4 expandtab:
|
#!/usr/bin/python -u
#
# generate python wrappers from the XML API description
#
# Parsed API description, filled in by the SAX handler below.
functions = {}  # { name: (description, return, args, file, cond) }
enums = {} # { enumType: { enumConstant: enumValue } }
import os
import sys
import string
import re
if __name__ == "__main__":
    # launched as a script: srcPref is the directory of this generator
    srcPref = os.path.dirname(sys.argv[0])
    if len(sys.argv) > 1:
        # first CLI argument: the python binary to generate for
        python = sys.argv[1]
    else:
        print "Python binary not specified"
        sys.exit(1)
else:
    # imported
    srcPref = os.path.dirname(__file__)
#######################################################################
#
# That part is purely the API acquisition phase from the
# libvirt API description
#
#######################################################################
import os
import xml.sax
debug = 0  # set non-zero to trace the SAX callbacks on stdout
def getparser():
    """Create a SAX parser wired to a fresh docParser handler.

    Returns the (parser, handler) pair; parsing fills the module-level
    `functions` and `enums` tables via the handler.
    """
    # Attach parser to an unmarshalling object. return both objects.
    target = docParser()
    parser = xml.sax.make_parser()
    parser.setContentHandler(target)
    return parser, target
class docParser(xml.sax.handler.ContentHandler):
    """SAX handler collecting <function>, <arg>, <return> and <enum>
    elements of the libvirt API XML into the module-level tables
    via the `function()`/`enum()` helpers."""
    def __init__(self):
        self._methodname = None
        self._data = []
        # Non-zero while inside a <function> element.
        self.in_function = 0
        # Alias the standard SAX callback names to the short methods below.
        self.startElement = self.start
        self.endElement = self.end
        self.characters = self.data
    def close(self):
        if debug:
            print "close"
    def getmethodname(self):
        return self._methodname
    def data(self, text):
        # Character data is accumulated until the enclosing element ends.
        if debug:
            print "data %s" % text
        self._data.append(text)
    def cdata(self, text):
        if debug:
            print "data %s" % text
        self._data.append(text)
    def start(self, tag, attrs):
        """Element-open callback: (re)initialize state for the element."""
        if debug:
            print "start %s, %s" % (tag, attrs)
        if tag == 'function':
            self._data = []
            self.in_function = 1
            self.function = None
            self.function_cond = None
            self.function_args = []
            self.function_descr = None
            self.function_return = None
            self.function_file = None
            if attrs.has_key('name'):
                self.function = attrs['name']
            if attrs.has_key('file'):
                self.function_file = attrs['file']
        elif tag == 'cond':
            self._data = []
        elif tag == 'info':
            self._data = []
        elif tag == 'arg':
            if self.in_function == 1:
                self.function_arg_name = None
                self.function_arg_type = None
                self.function_arg_info = None
                if attrs.has_key('name'):
                    self.function_arg_name = attrs['name']
                    # 'from' is a python keyword: rename the argument.
                    if self.function_arg_name == 'from':
                        self.function_arg_name = 'frm'
                if attrs.has_key('type'):
                    self.function_arg_type = attrs['type']
                if attrs.has_key('info'):
                    self.function_arg_info = attrs['info']
        elif tag == 'return':
            if self.in_function == 1:
                self.function_return_type = None
                self.function_return_info = None
                self.function_return_field = None
                if attrs.has_key('type'):
                    self.function_return_type = attrs['type']
                if attrs.has_key('info'):
                    self.function_return_info = attrs['info']
                if attrs.has_key('field'):
                    self.function_return_field = attrs['field']
        elif tag == 'enum':
            enum(attrs['type'],attrs['name'],attrs['value'])
    def end(self, tag):
        """Element-close callback: commit the collected state."""
        if debug:
            print "end %s" % tag
        if tag == 'function':
            # Register the completed function description.
            if self.function != None:
                function(self.function, self.function_descr,
                         self.function_return, self.function_args,
                         self.function_file, self.function_cond)
            self.in_function = 0
        elif tag == 'arg':
            if self.in_function == 1:
                self.function_args.append([self.function_arg_name,
                                           self.function_arg_type,
                                           self.function_arg_info])
        elif tag == 'return':
            if self.in_function == 1:
                self.function_return = [self.function_return_type,
                                        self.function_return_info,
                                        self.function_return_field]
        elif tag == 'info':
            # NOTE(review): local `str` shadows the builtin (kept as-is).
            str = ''
            for c in self._data:
                str = str + c
            if self.in_function == 1:
                self.function_descr = str
        elif tag == 'cond':
            str = ''
            for c in self._data:
                str = str + c
            if self.in_function == 1:
                self.function_cond = str
def function(name, desc, ret, args, file, cond):
    """Register one API function parsed from the XML description."""
    # (`file` shadows the builtin; kept for compatibility.)
    functions[name] = (desc, ret, args, file, cond)
def enum(type, name, value):
    """Register enum constant `name` of enum `type` with `value`.

    A few symbolic libvirt constant names are mapped to their numeric
    values here, because the XML description only provides the symbol.
    (Parameters `type`/`name` shadow builtins; kept for compatibility.)
    """
    # `in` instead of dict.has_key(): same behavior on Python 2, and
    # has_key() no longer exists on Python 3.
    if type not in enums:
        enums[type] = {}
    if value == 'VIR_TYPED_PARAM_INT':
        value = 1
    elif value == 'VIR_TYPED_PARAM_UINT':
        value = 2
    elif value == 'VIR_TYPED_PARAM_LLONG':
        value = 3
    elif value == 'VIR_TYPED_PARAM_ULLONG':
        value = 4
    elif value == 'VIR_TYPED_PARAM_DOUBLE':
        value = 5
    elif value == 'VIR_TYPED_PARAM_BOOLEAN':
        value = 6
    elif value == 'VIR_DOMAIN_AFFECT_CURRENT':
        value = 0
    elif value == 'VIR_DOMAIN_AFFECT_LIVE':
        value = 1
    elif value == 'VIR_DOMAIN_AFFECT_CONFIG':
        value = 2
    enums[type][name] = value
#######################################################################
#
# Some filtering rules to drop functions/types which should not
# be exposed as-is on the Python interface
#
#######################################################################
# Functions that could not be mapped (filled at generation time).
functions_failed = []
# Functions deliberately not generated.
functions_skipped = [
    "virConnectListDomains",
]
# Modules (files) to skip entirely.
skipped_modules = {
}
# C types with no Python equivalent: any function using one is dropped.
skipped_types = {
#    'int *': "usually a return type",
    'virConnectDomainEventCallback': "No function types in python",
    'virConnectDomainEventGenericCallback': "No function types in python",
    'virConnectDomainEventRTCChangeCallback': "No function types in python",
    'virConnectDomainEventWatchdogCallback': "No function types in python",
    'virConnectDomainEventIOErrorCallback': "No function types in python",
    'virConnectDomainEventGraphicsCallback': "No function types in python",
    'virStreamEventCallback': "No function types in python",
    'virEventHandleCallback': "No function types in python",
    'virEventTimeoutCallback': "No function types in python",
}
#######################################################################
#
# Table of remapping to/from the python type or class to the C
# counterpart.
#
#######################################################################
# C type -> (PyArg_ParseTuple format char, Python wrapper class or None,
#            C helper-name suffix, C cast type).
# NOTE(review): field meanings inferred from print_function_wrapper usage
# (only the first two fields are used in the visible code) — confirm.
py_types = {
    'void': (None, None, None, None),
    'int': ('i', None, "int", "int"),
    'long': ('l', None, "long", "long"),
    'double': ('d', None, "double", "double"),
    'unsigned int': ('i', None, "int", "int"),
    'unsigned long': ('l', None, "long", "long"),
    'unsigned long long': ('l', None, "longlong", "long long"),
    'unsigned char *': ('z', None, "charPtr", "char *"),
    'char *': ('z', None, "charPtr", "char *"),
    'const char *': ('z', None, "charPtrConst", "const char *"),
    'virDomainPtr': ('O', "virDomain", "virDomainPtr", "virDomainPtr"),
    'const virDomainPtr': ('O', "virDomain", "virDomainPtr", "virDomainPtr"),
    'virDomain *': ('O', "virDomain", "virDomainPtr", "virDomainPtr"),
    'const virDomain *': ('O', "virDomain", "virDomainPtr", "virDomainPtr"),
    'virNetworkPtr': ('O', "virNetwork", "virNetworkPtr", "virNetworkPtr"),
    'const virNetworkPtr': ('O', "virNetwork", "virNetworkPtr", "virNetworkPtr"),
    'virNetwork *': ('O', "virNetwork", "virNetworkPtr", "virNetworkPtr"),
    'const virNetwork *': ('O', "virNetwork", "virNetworkPtr", "virNetworkPtr"),
    'virInterfacePtr': ('O', "virInterface", "virInterfacePtr", "virInterfacePtr"),
    'const virInterfacePtr': ('O', "virInterface", "virInterfacePtr", "virInterfacePtr"),
    'virInterface *': ('O', "virInterface", "virInterfacePtr", "virInterfacePtr"),
    'const virInterface *': ('O', "virInterface", "virInterfacePtr", "virInterfacePtr"),
    'virStoragePoolPtr': ('O', "virStoragePool", "virStoragePoolPtr", "virStoragePoolPtr"),
    'const virStoragePoolPtr': ('O', "virStoragePool", "virStoragePoolPtr", "virStoragePoolPtr"),
    'virStoragePool *': ('O', "virStoragePool", "virStoragePoolPtr", "virStoragePoolPtr"),
    'const virStoragePool *': ('O', "virStoragePool", "virStoragePoolPtr", "virStoragePoolPtr"),
    'virStorageVolPtr': ('O', "virStorageVol", "virStorageVolPtr", "virStorageVolPtr"),
    'const virStorageVolPtr': ('O', "virStorageVol", "virStorageVolPtr", "virStorageVolPtr"),
    'virStorageVol *': ('O', "virStorageVol", "virStorageVolPtr", "virStorageVolPtr"),
    'const virStorageVol *': ('O', "virStorageVol", "virStorageVolPtr", "virStorageVolPtr"),
    'virConnectPtr': ('O', "virConnect", "virConnectPtr", "virConnectPtr"),
    'const virConnectPtr': ('O', "virConnect", "virConnectPtr", "virConnectPtr"),
    'virConnect *': ('O', "virConnect", "virConnectPtr", "virConnectPtr"),
    'const virConnect *': ('O', "virConnect", "virConnectPtr", "virConnectPtr"),
    'virNodeDevicePtr': ('O', "virNodeDevice", "virNodeDevicePtr", "virNodeDevicePtr"),
    'const virNodeDevicePtr': ('O', "virNodeDevice", "virNodeDevicePtr", "virNodeDevicePtr"),
    'virNodeDevice *': ('O', "virNodeDevice", "virNodeDevicePtr", "virNodeDevicePtr"),
    'const virNodeDevice *': ('O', "virNodeDevice", "virNodeDevicePtr", "virNodeDevicePtr"),
    'virSecretPtr': ('O', "virSecret", "virSecretPtr", "virSecretPtr"),
    'const virSecretPtr': ('O', "virSecret", "virSecretPtr", "virSecretPtr"),
    'virSecret *': ('O', "virSecret", "virSecretPtr", "virSecretPtr"),
    'const virSecret *': ('O', "virSecret", "virSecretPtr", "virSecretPtr"),
    'virNWFilterPtr': ('O', "virNWFilter", "virNWFilterPtr", "virNWFilterPtr"),
    'const virNWFilterPtr': ('O', "virNWFilter", "virNWFilterPtr", "virNWFilterPtr"),
    'virNWFilter *': ('O', "virNWFilter", "virNWFilterPtr", "virNWFilterPtr"),
    'const virNWFilter *': ('O', "virNWFilter", "virNWFilterPtr", "virNWFilterPtr"),
    'virStreamPtr': ('O', "virStream", "virStreamPtr", "virStreamPtr"),
    'const virStreamPtr': ('O', "virStream", "virStreamPtr", "virStreamPtr"),
    'virStream *': ('O', "virStream", "virStreamPtr", "virStreamPtr"),
    'const virStream *': ('O', "virStream", "virStreamPtr", "virStreamPtr"),
    'virDomainSnapshotPtr': ('O', "virDomainSnapshot", "virDomainSnapshotPtr", "virDomainSnapshotPtr"),
    'const virDomainSnapshotPtr': ('O', "virDomainSnapshot", "virDomainSnapshotPtr", "virDomainSnapshotPtr"),
    'virDomainSnapshot *': ('O', "virDomainSnapshot", "virDomainSnapshotPtr", "virDomainSnapshotPtr"),
    'const virDomainSnapshot *': ('O', "virDomainSnapshot", "virDomainSnapshotPtr", "virDomainSnapshotPtr"),
}
# Return-type overrides; currently empty.
py_return_types = {
}
# C types seen but not handled; filled with type -> [function names].
unknown_types = {}
# Functions whose first string argument may carry a foreign encoding;
# their 'z' parse format is promoted to 't#' in print_function_wrapper.
foreign_encoding_args = (
)
#######################################################################
#
# This part writes the C <-> Python stubs libvirt.[ch] and
# the table libvirt-export.c to add when registering the Python module
#
#######################################################################
# Class methods which are written by hand in libvir.c but the Python-level
# code is still automatically generated (so they are not in skip_function()).
skip_impl = (
    # Functions below keep their generated Python-level wrapper, but the
    # C stub is hand-written (so they are not in skip_function()).
    'virConnectGetVersion',
    'virConnectGetLibVersion',
    'virConnectListDomainsID',
    'virConnectListDefinedDomains',
    'virConnectListNetworks',
    'virConnectListDefinedNetworks',
    'virConnectListSecrets',
    'virConnectListInterfaces',
    'virConnectListStoragePools',
    'virConnectListDefinedStoragePools',
    'virConnectListStorageVols',
    'virConnectListDefinedStorageVols',
    'virConnectListDefinedInterfaces',
    'virConnectListNWFilters',
    'virDomainSnapshotListNames',
    'virConnGetLastError',
    'virGetLastError',
    'virDomainGetInfo',
    'virDomainGetState',
    'virDomainGetControlInfo',
    'virDomainGetBlockInfo',
    'virDomainGetJobInfo',
    'virNodeGetInfo',
    'virDomainGetUUID',
    'virDomainGetUUIDString',
    'virDomainLookupByUUID',
    'virNetworkGetUUID',
    'virNetworkGetUUIDString',
    'virNetworkLookupByUUID',
    'virDomainGetAutostart',
    'virNetworkGetAutostart',
    'virDomainBlockStats',
    'virDomainInterfaceStats',
    'virDomainMemoryStats',
    'virNodeGetCellsFreeMemory',
    'virDomainGetSchedulerType',
    'virDomainGetSchedulerParameters',
    'virDomainGetSchedulerParametersFlags',
    'virDomainSetSchedulerParameters',
    'virDomainSetSchedulerParametersFlags',
    'virDomainSetBlkioParameters',
    'virDomainGetBlkioParameters',
    'virDomainSetMemoryParameters',
    'virDomainGetMemoryParameters',
    'virDomainGetVcpus',
    'virDomainPinVcpu',
    'virSecretGetValue',
    'virSecretSetValue',
    'virSecretGetUUID',
    'virSecretGetUUIDString',
    'virSecretLookupByUUID',
    'virNWFilterGetUUID',
    'virNWFilterGetUUIDString',
    'virNWFilterLookupByUUID',
    'virStoragePoolGetUUID',
    'virStoragePoolGetUUIDString',
    'virStoragePoolLookupByUUID',
    'virStoragePoolGetInfo',
    'virStorageVolGetInfo',
    'virStoragePoolGetAutostart',
    'virStoragePoolListVolumes',
    'virDomainBlockPeek',
    'virDomainMemoryPeek',
    'virEventRegisterImpl',
    'virNodeListDevices',
    'virNodeDeviceListCaps',
    'virConnectBaselineCPU',
    'virDomainRevertToSnapshot',
    'virDomainSendKey',
    'virNodeGetCPUStats',
    'virNodeGetMemoryStats',
    'virDomainBlockPull',
    'virDomainGetBlockPullInfo',
)
# These are functions which the generator skips completely - no python
# or C code is generated. Generally should not be used for any more
# functions than those already listed
skip_function = (
    'virConnectListDomains', # Python API is called virConnectListDomainsID for unknown reasons
    'virConnSetErrorFunc', # Not used in Python API XXX is this a bug ?
    'virResetError', # Not used in Python API XXX is this a bug ?
    'virGetVersion', # Python C code is manually written
    'virSetErrorFunc', # Python API is called virRegisterErrorHandler for unknown reasons
    'virConnCopyLastError', # Python API is called virConnGetLastError instead
    'virCopyLastError', # Python API is called virGetLastError instead
    'virConnectOpenAuth', # Python C code is manually written
    'virDefaultErrorFunc', # Python virErrorFuncHandler impl calls this from C
    'virDomainGetSecurityLabel', # Needs investigation...
    'virNodeGetSecurityModel', # Needs investigation...
    'virConnectDomainEventRegister', # overridden in virConnect.py
    'virConnectDomainEventDeregister', # overridden in virConnect.py
    'virConnectDomainEventRegisterAny', # overridden in virConnect.py
    'virConnectDomainEventDeregisterAny', # overridden in virConnect.py
    'virSaveLastError', # We have our own python error wrapper
    'virFreeError', # Only needed if we use virSaveLastError
    'virStreamFree', # Overridden in libvirt-override-virStream.py
    'virStreamRecvAll', # Pure python libvirt-override-virStream.py
    'virStreamSendAll', # Pure python libvirt-override-virStream.py
    'virStreamRecv', # overridden in libvirt-override-virStream.py
    'virStreamSend', # overridden in libvirt-override-virStream.py
    # 'Ref' functions have no use for bindings users.
    "virConnectRef",
    "virDomainRef",
    "virInterfaceRef",
    "virNetworkRef",
    "virNodeDeviceRef",
    "virSecretRef",
    "virNWFilterRef",
    "virStoragePoolRef",
    "virStorageVolRef",
    'virStreamRef',
    # These functions shouldn't be called via the bindings (and even the docs
    # contain an explicit warning to that effect). The equivalent should be
    # implemented in pure python for each class
    "virDomainGetConnect",
    "virInterfaceGetConnect",
    "virNetworkGetConnect",
    "virSecretGetConnect",
    "virNWFilterGetConnect",
    "virStoragePoolGetConnect",
    "virStorageVolGetConnect",
)
# Functions whose generated wrapper skips argument index 1.
# NOTE(review): consumed by generator code outside this chunk — confirm.
function_skip_index_one = (
    "virDomainRevertToSnapshot",
)
def print_function_wrapper(name, output, export, include):
    """Emit the C wrapper for the API function 'name'.

    Writes the wrapper body to 'output' (libvirt.c), its prototype to
    'include' (libvirt.h) and its method-table entry to 'export'
    (libvirt-export.c).

    Returns 1 when a wrapper exists (generated or hand written), 0 when
    the function is deliberately skipped, and -1 when an argument or
    return type has no converter (recorded in unknown_types).
    """
    global py_types
    global unknown_types
    global functions
    global skipped_modules
    try:
        (desc, ret, args, file, cond) = functions[name]
    except KeyError:
        # Narrowed from a bare except: only the lookup can fail here, and
        # the message now actually carries the function name (the old
        # format string had no % argument).
        print("failed to get function %s infos" % name)
        return
    if skipped_modules.has_key(file):
        return 0
    if name in skip_function:
        return 0
    if name in skip_impl:
        # Don't delete the function entry in the caller.
        return 1
    c_call = ""
    format = ""
    format_args = ""
    c_args = ""
    c_return = ""
    c_convert = ""
    num_bufs = 0
    for arg in args:
        # This should be correct
        if arg[1][0:6] == "const ":
            arg[1] = arg[1][6:]
        c_args = c_args + " %s %s;\n" % (arg[1], arg[0])
        if py_types.has_key(arg[1]):
            (f, t, n, c) = py_types[arg[1]]
            if (f == 'z') and (name in foreign_encoding_args) and (num_bufs == 0):
                f = 't#'
            if f != None:
                format = format + f
            if t != None:
                # the argument arrives as a PyObject wrapper and must be
                # unwrapped with the matching Py<T>_Get accessor
                format_args = format_args + ", &pyobj_%s" % (arg[0])
                c_args = c_args + " PyObject *pyobj_%s;\n" % (arg[0])
                c_convert = c_convert + \
                    " %s = (%s) Py%s_Get(pyobj_%s);\n" % (arg[0],
                    arg[1], t, arg[0])
            else:
                format_args = format_args + ", &%s" % (arg[0])
            if f == 't#':
                # buffer arguments need an extra length variable
                format_args = format_args + ", &py_buffsize%d" % num_bufs
                c_args = c_args + " int py_buffsize%d;\n" % num_bufs
                num_bufs = num_bufs + 1
            if c_call != "":
                c_call = c_call + ", "
            c_call = c_call + "%s" % (arg[0])
        else:
            if skipped_types.has_key(arg[1]):
                return 0
            if unknown_types.has_key(arg[1]):
                lst = unknown_types[arg[1]]
                lst.append(name)
            else:
                unknown_types[arg[1]] = [name]
            return -1
    if format != "":
        format = format + ":%s" % (name)
    if ret[0] == 'void':
        if file == "python_accessor":
            if args[1][1] == "char *":
                # BUGFIX: the old format string had only two %s
                # conversions for four arguments, which raised TypeError
                # at generation time; restore the NULL-guarded free of
                # the previous string before strdup'ing the new one.
                c_call = "\n if (%s->%s != NULL) free(%s->%s);\n" % (
                    args[0][0], args[1][0], args[0][0], args[1][0])
                c_call = c_call + " %s->%s = (%s)strdup((const xmlChar *)%s);\n" % (args[0][0],
                    args[1][0], args[1][1], args[1][0])
            else:
                c_call = "\n %s->%s = %s;\n" % (args[0][0], args[1][0],
                    args[1][0])
        else:
            c_call = "\n %s(%s);\n" % (name, c_call)
        ret_convert = " Py_INCREF(Py_None);\n return(Py_None);\n"
    elif py_types.has_key(ret[0]):
        (f, t, n, c) = py_types[ret[0]]
        c_return = " %s c_retval;\n" % (ret[0])
        if file == "python_accessor" and ret[2] != None:
            # accessor with a 'field' attribute: read the struct member
            c_call = "\n c_retval = %s->%s;\n" % (args[0][0], ret[2])
        else:
            c_call = "\n c_retval = %s(%s);\n" % (name, c_call)
        ret_convert = " py_retval = libvirt_%sWrap((%s) c_retval);\n" % (n, c)
        ret_convert = ret_convert + " return(py_retval);\n"
    elif py_return_types.has_key(ret[0]):
        (f, t, n, c) = py_return_types[ret[0]]
        c_return = " %s c_retval;\n" % (ret[0])
        c_call = "\n c_retval = %s(%s);\n" % (name, c_call)
        ret_convert = " py_retval = libvirt_%sWrap((%s) c_retval);\n" % (n, c)
        ret_convert = ret_convert + " return(py_retval);\n"
    else:
        if skipped_types.has_key(ret[0]):
            return 0
        if unknown_types.has_key(ret[0]):
            lst = unknown_types[ret[0]]
            lst.append(name)
        else:
            unknown_types[ret[0]] = [name]
        return -1
    if cond != None and cond != "":
        include.write("#if %s\n" % cond)
        export.write("#if %s\n" % cond)
        output.write("#if %s\n" % cond)
    include.write("PyObject * ")
    include.write("libvirt_%s(PyObject *self, PyObject *args);\n" % (name))
    export.write(" { (char *)\"%s\", libvirt_%s, METH_VARARGS, NULL },\n" %
                 (name, name))
    if file == "python":
        # Those have been manually generated
        if cond != None and cond != "":
            include.write("#endif\n")
            export.write("#endif\n")
            output.write("#endif\n")
        return 1
    if file == "python_accessor" and ret[0] != "void" and ret[2] is None:
        # Those have been manually generated
        if cond != None and cond != "":
            include.write("#endif\n")
            export.write("#endif\n")
            output.write("#endif\n")
        return 1
    output.write("PyObject *\n")
    output.write("libvirt_%s(PyObject *self ATTRIBUTE_UNUSED," % (name))
    output.write(" PyObject *args")
    if format == "":
        output.write(" ATTRIBUTE_UNUSED")
    output.write(") {\n")
    if ret[0] != 'void':
        output.write(" PyObject *py_retval;\n")
    if c_return != "":
        output.write(c_return)
    if c_args != "":
        output.write(c_args)
    if format != "":
        output.write("\n if (!PyArg_ParseTuple(args, (char *)\"%s\"%s))\n" %
                     (format, format_args))
        output.write(" return(NULL);\n")
    if c_convert != "":
        output.write(c_convert)
    # the actual API call must not hold the Python GIL
    output.write("LIBVIRT_BEGIN_ALLOW_THREADS;\n")
    output.write(c_call)
    output.write("LIBVIRT_END_ALLOW_THREADS;\n")
    output.write(ret_convert)
    output.write("}\n\n")
    if cond != None and cond != "":
        include.write("#endif /* %s */\n" % cond)
        export.write("#endif /* %s */\n" % cond)
        output.write("#endif /* %s */\n" % cond)
    return 1
def buildStubs():
    """Parse the API XML descriptions and generate the C stub files.

    Feeds libvirt-api.xml (falling back to ../docs/libvirt-api.xml) and
    libvirt-override-api.xml through the SAX parser, then writes
    libvirt.h, libvirt-export.c and libvirt.c via print_function_wrapper.
    Returns 0 on success, -1 when any wrapper failed.
    """
    global py_types
    global py_return_types
    global unknown_types

    api_xml = os.path.join(srcPref, "libvirt-api.xml")
    try:
        f = open(api_xml)
        data = f.read()
        f.close()
        (parser, target) = getparser()
        parser.feed(data)
        parser.close()
    except IOError as msg:
        try:
            api_xml = os.path.join(srcPref, "..", "docs", "libvirt-api.xml")
            f = open(api_xml)
            data = f.read()
            f.close()
            (parser, target) = getparser()
            parser.feed(data)
            parser.close()
        except IOError as msg:
            # BUGFIX: the old code did 'print file, ":", msg' which
            # printed the 'file' builtin, not the path we failed on.
            print("%s: %s" % (api_xml, msg))
            sys.exit(1)

    n = len(functions.keys())
    print("Found %d functions in libvirt-api.xml" % (n))

    # python-level helper type used by the hand-written override API
    py_types['pythonObject'] = ('O', "pythonObject", "pythonObject", "pythonObject")

    override_xml = os.path.join(srcPref, "libvirt-override-api.xml")
    try:
        f = open(override_xml)
        data = f.read()
        f.close()
        (parser, target) = getparser()
        parser.feed(data)
        parser.close()
    except IOError as msg:
        # best effort: a missing override file is reported but not fatal
        print("%s: %s" % (override_xml, msg))

    print("Found %d functions in libvirt-override-api.xml" % (
        len(functions.keys()) - n))
    nb_wrap = 0
    failed = 0
    skipped = 0

    include = open("libvirt.h", "w")
    include.write("/* Generated */\n\n")
    export = open("libvirt-export.c", "w")
    export.write("/* Generated */\n\n")
    wrapper = open("libvirt.c", "w")
    wrapper.write("/* Generated */\n\n")
    wrapper.write("#include <Python.h>\n")
    wrapper.write("#include <libvirt/libvirt.h>\n")
    wrapper.write("#include \"typewrappers.h\"\n")
    wrapper.write("#include \"libvirt.h\"\n\n")
    # iterate over a snapshot of the keys: entries are deleted from
    # 'functions' while we walk it
    for name in list(functions.keys()):
        ret = print_function_wrapper(name, wrapper, export, include)
        if ret < 0:
            failed = failed + 1
            functions_failed.append(name)
            del functions[name]
        if ret == 0:
            skipped = skipped + 1
            functions_skipped.append(name)
            del functions[name]
        if ret == 1:
            nb_wrap = nb_wrap + 1
    include.close()
    export.close()
    wrapper.close()

    print("Generated %d wrapper functions" % nb_wrap)
    if unknown_types:
        print("Missing type converters: ")
        for type in unknown_types.keys():
            print("%s:%d " % (type, len(unknown_types[type])))
    for f in functions_failed:
        print("ERROR: failed %s" % f)
    if failed > 0:
        return -1
    return 0
#######################################################################
#
# This part writes part of the Python front-end classes based on
# mapping rules between types and classes and also based on function
# renaming to get consistent function names at the Python level
#
#######################################################################
#
# The type automatically remapped to generated classes
#
# C type -> (unwrap accessor appended to the argument ("._o"),
#            constructor template instantiated with the C return value,
#            name of the generated python class)
classes_type = {
    "virDomainPtr": ("._o", "virDomain(self,_obj=%s)", "virDomain"),
    "virDomain *": ("._o", "virDomain(self, _obj=%s)", "virDomain"),
    "virNetworkPtr": ("._o", "virNetwork(self, _obj=%s)", "virNetwork"),
    "virNetwork *": ("._o", "virNetwork(self, _obj=%s)", "virNetwork"),
    "virInterfacePtr": ("._o", "virInterface(self, _obj=%s)", "virInterface"),
    "virInterface *": ("._o", "virInterface(self, _obj=%s)", "virInterface"),
    "virStoragePoolPtr": ("._o", "virStoragePool(self, _obj=%s)", "virStoragePool"),
    "virStoragePool *": ("._o", "virStoragePool(self, _obj=%s)", "virStoragePool"),
    "virStorageVolPtr": ("._o", "virStorageVol(self, _obj=%s)", "virStorageVol"),
    "virStorageVol *": ("._o", "virStorageVol(self, _obj=%s)", "virStorageVol"),
    "virNodeDevicePtr": ("._o", "virNodeDevice(self, _obj=%s)", "virNodeDevice"),
    "virNodeDevice *": ("._o", "virNodeDevice(self, _obj=%s)", "virNodeDevice"),
    "virSecretPtr": ("._o", "virSecret(self, _obj=%s)", "virSecret"),
    "virSecret *": ("._o", "virSecret(self, _obj=%s)", "virSecret"),
    "virNWFilterPtr": ("._o", "virNWFilter(self, _obj=%s)", "virNWFilter"),
    "virNWFilter *": ("._o", "virNWFilter(self, _obj=%s)", "virNWFilter"),
    "virStreamPtr": ("._o", "virStream(self, _obj=%s)", "virStream"),
    "virStream *": ("._o", "virStream(self, _obj=%s)", "virStream"),
    "virConnectPtr": ("._o", "virConnect(_obj=%s)", "virConnect"),
    "virConnect *": ("._o", "virConnect(_obj=%s)", "virConnect"),
    "virDomainSnapshotPtr": ("._o", "virDomainSnapshot(self,_obj=%s)", "virDomainSnapshot"),
    "virDomainSnapshot *": ("._o", "virDomainSnapshot(self, _obj=%s)", "virDomainSnapshot"),
}
# return C type -> python expression template used instead of wrapping
# the value in a generated class (currently none)
converter_type = {
}

# classes whose C types are scanned first when mapping functions to methods
primary_classes = ["virDomain", "virNetwork", "virInterface",
                   "virStoragePool", "virStorageVol",
                   "virConnect", "virNodeDevice", "virSecret",
                   "virNWFilter", "virStream", "virDomainSnapshot"]

# class name -> python base class of the generated class (currently none)
classes_ancestor = {
}

# class name -> libvirtmod function called from the generated __del__
classes_destructors = {
    "virDomain": "virDomainFree",
    "virNetwork": "virNetworkFree",
    "virInterface": "virInterfaceFree",
    "virStoragePool": "virStoragePoolFree",
    "virStorageVol": "virStorageVolFree",
    "virNodeDevice" : "virNodeDeviceFree",
    "virSecret": "virSecretFree",
    "virNWFilter": "virNWFilterFree",
    "virDomainSnapshot": "virDomainSnapshotFree",
    # We hand-craft __del__ for this one
    #"virStream": "virStreamFree",
}

# classes which must NOT get the generated connect() accessor
class_skip_connect_impl = {
    "virConnect" : True,
    "virDomainSnapshot": True,
}

# classes which get a generated domain() accessor returning self._dom
class_domain_impl = {
    "virDomainSnapshot": True,
}
# Functions whose generated wrapper returns None on failure instead of
# raising libvirtError (simple name/ID accessors).
functions_noexcept = {
    'virDomainGetID': True,
    'virDomainGetName': True,
    'virNetworkGetName': True,
    'virInterfaceGetName': True,
    'virStoragePoolGetName': True,
    'virStorageVolGetName': True,
    # BUGFIX: was 'virStorageVolGetkey' (lowercase k), which matches no
    # libvirt API function -- the real name is virStorageVolGetKey, so
    # the no-exception rule never applied to it.
    'virStorageVolGetKey': True,
    'virNodeDeviceGetName': True,
    'virNodeDeviceGetParent': True,
    'virSecretGetUsageType': True,
    'virSecretGetUsageID': True,
    'virNWFilterGetName': True,
}
# returned class -> list of (source class, attribute): the generated
# wrapper stores a reference to the source object on the returned object
# under 'attribute' (currently none)
reference_keepers = {
}

# class name -> list of (index, func, name, ret, args, file) records,
# filled in by buildWrappers(); "None" collects the free functions
function_classes = {}

function_classes["None"] = []

# name -> extra python statement emitted just before the generated return
function_post = {}

# Functions returning an integral type which need special rules to
# check for errors and raise exceptions.
functions_int_exception_test = {
    'virDomainGetMaxMemory': "%s == 0",
}

# default error test emitted for integral-returning functions
functions_int_default_test = "%s == -1"
def is_integral_type(name):
    """Return True when the C type name is a plain or unsigned int/long."""
    return re.match("(unsigned)? ?(int|long)$", name) is not None
# Functions returning lists which need special rules to check for errors
# and raise exceptions.
functions_list_exception_test = {
}

# default error test emitted for list-returning functions
functions_list_default_test = "%s is None"
def is_list_type(name):
    """True for pointer return types, plus the two stats structs that the
    hand-written C code converts into python lists/dicts."""
    special_cases = ("virDomainBlockStats", "virDomainInterfaceStats")
    return name.endswith("*") or name in special_cases
def nameFixup(name, classe, type, file):
    """Map a C API function name onto the python method name used on the
    class 'classe'.

    Strips the redundant class prefix, lowercases the first letter(s),
    and finally repairs the capitalisation of well-known acronyms that
    the generic lowercasing mangles (ID, UUID, ...).  Branch order
    matters: longer/more specific prefixes are tested first.
    """
    # avoid a disastrous clash with the global list functions
    listname = classe + "List"
    ll = len(listname)
    l = len(classe)
    # BUGFIX: this compared name[0:l] (l characters) against listname
    # (l + 4 characters), so the branch could never match and 'll' was
    # dead; compare the full listname-sized prefix instead.
    if name[0:ll] == listname:
        func = name[l:]
        func = func[0:1].lower() + func[1:]
    elif name[0:16] == "virNetworkDefine":
        func = name[3:]
        func = func[0:1].lower() + func[1:]
    elif name[0:19] == "virNetworkCreateXML":
        func = name[3:]
        func = func[0:1].lower() + func[1:]
    elif name[0:16] == "virNetworkLookup":
        func = name[3:]
        func = func[0:1].lower() + func[1:]
    elif name[0:18] == "virInterfaceDefine":
        func = name[3:]
        func = func[0:1].lower() + func[1:]
    elif name[0:21] == "virInterfaceCreateXML":
        func = name[3:]
        func = func[0:1].lower() + func[1:]
    elif name[0:18] == "virInterfaceLookup":
        func = name[3:]
        func = func[0:1].lower() + func[1:]
    elif name[0:15] == "virSecretDefine":
        func = name[3:]
        func = func[0:1].lower() + func[1:]
    elif name[0:15] == "virSecretLookup":
        func = name[3:]
        func = func[0:1].lower() + func[1:]
    elif name[0:17] == "virNWFilterDefine":
        func = name[3:]
        # lowercase three letters so "NWFilter..." becomes "nwfilter..."
        func = func[0:3].lower() + func[3:]
    elif name[0:17] == "virNWFilterLookup":
        func = name[3:]
        func = func[0:3].lower() + func[3:]
    elif name[0:20] == "virStoragePoolDefine":
        func = name[3:]
        func = func[0:1].lower() + func[1:]
    elif name[0:23] == "virStoragePoolCreateXML":
        func = name[3:]
        func = func[0:1].lower() + func[1:]
    elif name[0:20] == "virStoragePoolLookup":
        func = name[3:]
        func = func[0:1].lower() + func[1:]
    elif name[0:19] == "virStorageVolDefine":
        func = name[3:]
        func = func[0:1].lower() + func[1:]
    elif name[0:19] == "virStorageVolLookup":
        func = name[3:]
        func = func[0:1].lower() + func[1:]
    elif name[0:12] == "virDomainGet":
        func = name[12:]
        func = func[0:1].lower() + func[1:]
    elif name[0:29] == "virDomainSnapshotLookupByName":
        func = name[9:]
        func = func[0:1].lower() + func[1:]
    elif name[0:26] == "virDomainSnapshotListNames":
        func = name[9:]
        func = func[0:1].lower() + func[1:]
    elif name[0:20] == "virDomainSnapshotNum":
        func = name[9:]
        func = func[0:1].lower() + func[1:]
    elif name[0:26] == "virDomainSnapshotCreateXML":
        func = name[9:]
        func = func[0:1].lower() + func[1:]
    elif name[0:24] == "virDomainSnapshotCurrent":
        func = name[9:]
        func = func[0:1].lower() + func[1:]
    elif name[0:17] == "virDomainSnapshot":
        func = name[17:]
        func = func[0:1].lower() + func[1:]
    elif name[0:9] == "virDomain":
        func = name[9:]
        func = func[0:1].lower() + func[1:]
    elif name[0:13] == "virNetworkGet":
        func = name[13:]
        func = func[0:1].lower() + func[1:]
    elif name[0:10] == "virNetwork":
        func = name[10:]
        func = func[0:1].lower() + func[1:]
    elif name[0:15] == "virInterfaceGet":
        func = name[15:]
        func = func[0:1].lower() + func[1:]
    elif name[0:12] == "virInterface":
        func = name[12:]
        func = func[0:1].lower() + func[1:]
    elif name[0:12] == 'virSecretGet':
        func = name[12:]
        func = func[0:1].lower() + func[1:]
    elif name[0:9] == 'virSecret':
        func = name[9:]
        func = func[0:1].lower() + func[1:]
    elif name[0:14] == 'virNWFilterGet':
        func = name[14:]
        func = func[0:1].lower() + func[1:]
    elif name[0:11] == 'virNWFilter':
        func = name[11:]
        func = func[0:1].lower() + func[1:]
    elif name[0:12] == 'virStreamNew':
        func = "newStream"
    elif name[0:9] == 'virStream':
        func = name[9:]
        func = func[0:1].lower() + func[1:]
    elif name[0:17] == "virStoragePoolGet":
        func = name[17:]
        func = func[0:1].lower() + func[1:]
    elif name[0:14] == "virStoragePool":
        func = name[14:]
        func = func[0:1].lower() + func[1:]
    elif name[0:16] == "virStorageVolGet":
        func = name[16:]
        func = func[0:1].lower() + func[1:]
    elif name[0:13] == "virStorageVol":
        func = name[13:]
        func = func[0:1].lower() + func[1:]
    elif name[0:13] == "virNodeDevice":
        if name[13:16] == "Get":
            func = name[16].lower() + name[17:]
        elif name[13:19] == "Lookup" or name[13:19] == "Create":
            func = name[3].lower() + name[4:]
        else:
            func = name[13].lower() + name[14:]
    elif name[0:7] == "virNode":
        func = name[7:]
        func = func[0:1].lower() + func[1:]
    elif name[0:10] == "virConnect":
        func = name[10:]
        func = func[0:1].lower() + func[1:]
    elif name[0:3] == "xml":
        func = name[3:]
        func = func[0:1].lower() + func[1:]
    else:
        func = name
    # restore the capitalisation of acronyms mangled above
    acronym_fixups = {
        "iD": "ID",
        "uUID": "UUID",
        "uUIDString": "UUIDString",
        "oSType": "OSType",
        "xMLDesc": "XMLDesc",
        "mACString": "MACString",
    }
    return acronym_fixups.get(func, func)
def functionCompare(info1, info2):
    """cmp-style comparator for function info tuples.

    Orders primarily by source module (python_accessor sorts first),
    then by the python-level method name within a module.
    """
    func1, file1 = info1[1], info1[5]
    func2, file2 = info2[1], info2[5]
    if file1 == file2:
        if func1 < func2:
            return -1
        if func1 > func2:
            return 1
    if file1 == "python_accessor":
        return -1
    if file2 == "python_accessor":
        return 1
    if file1 < file2:
        return -1
    if file1 > file2:
        return 1
    return 0
def writeDoc(name, args, indent, output):
    """Write the API description of 'name' as a python docstring.

    The opening quotes are emitted at 'indent'; nothing is written when
    the function carries no description.  NULL in the C documentation is
    rewritten as None for the python audience.
    """
    if functions[name][0] is None or functions[name][0] == "":
        return
    val = functions[name][0]
    # the C docs talk about NULL; the python bindings return None
    # (str.replace/str.find replace the deprecated string-module calls,
    # and 'line' no longer shadows the str builtin)
    val = val.replace("NULL", "None")
    output.write(indent)
    output.write('"""')
    i = val.find("\n")
    while i >= 0:
        line = val[0:i+1]
        val = val[i+1:]
        output.write(line)
        i = val.find("\n")
    output.write(indent)
    output.write(val)
    output.write(' """\n')
def buildWrappers():
    """Generate libvirt.py, the python class layer over libvirtmod.

    Assigns every API function either to a free function or to a method
    of the class owning its first (or, in some cases, second) argument,
    prepends the hand-written libvirt-override*.py code, and appends the
    enum constants.
    """
    # duplicate 'global converter_type/primary_classes/classes_ancestor'
    # declarations removed; each name is declared once
    global ctypes
    global py_types
    global py_return_types
    global unknown_types
    global functions
    global function_classes
    global classes_type
    global classes_list
    global converter_type
    global primary_classes
    global classes_ancestor
    global classes_destructors
    global functions_noexcept

    for type in classes_type.keys():
        function_classes[classes_type[type][2]] = []

    #
    # Build the list of C types to look for ordered to start
    # with primary classes
    #
    ctypes = []
    classes_list = []
    ctypes_processed = {}
    classes_processed = {}
    for classe in primary_classes:
        classes_list.append(classe)
        classes_processed[classe] = ()
        for type in classes_type.keys():
            tinfo = classes_type[type]
            if tinfo[2] == classe:
                ctypes.append(type)
                ctypes_processed[type] = ()
    for type in classes_type.keys():
        if ctypes_processed.has_key(type):
            continue
        tinfo = classes_type[type]
        if not classes_processed.has_key(tinfo[2]):
            classes_list.append(tinfo[2])
            classes_processed[tinfo[2]] = ()
        ctypes.append(type)
        ctypes_processed[type] = ()

    # Dispatch each function: method of the class of args[0] (index 0),
    # or of args[1] (index 1) unless excluded; otherwise a free function.
    for name in functions.keys():
        found = 0
        (desc, ret, args, file, cond) = functions[name]
        for type in ctypes:
            classe = classes_type[type][2]
            if name[0:3] == "vir" and len(args) >= 1 and args[0][1] == type:
                found = 1
                func = nameFixup(name, classe, type, file)
                info = (0, func, name, ret, args, file)
                function_classes[classe].append(info)
            elif name[0:3] == "vir" and len(args) >= 2 and args[1][1] == type \
                and file != "python_accessor" and not name in function_skip_index_one:
                found = 1
                func = nameFixup(name, classe, type, file)
                info = (1, func, name, ret, args, file)
                function_classes[classe].append(info)
        if found == 1:
            continue
        func = nameFixup(name, "None", file, file)
        info = (0, func, name, ret, args, file)
        function_classes['None'].append(info)

    classes = open("libvirt.py", "w")
    extra = open(os.path.join(srcPref, "libvirt-override.py"), "r")
    classes.write("#! " + python + " -i\n")
    classes.write("#\n")
    classes.write("# WARNING WARNING WARNING WARNING\n")
    classes.write("#\n")
    classes.write("# This file is automatically written by generator.py. Any changes\n")
    classes.write("# made here will be lost.\n")
    classes.write("#\n")
    classes.write("# To change the manually written methods edit libvirt-override.py\n")
    classes.write("# To change the automatically written methods edit generator.py\n")
    classes.write("#\n")
    classes.write("# WARNING WARNING WARNING WARNING\n")
    classes.write("#\n")
    classes.writelines(extra.readlines())
    classes.write("#\n")
    classes.write("# WARNING WARNING WARNING WARNING\n")
    classes.write("#\n")
    classes.write("# Automatically written part of python bindings for libvirt\n")
    classes.write("#\n")
    classes.write("# WARNING WARNING WARNING WARNING\n")
    extra.close()

    # First emit the free functions.
    if function_classes.has_key("None"):
        flist = function_classes["None"]
        flist.sort(functionCompare)
        oldfile = ""
        for info in flist:
            (index, func, name, ret, args, file) = info
            if file != oldfile:
                classes.write("#\n# Functions from module %s\n#\n\n" % file)
                oldfile = file
            classes.write("def %s(" % func)
            n = 0
            for arg in args:
                if n != 0:
                    classes.write(", ")
                classes.write("%s" % arg[0])
                n = n + 1
            classes.write("):\n")
            writeDoc(name, args, ' ', classes)
            for arg in args:
                if classes_type.has_key(arg[1]):
                    classes.write(" if %s is None: %s__o = None\n" %
                                  (arg[0], arg[0]))
                    classes.write(" else: %s__o = %s%s\n" %
                                  (arg[0], arg[0], classes_type[arg[1]][0]))
            if ret[0] != "void":
                classes.write(" ret = ")
            else:
                classes.write(" ")
            classes.write("libvirtmod.%s(" % name)
            n = 0
            for arg in args:
                if n != 0:
                    classes.write(", ")
                classes.write("%s" % arg[0])
                if classes_type.has_key(arg[1]):
                    classes.write("__o")
                n = n + 1
            classes.write(")\n")
            if ret[0] != "void":
                if classes_type.has_key(ret[0]):
                    #
                    # Raise an exception
                    #
                    if functions_noexcept.has_key(name):
                        classes.write(" if ret is None:return None\n")
                    else:
                        classes.write(
                            " if ret is None:raise libvirtError('%s() failed')\n" %
                            (name))
                    classes.write(" return ")
                    classes.write(classes_type[ret[0]][1] % ("ret"))
                    classes.write("\n")
                # For functions returning an integral type there are
                # several things that we can do, depending on the
                # contents of functions_int_*:
                elif is_integral_type(ret[0]):
                    if not functions_noexcept.has_key(name):
                        if functions_int_exception_test.has_key(name):
                            test = functions_int_exception_test[name]
                        else:
                            test = functions_int_default_test
                        classes.write((" if " + test +
                                       ": raise libvirtError ('%s() failed')\n") %
                                      ("ret", name))
                    classes.write(" return ret\n")
                elif is_list_type(ret[0]):
                    if not functions_noexcept.has_key(name):
                        if functions_list_exception_test.has_key(name):
                            test = functions_list_exception_test[name]
                        else:
                            test = functions_list_default_test
                        classes.write((" if " + test +
                                       ": raise libvirtError ('%s() failed')\n") %
                                      ("ret", name))
                    classes.write(" return ret\n")
                else:
                    classes.write(" return ret\n")
            classes.write("\n")

    # Then the classes themselves, in classes_list order.
    for classname in classes_list:
        if classname == "None":
            pass
        else:
            if classes_ancestor.has_key(classname):
                classes.write("class %s(%s):\n" % (classname,
                              classes_ancestor[classname]))
                classes.write(" def __init__(self, _obj=None):\n")
                if reference_keepers.has_key(classname):
                    rlist = reference_keepers[classname]
                    for ref in rlist:
                        classes.write(" self.%s = None\n" % ref[1])
                classes.write(" self._o = _obj\n")
                classes.write(" %s.__init__(self, _obj=_obj)\n\n" % (
                              classes_ancestor[classname]))
            else:
                classes.write("class %s:\n" % (classname))
                if classname in [ "virDomain", "virNetwork", "virInterface", "virStoragePool",
                                  "virStorageVol", "virNodeDevice", "virSecret","virStream",
                                  "virNWFilter" ]:
                    classes.write(" def __init__(self, conn, _obj=None):\n")
                elif classname in [ 'virDomainSnapshot' ]:
                    classes.write(" def __init__(self, dom, _obj=None):\n")
                else:
                    classes.write(" def __init__(self, _obj=None):\n")
                if reference_keepers.has_key(classname):
                    # renamed from 'list': don't shadow the builtin
                    rlist = reference_keepers[classname]
                    for ref in rlist:
                        classes.write(" self.%s = None\n" % ref[1])
                if classname in [ "virDomain", "virNetwork", "virInterface",
                                  "virNodeDevice", "virSecret", "virStream",
                                  "virNWFilter" ]:
                    classes.write(" self._conn = conn\n")
                elif classname in [ "virStorageVol", "virStoragePool" ]:
                    classes.write(" self._conn = conn\n" + \
                                  " if not isinstance(conn, virConnect):\n" + \
                                  " self._conn = conn._conn\n")
                elif classname in [ "virDomainSnapshot" ]:
                    classes.write(" self._dom = dom\n")
                classes.write(" if _obj != None:self._o = _obj;return\n")
                classes.write(" self._o = None\n\n")
            destruct = None
            if classes_destructors.has_key(classname):
                classes.write(" def __del__(self):\n")
                classes.write(" if self._o != None:\n")
                classes.write(" libvirtmod.%s(self._o)\n" %
                              classes_destructors[classname])
                classes.write(" self._o = None\n\n")
                destruct = classes_destructors[classname]
            if not class_skip_connect_impl.has_key(classname):
                # Build python safe 'connect' method
                classes.write(" def connect(self):\n")
                classes.write(" return self._conn\n\n")
            if class_domain_impl.has_key(classname):
                classes.write(" def domain(self):\n")
                classes.write(" return self._dom\n\n")
            flist = function_classes[classname]
            flist.sort(functionCompare)
            oldfile = ""
            for info in flist:
                (index, func, name, ret, args, file) = info
                #
                # Do not provide as method the destructors for the class
                # to avoid double free
                #
                if name == destruct:
                    continue
                if file != oldfile:
                    if file == "python_accessor":
                        classes.write(" # accessors for %s\n" % (classname))
                    else:
                        classes.write(" #\n")
                        classes.write(" # %s functions from module %s\n" % (
                                      classname, file))
                        classes.write(" #\n\n")
                    oldfile = file
                classes.write(" def %s(self" % func)
                n = 0
                for arg in args:
                    if n != index:
                        classes.write(", %s" % arg[0])
                    n = n + 1
                classes.write("):\n")
                writeDoc(name, args, ' ', classes)
                n = 0
                for arg in args:
                    if classes_type.has_key(arg[1]):
                        if n != index:
                            classes.write(" if %s is None: %s__o = None\n" %
                                          (arg[0], arg[0]))
                            classes.write(" else: %s__o = %s%s\n" %
                                          (arg[0], arg[0], classes_type[arg[1]][0]))
                    n = n + 1
                if ret[0] != "void":
                    classes.write(" ret = ")
                else:
                    classes.write(" ")
                classes.write("libvirtmod.%s(" % name)
                n = 0
                for arg in args:
                    if n != 0:
                        classes.write(", ")
                    if n != index:
                        classes.write("%s" % arg[0])
                        if classes_type.has_key(arg[1]):
                            classes.write("__o")
                    else:
                        # the class-owning argument becomes self
                        classes.write("self")
                        if classes_type.has_key(arg[1]):
                            classes.write(classes_type[arg[1]][0])
                    n = n + 1
                classes.write(")\n")
                if name == "virConnectClose":
                    classes.write(" self._o = None\n")
                # For functions returning object types:
                if ret[0] != "void":
                    if classes_type.has_key(ret[0]):
                        #
                        # Raise an exception
                        #
                        if functions_noexcept.has_key(name):
                            classes.write(
                                " if ret is None:return None\n")
                        else:
                            if classname == "virConnect":
                                classes.write(
                                    " if ret is None:raise libvirtError('%s() failed', conn=self)\n" %
                                    (name))
                            elif classname == "virDomain":
                                classes.write(
                                    " if ret is None:raise libvirtError('%s() failed', dom=self)\n" %
                                    (name))
                            elif classname == "virNetwork":
                                classes.write(
                                    " if ret is None:raise libvirtError('%s() failed', net=self)\n" %
                                    (name))
                            elif classname == "virInterface":
                                classes.write(
                                    " if ret is None:raise libvirtError('%s() failed', net=self)\n" %
                                    (name))
                            elif classname == "virStoragePool":
                                classes.write(
                                    " if ret is None:raise libvirtError('%s() failed', pool=self)\n" %
                                    (name))
                            elif classname == "virStorageVol":
                                classes.write(
                                    " if ret is None:raise libvirtError('%s() failed', vol=self)\n" %
                                    (name))
                            elif classname == "virDomainSnapshot":
                                classes.write(
                                    " if ret is None:raise libvirtError('%s() failed', dom=self._dom)\n" %
                                    (name))
                            else:
                                classes.write(
                                    " if ret is None:raise libvirtError('%s() failed')\n" %
                                    (name))
                        #
                        # generate the returned class wrapper for the object
                        #
                        classes.write(" __tmp = ")
                        classes.write(classes_type[ret[0]][1] % ("ret"))
                        classes.write("\n")
                        #
                        # Sometime one need to keep references of the source
                        # class in the returned class object.
                        # See reference_keepers for the list
                        #
                        tclass = classes_type[ret[0]][2]
                        if reference_keepers.has_key(tclass):
                            rlist = reference_keepers[tclass]
                            for pref in rlist:
                                if pref[0] == classname:
                                    classes.write(" __tmp.%s = self\n" %
                                                  pref[1])
                        # Post-processing - just before we return.
                        if function_post.has_key(name):
                            classes.write(" %s\n" %
                                          (function_post[name]))
                        #
                        # return the class
                        #
                        classes.write(" return __tmp\n")
                    elif converter_type.has_key(ret[0]):
                        #
                        # Raise an exception
                        #
                        if functions_noexcept.has_key(name):
                            # BUGFIX: this write was missing its trailing
                            # newline, gluing the next generated statement
                            # onto the same line
                            classes.write(
                                " if ret is None:return None\n")
                        # Post-processing - just before we return.
                        if function_post.has_key(name):
                            classes.write(" %s\n" %
                                          (function_post[name]))
                        classes.write(" return ")
                        classes.write(converter_type[ret[0]] % ("ret"))
                        classes.write("\n")
                    # For functions returning an integral type there
                    # are several things that we can do, depending on
                    # the contents of functions_int_*:
                    elif is_integral_type(ret[0]):
                        if not functions_noexcept.has_key(name):
                            if functions_int_exception_test.has_key(name):
                                test = functions_int_exception_test[name]
                            else:
                                test = functions_int_default_test
                            if classname == "virConnect":
                                classes.write((" if " + test +
                                               ": raise libvirtError ('%s() failed', conn=self)\n") %
                                              ("ret", name))
                            elif classname == "virDomain":
                                classes.write((" if " + test +
                                               ": raise libvirtError ('%s() failed', dom=self)\n") %
                                              ("ret", name))
                            elif classname == "virNetwork":
                                classes.write((" if " + test +
                                               ": raise libvirtError ('%s() failed', net=self)\n") %
                                              ("ret", name))
                            elif classname == "virInterface":
                                classes.write((" if " + test +
                                               ": raise libvirtError ('%s() failed', net=self)\n") %
                                              ("ret", name))
                            elif classname == "virStoragePool":
                                classes.write((" if " + test +
                                               ": raise libvirtError ('%s() failed', pool=self)\n") %
                                              ("ret", name))
                            elif classname == "virStorageVol":
                                classes.write((" if " + test +
                                               ": raise libvirtError ('%s() failed', vol=self)\n") %
                                              ("ret", name))
                            else:
                                classes.write((" if " + test +
                                               ": raise libvirtError ('%s() failed')\n") %
                                              ("ret", name))
                        # Post-processing - just before we return.
                        if function_post.has_key(name):
                            classes.write(" %s\n" %
                                          (function_post[name]))
                        classes.write(" return ret\n")
                    elif is_list_type(ret[0]):
                        if not functions_noexcept.has_key(name):
                            if functions_list_exception_test.has_key(name):
                                test = functions_list_exception_test[name]
                            else:
                                test = functions_list_default_test
                            if classname == "virConnect":
                                classes.write((" if " + test +
                                               ": raise libvirtError ('%s() failed', conn=self)\n") %
                                              ("ret", name))
                            elif classname == "virDomain":
                                classes.write((" if " + test +
                                               ": raise libvirtError ('%s() failed', dom=self)\n") %
                                              ("ret", name))
                            elif classname == "virNetwork":
                                classes.write((" if " + test +
                                               ": raise libvirtError ('%s() failed', net=self)\n") %
                                              ("ret", name))
                            elif classname == "virInterface":
                                classes.write((" if " + test +
                                               ": raise libvirtError ('%s() failed', net=self)\n") %
                                              ("ret", name))
                            elif classname == "virStoragePool":
                                classes.write((" if " + test +
                                               ": raise libvirtError ('%s() failed', pool=self)\n") %
                                              ("ret", name))
                            elif classname == "virStorageVol":
                                classes.write((" if " + test +
                                               ": raise libvirtError ('%s() failed', vol=self)\n") %
                                              ("ret", name))
                            else:
                                classes.write((" if " + test +
                                               ": raise libvirtError ('%s() failed')\n") %
                                              ("ret", name))
                        # Post-processing - just before we return.
                        if function_post.has_key(name):
                            classes.write(" %s\n" %
                                          (function_post[name]))
                        classes.write(" return ret\n")
                    else:
                        # Post-processing - just before we return.
                        if function_post.has_key(name):
                            classes.write(" %s\n" %
                                          (function_post[name]))
                        classes.write(" return ret\n")
                classes.write("\n")
            # Append "<classname>.py" to class def, iff it exists
            try:
                extra = open(os.path.join(srcPref, "libvirt-override-" + classname + ".py"), "r")
                classes.write(" #\n")
                classes.write(" # %s methods from %s.py (hand coded)\n" % (classname, classname))
                classes.write(" #\n")
                classes.writelines(extra.readlines())
                classes.write("\n")
                extra.close()
            except:
                # the per-class override file is optional
                pass

    #
    # Generate enum constants
    #
    # NOTE: the loop variable shadows the module-level enum() recorder,
    # which is not used past parsing time.
    for type, enum in enums.items():
        classes.write("# %s\n" % type)
        items = enum.items()
        items.sort(lambda i1, i2: cmp(long(i1[1]), long(i2[1])))
        for name, value in items:
            classes.write("%s = %s\n" % (name, value))
        classes.write("\n")
    classes.close()
# Script driver: generate the C stubs first; only write libvirt.py when
# stub generation succeeded.
if buildStubs() < 0:
    sys.exit(1)
buildWrappers()
sys.exit(0)
python: Generate virStreamFree but don't expose in bindings
Turns out I was right in removing this the first time :) This is
needed in our custom __del__ function, but the C code wasn't
being generated. Add new infrastructure to do what we want
#!/usr/bin/python -u
#
# generate python wrappers from the XML API description
#
# name -> (description, return, args, file, cond) records parsed from the XML
functions = {}
enums = {} # { enumType: { enumConstant: enumValue } }
import os
import sys
import string
import re
if __name__ == "__main__":
    # launched as a script: argv[1] is the python binary the generated
    # libvirt.py shebang line should reference
    srcPref = os.path.dirname(sys.argv[0])
    if len(sys.argv) > 1:
        python = sys.argv[1]
    else:
        # parenthesized print form parses under both python 2 and 3
        print("Python binary not specified")
        sys.exit(1)
else:
    # imported
    srcPref = os.path.dirname(__file__)
#######################################################################
#
# That part is purely the API acquisition phase from the
# libvirt API description
#
#######################################################################
import os
import xml.sax
debug = 0 # set non-zero to trace the SAX parser callbacks on stdout
def getparser():
    """Create a SAX parser wired to a fresh docParser handler.

    Returns the (parser, handler) pair so callers can feed the API XML
    through the parser and read the collected records off the handler.
    """
    handler = docParser()
    sax_parser = xml.sax.make_parser()
    sax_parser.setContentHandler(handler)
    return sax_parser, handler
class docParser(xml.sax.handler.ContentHandler):
    """SAX handler collecting <function>, <arg>, <return>, <enum>, <info>
    and <cond> elements from the API XML and feeding them to the
    module-level function() and enum() recorders."""
    def __init__(self):
        self._methodname = None
        self._data = []
        self.in_function = 0
        # route the ContentHandler callbacks to the short-named methods
        self.startElement = self.start
        self.endElement = self.end
        self.characters = self.data

    def close(self):
        if debug:
            print "close"

    def getmethodname(self):
        return self._methodname

    def data(self, text):
        # character data accumulates until the enclosing tag ends
        if debug:
            print "data %s" % text
        self._data.append(text)

    def cdata(self, text):
        if debug:
            print "data %s" % text
        self._data.append(text)

    def start(self, tag, attrs):
        if debug:
            print "start %s, %s" % (tag, attrs)
        if tag == 'function':
            # reset the per-function state; attributes filled in by
            # nested <arg>/<return>/<info>/<cond> elements
            self._data = []
            self.in_function = 1
            self.function = None
            self.function_cond = None
            self.function_args = []
            self.function_descr = None
            self.function_return = None
            self.function_file = None
            if attrs.has_key('name'):
                self.function = attrs['name']
            if attrs.has_key('file'):
                self.function_file = attrs['file']
        elif tag == 'cond':
            self._data = []
        elif tag == 'info':
            self._data = []
        elif tag == 'arg':
            if self.in_function == 1:
                self.function_arg_name = None
                self.function_arg_type = None
                self.function_arg_info = None
                if attrs.has_key('name'):
                    self.function_arg_name = attrs['name']
                    # 'from' is a python keyword, rename the parameter
                    if self.function_arg_name == 'from':
                        self.function_arg_name = 'frm'
                if attrs.has_key('type'):
                    self.function_arg_type = attrs['type']
                if attrs.has_key('info'):
                    self.function_arg_info = attrs['info']
        elif tag == 'return':
            if self.in_function == 1:
                self.function_return_type = None
                self.function_return_info = None
                self.function_return_field = None
                if attrs.has_key('type'):
                    self.function_return_type = attrs['type']
                if attrs.has_key('info'):
                    self.function_return_info = attrs['info']
                if attrs.has_key('field'):
                    self.function_return_field = attrs['field']
        elif tag == 'enum':
            enum(attrs['type'],attrs['name'],attrs['value'])

    def end(self, tag):
        if debug:
            print "end %s" % tag
        if tag == 'function':
            # record the completed function in the global table
            if self.function != None:
                function(self.function, self.function_descr,
                         self.function_return, self.function_args,
                         self.function_file, self.function_cond)
                self.in_function = 0
        elif tag == 'arg':
            if self.in_function == 1:
                self.function_args.append([self.function_arg_name,
                                           self.function_arg_type,
                                           self.function_arg_info])
        elif tag == 'return':
            if self.in_function == 1:
                self.function_return = [self.function_return_type,
                                        self.function_return_info,
                                        self.function_return_field]
        elif tag == 'info':
            # NOTE(review): 'str' shadows the builtin here
            str = ''
            for c in self._data:
                str = str + c
            if self.in_function == 1:
                self.function_descr = str
        elif tag == 'cond':
            str = ''
            for c in self._data:
                str = str + c
            if self.in_function == 1:
                self.function_cond = str
def function(name, desc, ret, args, file, cond):
    # Parser callback for each completed <function> element: record the
    # function's metadata in the global 'functions' table.
    functions[name] = (desc, ret, args, file, cond)
def enum(type, name, value):
    """Parser callback for each <enum> element: record the value in the
    global 'enums' table, mapping a handful of symbolic libvirt constant
    names (as they appear in the API XML) to their numeric values."""
    symbolic_values = {
        'VIR_TYPED_PARAM_INT': 1,
        'VIR_TYPED_PARAM_UINT': 2,
        'VIR_TYPED_PARAM_LLONG': 3,
        'VIR_TYPED_PARAM_ULLONG': 4,
        'VIR_TYPED_PARAM_DOUBLE': 5,
        'VIR_TYPED_PARAM_BOOLEAN': 6,
        'VIR_DOMAIN_AFFECT_CURRENT': 0,
        'VIR_DOMAIN_AFFECT_LIVE': 1,
        'VIR_DOMAIN_AFFECT_CONFIG': 2,
    }
    if type not in enums:
        enums[type] = {}
    # Unrecognized values are stored verbatim, exactly as before.
    enums[type][name] = symbolic_values.get(value, value)
#######################################################################
#
# Some filtering rules to drop functions/types which should not
# be exposed as-is on the Python interface
#
#######################################################################
# Functions whose wrapper generation failed (filled during generation).
functions_failed = []
# Functions deliberately skipped; pre-seeded with entries excluded
# before generation even starts.
functions_skipped = [
    "virConnectListDomains",
]
# API XML 'file' (module) names to ignore wholesale.
skipped_modules = {
}
# C types with no Python representation; any function using one of
# these types is silently dropped from the bindings.
skipped_types = {
    # 'int *': "usually a return type",
    'virConnectDomainEventCallback': "No function types in python",
    'virConnectDomainEventGenericCallback': "No function types in python",
    'virConnectDomainEventRTCChangeCallback': "No function types in python",
    'virConnectDomainEventWatchdogCallback': "No function types in python",
    'virConnectDomainEventIOErrorCallback': "No function types in python",
    'virConnectDomainEventGraphicsCallback': "No function types in python",
    'virStreamEventCallback': "No function types in python",
    'virEventHandleCallback': "No function types in python",
    'virEventTimeoutCallback': "No function types in python",
}
#######################################################################
#
# Table of remapping to/from the python type or class to the C
# counterpart.
#
#######################################################################
# Maps a C type to a 4-tuple:
#   (PyArg_ParseTuple format character,
#    Python wrapper class name (None for scalar types),
#    libvirt_<suffix>Wrap conversion-function suffix,
#    C type used for the cast in the generated wrapper).
py_types = {
    'void': (None, None, None, None),
    'int': ('i', None, "int", "int"),
    'long': ('l', None, "long", "long"),
    'double': ('d', None, "double", "double"),
    'unsigned int': ('i', None, "int", "int"),
    'unsigned long': ('l', None, "long", "long"),
    'unsigned long long': ('l', None, "longlong", "long long"),
    'unsigned char *': ('z', None, "charPtr", "char *"),
    'char *': ('z', None, "charPtr", "char *"),
    'const char *': ('z', None, "charPtrConst", "const char *"),
    'virDomainPtr': ('O', "virDomain", "virDomainPtr", "virDomainPtr"),
    'const virDomainPtr': ('O', "virDomain", "virDomainPtr", "virDomainPtr"),
    'virDomain *': ('O', "virDomain", "virDomainPtr", "virDomainPtr"),
    'const virDomain *': ('O', "virDomain", "virDomainPtr", "virDomainPtr"),
    'virNetworkPtr': ('O', "virNetwork", "virNetworkPtr", "virNetworkPtr"),
    'const virNetworkPtr': ('O', "virNetwork", "virNetworkPtr", "virNetworkPtr"),
    'virNetwork *': ('O', "virNetwork", "virNetworkPtr", "virNetworkPtr"),
    'const virNetwork *': ('O', "virNetwork", "virNetworkPtr", "virNetworkPtr"),
    'virInterfacePtr': ('O', "virInterface", "virInterfacePtr", "virInterfacePtr"),
    'const virInterfacePtr': ('O', "virInterface", "virInterfacePtr", "virInterfacePtr"),
    'virInterface *': ('O', "virInterface", "virInterfacePtr", "virInterfacePtr"),
    'const virInterface *': ('O', "virInterface", "virInterfacePtr", "virInterfacePtr"),
    'virStoragePoolPtr': ('O', "virStoragePool", "virStoragePoolPtr", "virStoragePoolPtr"),
    'const virStoragePoolPtr': ('O', "virStoragePool", "virStoragePoolPtr", "virStoragePoolPtr"),
    'virStoragePool *': ('O', "virStoragePool", "virStoragePoolPtr", "virStoragePoolPtr"),
    'const virStoragePool *': ('O', "virStoragePool", "virStoragePoolPtr", "virStoragePoolPtr"),
    'virStorageVolPtr': ('O', "virStorageVol", "virStorageVolPtr", "virStorageVolPtr"),
    'const virStorageVolPtr': ('O', "virStorageVol", "virStorageVolPtr", "virStorageVolPtr"),
    'virStorageVol *': ('O', "virStorageVol", "virStorageVolPtr", "virStorageVolPtr"),
    'const virStorageVol *': ('O', "virStorageVol", "virStorageVolPtr", "virStorageVolPtr"),
    'virConnectPtr': ('O', "virConnect", "virConnectPtr", "virConnectPtr"),
    'const virConnectPtr': ('O', "virConnect", "virConnectPtr", "virConnectPtr"),
    'virConnect *': ('O', "virConnect", "virConnectPtr", "virConnectPtr"),
    'const virConnect *': ('O', "virConnect", "virConnectPtr", "virConnectPtr"),
    'virNodeDevicePtr': ('O', "virNodeDevice", "virNodeDevicePtr", "virNodeDevicePtr"),
    'const virNodeDevicePtr': ('O', "virNodeDevice", "virNodeDevicePtr", "virNodeDevicePtr"),
    'virNodeDevice *': ('O', "virNodeDevice", "virNodeDevicePtr", "virNodeDevicePtr"),
    'const virNodeDevice *': ('O', "virNodeDevice", "virNodeDevicePtr", "virNodeDevicePtr"),
    'virSecretPtr': ('O', "virSecret", "virSecretPtr", "virSecretPtr"),
    'const virSecretPtr': ('O', "virSecret", "virSecretPtr", "virSecretPtr"),
    'virSecret *': ('O', "virSecret", "virSecretPtr", "virSecretPtr"),
    'const virSecret *': ('O', "virSecret", "virSecretPtr", "virSecretPtr"),
    'virNWFilterPtr': ('O', "virNWFilter", "virNWFilterPtr", "virNWFilterPtr"),
    'const virNWFilterPtr': ('O', "virNWFilter", "virNWFilterPtr", "virNWFilterPtr"),
    'virNWFilter *': ('O', "virNWFilter", "virNWFilterPtr", "virNWFilterPtr"),
    'const virNWFilter *': ('O', "virNWFilter", "virNWFilterPtr", "virNWFilterPtr"),
    'virStreamPtr': ('O', "virStream", "virStreamPtr", "virStreamPtr"),
    'const virStreamPtr': ('O', "virStream", "virStreamPtr", "virStreamPtr"),
    'virStream *': ('O', "virStream", "virStreamPtr", "virStreamPtr"),
    'const virStream *': ('O', "virStream", "virStreamPtr", "virStreamPtr"),
    'virDomainSnapshotPtr': ('O', "virDomainSnapshot", "virDomainSnapshotPtr", "virDomainSnapshotPtr"),
    'const virDomainSnapshotPtr': ('O', "virDomainSnapshot", "virDomainSnapshotPtr", "virDomainSnapshotPtr"),
    'virDomainSnapshot *': ('O', "virDomainSnapshot", "virDomainSnapshotPtr", "virDomainSnapshotPtr"),
    'const virDomainSnapshot *': ('O', "virDomainSnapshot", "virDomainSnapshotPtr", "virDomainSnapshotPtr"),
}
# Return types needing a dedicated wrapper (none at the moment).
py_return_types = {
}
# Filled during generation: C type -> list of functions that use it.
unknown_types = {}
# Functions whose string arguments may carry a foreign encoding and are
# therefore parsed as sized buffers ('t#') instead of 'z'.
foreign_encoding_args = (
)
#######################################################################
#
# This part writes the C <-> Python stubs libvirt.[ch] and
# the table libvirt-export.c to add when registering the Python module
#
#######################################################################
# Class methods which are written by hand in libvir.c but the Python-level
# code is still automatically generated (so they are not in skip_function()).
skip_impl = (
    'virConnectGetVersion',
    'virConnectGetLibVersion',
    'virConnectListDomainsID',
    'virConnectListDefinedDomains',
    'virConnectListNetworks',
    'virConnectListDefinedNetworks',
    'virConnectListSecrets',
    'virConnectListInterfaces',
    'virConnectListStoragePools',
    'virConnectListDefinedStoragePools',
    'virConnectListStorageVols',
    'virConnectListDefinedStorageVols',
    'virConnectListDefinedInterfaces',
    'virConnectListNWFilters',
    'virDomainSnapshotListNames',
    'virConnGetLastError',
    'virGetLastError',
    'virDomainGetInfo',
    'virDomainGetState',
    'virDomainGetControlInfo',
    'virDomainGetBlockInfo',
    'virDomainGetJobInfo',
    'virNodeGetInfo',
    'virDomainGetUUID',
    'virDomainGetUUIDString',
    'virDomainLookupByUUID',
    'virNetworkGetUUID',
    'virNetworkGetUUIDString',
    'virNetworkLookupByUUID',
    'virDomainGetAutostart',
    'virNetworkGetAutostart',
    'virDomainBlockStats',
    'virDomainInterfaceStats',
    'virDomainMemoryStats',
    'virNodeGetCellsFreeMemory',
    'virDomainGetSchedulerType',
    'virDomainGetSchedulerParameters',
    'virDomainGetSchedulerParametersFlags',
    'virDomainSetSchedulerParameters',
    'virDomainSetSchedulerParametersFlags',
    'virDomainSetBlkioParameters',
    'virDomainGetBlkioParameters',
    'virDomainSetMemoryParameters',
    'virDomainGetMemoryParameters',
    'virDomainGetVcpus',
    'virDomainPinVcpu',
    'virSecretGetValue',
    'virSecretSetValue',
    'virSecretGetUUID',
    'virSecretGetUUIDString',
    'virSecretLookupByUUID',
    'virNWFilterGetUUID',
    'virNWFilterGetUUIDString',
    'virNWFilterLookupByUUID',
    'virStoragePoolGetUUID',
    'virStoragePoolGetUUIDString',
    'virStoragePoolLookupByUUID',
    'virStoragePoolGetInfo',
    'virStorageVolGetInfo',
    'virStoragePoolGetAutostart',
    'virStoragePoolListVolumes',
    'virDomainBlockPeek',
    'virDomainMemoryPeek',
    'virEventRegisterImpl',
    'virNodeListDevices',
    'virNodeDeviceListCaps',
    'virConnectBaselineCPU',
    'virDomainRevertToSnapshot',
    'virDomainSendKey',
    'virNodeGetCPUStats',
    'virNodeGetMemoryStats',
    'virDomainBlockPull',
    'virDomainGetBlockPullInfo',
)
# These are functions which the generator skips completely - no python
# or C code is generated. Generally should not be used for any more
# functions than those already listed
skip_function = (
    'virConnectListDomains', # Python API is called virConnectListDomainsID for unknown reasons
    'virConnSetErrorFunc', # Not used in Python API XXX is this a bug ?
    'virResetError', # Not used in Python API XXX is this a bug ?
    'virGetVersion', # Python C code is manually written
    'virSetErrorFunc', # Python API is called virRegisterErrorHandler for unknown reasons
    'virConnCopyLastError', # Python API is called virConnGetLastError instead
    'virCopyLastError', # Python API is called virGetLastError instead
    'virConnectOpenAuth', # Python C code is manually written
    'virDefaultErrorFunc', # Python virErrorFuncHandler impl calls this from C
    'virDomainGetSecurityLabel', # Needs investigation...
    'virNodeGetSecurityModel', # Needs investigation...
    'virConnectDomainEventRegister',   # overridden in virConnect.py
    'virConnectDomainEventDeregister', # overridden in virConnect.py
    'virConnectDomainEventRegisterAny',   # overridden in virConnect.py
    'virConnectDomainEventDeregisterAny', # overridden in virConnect.py
    'virSaveLastError', # We have our own python error wrapper
    'virFreeError', # Only needed if we use virSaveLastError
    'virStreamRecvAll', # Pure python libvirt-override-virStream.py
    'virStreamSendAll', # Pure python libvirt-override-virStream.py
    'virStreamRecv', # overridden in libvirt-override-virStream.py
    'virStreamSend', # overridden in libvirt-override-virStream.py
    # 'Ref' functions have no use for bindings users.
    "virConnectRef",
    "virDomainRef",
    "virInterfaceRef",
    "virNetworkRef",
    "virNodeDeviceRef",
    "virSecretRef",
    "virNWFilterRef",
    "virStoragePoolRef",
    "virStorageVolRef",
    'virStreamRef',
    # These functions shouldn't be called via the bindings (and even the docs
    # contain an explicit warning to that effect). The equivalent should be
    # implemented in pure python for each class
    "virDomainGetConnect",
    "virInterfaceGetConnect",
    "virNetworkGetConnect",
    "virSecretGetConnect",
    "virNWFilterGetConnect",
    "virStoragePoolGetConnect",
    "virStorageVolGetConnect",
)
# Generate C code, but skip python impl
function_skip_python_impl = {
    "virStreamFree", # Needed in custom virStream __del__, but free shouldn't
                     # be exposed in bindings
}
# Functions for which a match on the *second* argument's type must not
# bind the function to that argument's class.
function_skip_index_one = (
    "virDomainRevertToSnapshot",
)
def print_function_wrapper(name, output, export, include):
    """Generate the C-level binding for one libvirt API function.

    name    -- C function name, key into the global 'functions' table
    output  -- file object for the libvirt.c wrapper bodies
    export  -- file object for the libvirt-export.c method table
    include -- file object for the libvirt.h prototypes

    Returns 1 when a binding exists (generated here or written by hand),
    0 when the function is deliberately skipped, -1 when no converter is
    known for one of its types.  A missing 'functions' entry yields a
    bare return (None), which the Python 2 caller counts as a failure
    since None < 0 there.
    """
    global py_types
    global unknown_types
    global functions
    global skipped_modules
    global function_skip_python_impl
    try:
        (desc, ret, args, file, cond) = functions[name]
    except KeyError:
        # BUG FIX: the original print lacked its format argument and
        # emitted a literal "%s"; the bare except is narrowed to the
        # only exception a dict lookup raises here.
        print("failed to get function %s infos" % name)
        return
    if file in skipped_modules:
        return 0
    if name in skip_function:
        return 0
    if name in skip_impl:
        # Don't delete the function entry in the caller.
        return 1

    c_call = ""
    format = ""
    format_args = ""
    c_args = ""
    c_return = ""
    c_convert = ""
    num_bufs = 0
    for arg in args:
        # Drop a leading "const " qualifier before looking the type up.
        if arg[1][0:6] == "const ":
            arg[1] = arg[1][6:]
        c_args = c_args + " %s %s;\n" % (arg[1], arg[0])
        if arg[1] in py_types:
            (f, t, n, c) = py_types[arg[1]]
            # Foreign-encoding string args are parsed as sized buffers.
            if (f == 'z') and (name in foreign_encoding_args) and (num_bufs == 0):
                f = 't#'
            if f != None:
                format = format + f
            if t != None:
                # Wrapped libvirt object: parse a PyObject, then unwrap
                # it to the underlying C pointer.
                format_args = format_args + ", &pyobj_%s" % (arg[0])
                c_args = c_args + " PyObject *pyobj_%s;\n" % (arg[0])
                c_convert = c_convert + \
                    " %s = (%s) Py%s_Get(pyobj_%s);\n" % (arg[0],
                                                          arg[1], t, arg[0])
            else:
                format_args = format_args + ", &%s" % (arg[0])
            if f == 't#':
                # Buffer arguments need a companion length variable.
                format_args = format_args + ", &py_buffsize%d" % num_bufs
                c_args = c_args + " int py_buffsize%d;\n" % num_bufs
                num_bufs = num_bufs + 1
            if c_call != "":
                c_call = c_call + ", "
            c_call = c_call + "%s" % (arg[0])
        else:
            if arg[1] in skipped_types:
                return 0
            # Record the unconvertible type for the summary report.
            if arg[1] in unknown_types:
                unknown_types[arg[1]].append(name)
            else:
                unknown_types[arg[1]] = [name]
            return -1
    if format != "":
        format = format + ":%s" % (name)

    if ret[0] == 'void':
        if file == "python_accessor":
            if args[1][1] == "char *":
                # Setter for a string field: free the old value, then
                # strdup the new one.
                # BUG FIX: the original passed four format arguments to
                # this two-placeholder string, which raises TypeError
                # ("not all arguments converted") at generation time.
                c_call = "\n free(%s->%s);\n" % (
                    args[0][0], args[1][0])
                c_call = c_call + " %s->%s = (%s)strdup((const xmlChar *)%s);\n" % (args[0][0],
                    args[1][0], args[1][1], args[1][0])
            else:
                c_call = "\n %s->%s = %s;\n" % (args[0][0], args[1][0],
                                                args[1][0])
        else:
            c_call = "\n %s(%s);\n" % (name, c_call)
        ret_convert = " Py_INCREF(Py_None);\n return(Py_None);\n"
    elif ret[0] in py_types:
        (f, t, n, c) = py_types[ret[0]]
        c_return = " %s c_retval;\n" % (ret[0])
        if file == "python_accessor" and ret[2] != None:
            # Getter accessor: read the struct field directly.
            c_call = "\n c_retval = %s->%s;\n" % (args[0][0], ret[2])
        else:
            c_call = "\n c_retval = %s(%s);\n" % (name, c_call)
        ret_convert = " py_retval = libvirt_%sWrap((%s) c_retval);\n" % (n, c)
        ret_convert = ret_convert + " return(py_retval);\n"
    elif ret[0] in py_return_types:
        (f, t, n, c) = py_return_types[ret[0]]
        c_return = " %s c_retval;\n" % (ret[0])
        c_call = "\n c_retval = %s(%s);\n" % (name, c_call)
        ret_convert = " py_retval = libvirt_%sWrap((%s) c_retval);\n" % (n, c)
        ret_convert = ret_convert + " return(py_retval);\n"
    else:
        if ret[0] in skipped_types:
            return 0
        if ret[0] in unknown_types:
            unknown_types[ret[0]].append(name)
        else:
            unknown_types[ret[0]] = [name]
        return -1

    # Prototype and method-table entry, guarded by the function's
    # compile-time condition when there is one.
    if cond != None and cond != "":
        include.write("#if %s\n" % cond)
        export.write("#if %s\n" % cond)
        output.write("#if %s\n" % cond)
    include.write("PyObject * ")
    include.write("libvirt_%s(PyObject *self, PyObject *args);\n" % (name))
    export.write(" { (char *)\"%s\", libvirt_%s, METH_VARARGS, NULL },\n" %
                 (name, name))

    if file == "python":
        # Those have been manually generated
        if cond != None and cond != "":
            include.write("#endif\n")
            export.write("#endif\n")
            output.write("#endif\n")
        return 1
    if file == "python_accessor" and ret[0] != "void" and ret[2] is None:
        # Those have been manually generated
        if cond != None and cond != "":
            include.write("#endif\n")
            export.write("#endif\n")
            output.write("#endif\n")
        return 1

    # Emit the wrapper body itself.
    output.write("PyObject *\n")
    output.write("libvirt_%s(PyObject *self ATTRIBUTE_UNUSED," % (name))
    output.write(" PyObject *args")
    if format == "":
        output.write(" ATTRIBUTE_UNUSED")
    output.write(") {\n")
    if ret[0] != 'void':
        output.write(" PyObject *py_retval;\n")
    if c_return != "":
        output.write(c_return)
    if c_args != "":
        output.write(c_args)
    if format != "":
        output.write("\n if (!PyArg_ParseTuple(args, (char *)\"%s\"%s))\n" %
                     (format, format_args))
        output.write(" return(NULL);\n")
    if c_convert != "":
        output.write(c_convert)
    # The actual libvirt call runs with the GIL released.
    output.write("LIBVIRT_BEGIN_ALLOW_THREADS;\n")
    output.write(c_call)
    output.write("LIBVIRT_END_ALLOW_THREADS;\n")
    output.write(ret_convert)
    output.write("}\n\n")
    if cond != None and cond != "":
        include.write("#endif /* %s */\n" % cond)
        export.write("#endif /* %s */\n" % cond)
        output.write("#endif /* %s */\n" % cond)
    if name in function_skip_python_impl:
        # C code exists, but the Python-level wrapper is hand-written.
        return 0
    return 1
def buildStubs():
    """Parse libvirt-api.xml (plus libvirt-override-api.xml) and write
    the generated C files: libvirt.c, libvirt.h and libvirt-export.c.

    Returns 0 on success, -1 if any wrapper failed to generate.
    """
    global py_types
    global py_return_types
    global unknown_types
    try:
        f = open(os.path.join(srcPref,"libvirt-api.xml"))
        data = f.read()
        (parser, target) = getparser()
        parser.feed(data)
        parser.close()
    except IOError, msg:
        # Fall back to the in-tree docs location of the API description.
        try:
            f = open(os.path.join(srcPref,"..","docs","libvirt-api.xml"))
            data = f.read()
            (parser, target) = getparser()
            parser.feed(data)
            parser.close()
        except IOError, msg:
            # NOTE(review): 'file' here is the Python 2 builtin type, not
            # a filename -- this prints "<type 'file'>"; looks like a
            # latent bug, confirm intent before changing.
            print file, ":", msg
            sys.exit(1)
    n = len(functions.keys())
    print "Found %d functions in libvirt-api.xml" % (n)
    # Register a pass-through converter used by the override API XML.
    py_types['pythonObject'] = ('O', "pythonObject", "pythonObject", "pythonObject")
    try:
        f = open(os.path.join(srcPref,"libvirt-override-api.xml"))
        data = f.read()
        (parser, target) = getparser()
        parser.feed(data)
        parser.close()
    except IOError, msg:
        # NOTE(review): same 'file' builtin issue as above.
        print file, ":", msg
    print "Found %d functions in libvirt-override-api.xml" % (
        len(functions.keys()) - n)
    nb_wrap = 0
    failed = 0
    skipped = 0
    include = open("libvirt.h", "w")
    include.write("/* Generated */\n\n")
    export = open("libvirt-export.c", "w")
    export.write("/* Generated */\n\n")
    wrapper = open("libvirt.c", "w")
    wrapper.write("/* Generated */\n\n")
    wrapper.write("#include <Python.h>\n")
    wrapper.write("#include <libvirt/libvirt.h>\n")
    wrapper.write("#include \"typewrappers.h\"\n")
    wrapper.write("#include \"libvirt.h\"\n\n")
    # Python 2's dict.keys() returns a list copy, so deleting entries
    # while iterating is safe here.
    for function in functions.keys():
        ret = print_function_wrapper(function, wrapper, export, include)
        if ret < 0:
            failed = failed + 1
            functions_failed.append(function)
            del functions[function]
        if ret == 0:
            skipped = skipped + 1
            functions_skipped.append(function)
            del functions[function]
        if ret == 1:
            nb_wrap = nb_wrap + 1
    include.close()
    export.close()
    wrapper.close()
    print "Generated %d wrapper functions" % nb_wrap
    if unknown_types:
        print "Missing type converters: "
        for type in unknown_types.keys():
            print "%s:%d " % (type, len(unknown_types[type])),
        print
    for f in functions_failed:
        print "ERROR: failed %s" % f
    if failed > 0:
        return -1
    return 0
#######################################################################
#
# This part writes part of the Python front-end classes based on
# mapping rules between types and classes and also based on function
# renaming to get consistent function names at the Python level
#
#######################################################################
#
# The type automatically remapped to generated classes
#
# Maps a C type to (accessor that unwraps the Python object to the raw
# C object, template that wraps a returned C object into its Python
# class, owning Python class name).
classes_type = {
    "virDomainPtr": ("._o", "virDomain(self,_obj=%s)", "virDomain"),
    "virDomain *": ("._o", "virDomain(self, _obj=%s)", "virDomain"),
    "virNetworkPtr": ("._o", "virNetwork(self, _obj=%s)", "virNetwork"),
    "virNetwork *": ("._o", "virNetwork(self, _obj=%s)", "virNetwork"),
    "virInterfacePtr": ("._o", "virInterface(self, _obj=%s)", "virInterface"),
    "virInterface *": ("._o", "virInterface(self, _obj=%s)", "virInterface"),
    "virStoragePoolPtr": ("._o", "virStoragePool(self, _obj=%s)", "virStoragePool"),
    "virStoragePool *": ("._o", "virStoragePool(self, _obj=%s)", "virStoragePool"),
    "virStorageVolPtr": ("._o", "virStorageVol(self, _obj=%s)", "virStorageVol"),
    "virStorageVol *": ("._o", "virStorageVol(self, _obj=%s)", "virStorageVol"),
    "virNodeDevicePtr": ("._o", "virNodeDevice(self, _obj=%s)", "virNodeDevice"),
    "virNodeDevice *": ("._o", "virNodeDevice(self, _obj=%s)", "virNodeDevice"),
    "virSecretPtr": ("._o", "virSecret(self, _obj=%s)", "virSecret"),
    "virSecret *": ("._o", "virSecret(self, _obj=%s)", "virSecret"),
    "virNWFilterPtr": ("._o", "virNWFilter(self, _obj=%s)", "virNWFilter"),
    "virNWFilter *": ("._o", "virNWFilter(self, _obj=%s)", "virNWFilter"),
    "virStreamPtr": ("._o", "virStream(self, _obj=%s)", "virStream"),
    "virStream *": ("._o", "virStream(self, _obj=%s)", "virStream"),
    "virConnectPtr": ("._o", "virConnect(_obj=%s)", "virConnect"),
    "virConnect *": ("._o", "virConnect(_obj=%s)", "virConnect"),
    "virDomainSnapshotPtr": ("._o", "virDomainSnapshot(self,_obj=%s)", "virDomainSnapshot"),
    "virDomainSnapshot *": ("._o", "virDomainSnapshot(self, _obj=%s)", "virDomainSnapshot"),
}
converter_type = {
}
primary_classes = ["virDomain", "virNetwork", "virInterface",
                   "virStoragePool", "virStorageVol",
                   "virConnect", "virNodeDevice", "virSecret",
                   "virNWFilter", "virStream", "virDomainSnapshot"]
classes_ancestor = {
}
# C destructor called from each generated class's __del__.
classes_destructors = {
    "virDomain": "virDomainFree",
    "virNetwork": "virNetworkFree",
    "virInterface": "virInterfaceFree",
    "virStoragePool": "virStoragePoolFree",
    "virStorageVol": "virStorageVolFree",
    "virNodeDevice" : "virNodeDeviceFree",
    "virSecret": "virSecretFree",
    "virNWFilter": "virNWFilterFree",
    "virDomainSnapshot": "virDomainSnapshotFree",
    # We hand-craft __del__ for this one
    #"virStream": "virStreamFree",
}
class_skip_connect_impl = {
    "virConnect" : True,
    "virDomainSnapshot": True,
}
class_domain_impl = {
    "virDomainSnapshot": True,
}
# Functions whose Python wrapper must not raise on an error return.
functions_noexcept = {
    'virDomainGetID': True,
    'virDomainGetName': True,
    'virNetworkGetName': True,
    'virInterfaceGetName': True,
    'virStoragePoolGetName': True,
    'virStorageVolGetName': True,
    # NOTE(review): probably meant 'virStorageVolGetKey' (capital K);
    # as written this key matches no API function -- confirm.
    'virStorageVolGetkey': True,
    'virNodeDeviceGetName': True,
    'virNodeDeviceGetParent': True,
    'virSecretGetUsageType': True,
    'virSecretGetUsageID': True,
    'virNWFilterGetName': True,
}
reference_keepers = {
}
# class name -> list of (index, func, name, ret, args, file) records;
# "None" holds the module-level functions.
function_classes = {}
function_classes["None"] = []
function_post = {}
# Functions returning an integral type which need special rules to
# check for errors and raise exceptions.
functions_int_exception_test = {
    'virDomainGetMaxMemory': "%s == 0",
}
functions_int_default_test = "%s == -1"
def is_integral_type (name):
    """Return True when *name* is a plain C integral type handled by the
    generator: 'int' or 'long', optionally 'unsigned'-qualified."""
    matched = re.search ("^(unsigned)? ?(int|long)$", name)
    return matched is not None
# Functions returning lists which need special rules to check for errors
# and raise exceptions.
functions_list_exception_test = {
}
functions_list_default_test = "%s is None"
def is_list_type (name):
    """Return True when the C type denotes a list result: any pointer
    type (trailing '*') or one of the stats whitelist entries."""
    special_cases = ("virDomainBlockStats", "virDomainInterfaceStats")
    return name.endswith("*") or name in special_cases
def nameFixup(name, classe, type, file):
    """Derive the Python-level method name from the C function *name*
    relative to the owning class *classe*.

    The prefix checks are order-sensitive: longer/more specific prefixes
    must be tested before shorter ones.  The stripped name is then
    lower-cased on its first letter (first three for virNWFilter), with
    a few acronym fixups applied at the end.
    """
    # avoid a disastrous clash
    listname = classe + "List"
    ll = len(listname)
    l = len(classe)
    if name[0:l] == listname:
        func = name[l:]
        func = string.lower(func[0:1]) + func[1:]
    # Factory/lookup functions keep the class-name part of the C name
    # (only the "vir" prefix is dropped).
    elif name[0:16] == "virNetworkDefine":
        func = name[3:]
        func = string.lower(func[0:1]) + func[1:]
    elif name[0:19] == "virNetworkCreateXML":
        func = name[3:]
        func = string.lower(func[0:1]) + func[1:]
    elif name[0:16] == "virNetworkLookup":
        func = name[3:]
        func = string.lower(func[0:1]) + func[1:]
    elif name[0:18] == "virInterfaceDefine":
        func = name[3:]
        func = string.lower(func[0:1]) + func[1:]
    elif name[0:21] == "virInterfaceCreateXML":
        func = name[3:]
        func = string.lower(func[0:1]) + func[1:]
    elif name[0:18] == "virInterfaceLookup":
        func = name[3:]
        func = string.lower(func[0:1]) + func[1:]
    elif name[0:15] == "virSecretDefine":
        func = name[3:]
        func = string.lower(func[0:1]) + func[1:]
    elif name[0:15] == "virSecretLookup":
        func = name[3:]
        func = string.lower(func[0:1]) + func[1:]
    elif name[0:17] == "virNWFilterDefine":
        func = name[3:]
        func = string.lower(func[0:3]) + func[3:]
    elif name[0:17] == "virNWFilterLookup":
        func = name[3:]
        func = string.lower(func[0:3]) + func[3:]
    elif name[0:20] == "virStoragePoolDefine":
        func = name[3:]
        func = string.lower(func[0:1]) + func[1:]
    elif name[0:23] == "virStoragePoolCreateXML":
        func = name[3:]
        func = string.lower(func[0:1]) + func[1:]
    elif name[0:20] == "virStoragePoolLookup":
        func = name[3:]
        func = string.lower(func[0:1]) + func[1:]
    elif name[0:19] == "virStorageVolDefine":
        func = name[3:]
        func = string.lower(func[0:1]) + func[1:]
    elif name[0:19] == "virStorageVolLookup":
        func = name[3:]
        func = string.lower(func[0:1]) + func[1:]
    # Ordinary methods drop the whole class prefix (and a "Get" where
    # present).
    elif name[0:12] == "virDomainGet":
        func = name[12:]
        func = string.lower(func[0:1]) + func[1:]
    elif name[0:29] == "virDomainSnapshotLookupByName":
        func = name[9:]
        func = string.lower(func[0:1]) + func[1:]
    elif name[0:26] == "virDomainSnapshotListNames":
        func = name[9:]
        func = string.lower(func[0:1]) + func[1:]
    elif name[0:20] == "virDomainSnapshotNum":
        func = name[9:]
        func = string.lower(func[0:1]) + func[1:]
    elif name[0:26] == "virDomainSnapshotCreateXML":
        func = name[9:]
        func = string.lower(func[0:1]) + func[1:]
    elif name[0:24] == "virDomainSnapshotCurrent":
        func = name[9:]
        func = string.lower(func[0:1]) + func[1:]
    elif name[0:17] == "virDomainSnapshot":
        func = name[17:]
        func = string.lower(func[0:1]) + func[1:]
    elif name[0:9] == "virDomain":
        func = name[9:]
        func = string.lower(func[0:1]) + func[1:]
    elif name[0:13] == "virNetworkGet":
        func = name[13:]
        func = string.lower(func[0:1]) + func[1:]
    elif name[0:10] == "virNetwork":
        func = name[10:]
        func = string.lower(func[0:1]) + func[1:]
    elif name[0:15] == "virInterfaceGet":
        func = name[15:]
        func = string.lower(func[0:1]) + func[1:]
    elif name[0:12] == "virInterface":
        func = name[12:]
        func = string.lower(func[0:1]) + func[1:]
    elif name[0:12] == 'virSecretGet':
        func = name[12:]
        func = string.lower(func[0:1]) + func[1:]
    elif name[0:9] == 'virSecret':
        func = name[9:]
        func = string.lower(func[0:1]) + func[1:]
    elif name[0:14] == 'virNWFilterGet':
        func = name[14:]
        func = string.lower(func[0:1]) + func[1:]
    elif name[0:11] == 'virNWFilter':
        func = name[11:]
        func = string.lower(func[0:1]) + func[1:]
    elif name[0:12] == 'virStreamNew':
        func = "newStream"
    elif name[0:9] == 'virStream':
        func = name[9:]
        func = string.lower(func[0:1]) + func[1:]
    elif name[0:17] == "virStoragePoolGet":
        func = name[17:]
        func = string.lower(func[0:1]) + func[1:]
    elif name[0:14] == "virStoragePool":
        func = name[14:]
        func = string.lower(func[0:1]) + func[1:]
    elif name[0:16] == "virStorageVolGet":
        func = name[16:]
        func = string.lower(func[0:1]) + func[1:]
    elif name[0:13] == "virStorageVol":
        func = name[13:]
        func = string.lower(func[0:1]) + func[1:]
    elif name[0:13] == "virNodeDevice":
        if name[13:16] == "Get":
            func = string.lower(name[16]) + name[17:]
        elif name[13:19] == "Lookup" or name[13:19] == "Create":
            func = string.lower(name[3]) + name[4:]
        else:
            func = string.lower(name[13]) + name[14:]
    elif name[0:7] == "virNode":
        func = name[7:]
        func = string.lower(func[0:1]) + func[1:]
    elif name[0:10] == "virConnect":
        func = name[10:]
        func = string.lower(func[0:1]) + func[1:]
    elif name[0:3] == "xml":
        func = name[3:]
        func = string.lower(func[0:1]) + func[1:]
    else:
        func = name
    # Restore capitalization of well-known acronyms mangled by the
    # first-letter lowercasing above.
    if func == "iD":
        func = "ID"
    if func == "uUID":
        func = "UUID"
    if func == "uUIDString":
        func = "UUIDString"
    if func == "oSType":
        func = "OSType"
    if func == "xMLDesc":
        func = "XMLDesc"
    if func == "mACString":
        func = "MACString"
    return func
def functionCompare(info1, info2):
    """cmp()-style comparator ordering generated function records.

    Primary key is the defining module ('python_accessor' entries sort
    first); secondary key, within the same module, is the Python-level
    function name.
    """
    (_, funcA, _, _, _, fileA) = info1
    (_, funcB, _, _, _, fileB) = info2
    if fileA == fileB:
        if funcA < funcB:
            return -1
        if funcA > funcB:
            return 1
    if fileA == "python_accessor":
        return -1
    if fileB == "python_accessor":
        return 1
    if fileA < fileB:
        return -1
    if fileB < fileA:
        return 1
    return 0
def writeDoc(name, args, indent, output):
    """Write the docstring for the generated Python wrapper of *name*
    to *output*, prefixed by the given *indent* string.  Does nothing
    when the function has no description in the API XML."""
    if functions[name][0] is None or functions[name][0] == "":
        return
    val = functions[name][0]
    # The C documentation says NULL; the Python bindings take None.
    val = string.replace(val, "NULL", "None");
    output.write(indent)
    output.write('"""')
    # Emit the description line by line ('str' shadows the builtin,
    # kept as-is).
    i = string.find(val, "\n")
    while i >= 0:
        str = val[0:i+1]
        val = val[i+1:]
        output.write(str)
        i = string.find(val, "\n")
    output.write(indent)
    output.write(val)
    output.write(' """\n')
def buildWrappers():
    """Write libvirt.py, the Python half of the generated bindings.

    Phase 1 emits module-level wrapper functions for C entry points that
    are not attached to any class; phase 2 emits one Python class per
    libvirt object type (constructor, destructor, accessors and methods
    with per-class exception raising); phase 3 appends the enum
    constants.  Everything is driven by the module-level tables
    (classes_type, primary_classes, functions, ...) filled in by the
    parsing phase.
    """
    global ctypes
    global py_types
    global py_return_types
    global unknown_types
    global functions
    global function_classes
    global classes_type
    global classes_list
    global converter_type
    global primary_classes
    global converter_type
    global classes_ancestor
    global converter_type
    global primary_classes
    global classes_ancestor
    global classes_destructors
    global functions_noexcept
    # NOTE(review): converter_type, primary_classes and classes_ancestor are
    # declared global more than once above; harmless but redundant.
    for type in classes_type.keys():
        function_classes[classes_type[type][2]] = []
    #
    # Build the list of C types to look for ordered to start
    # with primary classes
    #
    ctypes = []
    classes_list = []
    ctypes_processed = {}
    classes_processed = {}
    for classe in primary_classes:
        classes_list.append(classe)
        classes_processed[classe] = ()
        for type in classes_type.keys():
            tinfo = classes_type[type]
            if tinfo[2] == classe:
                ctypes.append(type)
                ctypes_processed[type] = ()
    for type in classes_type.keys():
        if ctypes_processed.has_key(type):
            continue
        tinfo = classes_type[type]
        if not classes_processed.has_key(tinfo[2]):
            classes_list.append(tinfo[2])
            classes_processed[tinfo[2]] = ()
        ctypes.append(type)
        ctypes_processed[type] = ()
    # Assign every function either to the class of its first (or, for
    # non-accessor functions, second) argument, or to the pseudo-class
    # 'None' which collects plain module-level functions.
    for name in functions.keys():
        found = 0;
        (desc, ret, args, file, cond) = functions[name]
        for type in ctypes:
            classe = classes_type[type][2]
            if name[0:3] == "vir" and len(args) >= 1 and args[0][1] == type:
                found = 1
                func = nameFixup(name, classe, type, file)
                info = (0, func, name, ret, args, file)
                function_classes[classe].append(info)
            elif name[0:3] == "vir" and len(args) >= 2 and args[1][1] == type \
                 and file != "python_accessor" and not name in function_skip_index_one:
                found = 1
                func = nameFixup(name, classe, type, file)
                info = (1, func, name, ret, args, file)
                function_classes[classe].append(info)
        if found == 1:
            continue
        func = nameFixup(name, "None", file, file)
        info = (0, func, name, ret, args, file)
        function_classes['None'].append(info)
    classes = open("libvirt.py", "w")
    extra = open(os.path.join(srcPref,"libvirt-override.py"), "r")
    classes.write("#! " + python + " -i\n")
    classes.write("#\n")
    classes.write("# WARNING WARNING WARNING WARNING\n")
    classes.write("#\n")
    classes.write("# This file is automatically written by generator.py. Any changes\n")
    classes.write("# made here will be lost.\n")
    classes.write("#\n")
    classes.write("# To change the manually written methods edit libvirt-override.py\n")
    classes.write("# To change the automatically written methods edit generator.py\n")
    classes.write("#\n")
    classes.write("# WARNING WARNING WARNING WARNING\n")
    classes.write("#\n")
    classes.writelines(extra.readlines())
    classes.write("#\n")
    classes.write("# WARNING WARNING WARNING WARNING\n")
    classes.write("#\n")
    classes.write("# Automatically written part of python bindings for libvirt\n")
    classes.write("#\n")
    classes.write("# WARNING WARNING WARNING WARNING\n")
    extra.close()
    # Phase 1: module-level wrappers for functions bound to no class.
    if function_classes.has_key("None"):
        flist = function_classes["None"]
        flist.sort(functionCompare)
        oldfile = ""
        for info in flist:
            (index, func, name, ret, args, file) = info
            if file != oldfile:
                classes.write("#\n# Functions from module %s\n#\n\n" % file)
                oldfile = file
            classes.write("def %s(" % func)
            n = 0
            for arg in args:
                if n != 0:
                    classes.write(", ")
                classes.write("%s" % arg[0])
                n = n + 1
            classes.write("):\n")
            writeDoc(name, args, '    ', classes);
            # Unwrap class-typed arguments to their underlying C objects.
            for arg in args:
                if classes_type.has_key(arg[1]):
                    classes.write("    if %s is None: %s__o = None\n" %
                                  (arg[0], arg[0]))
                    classes.write("    else: %s__o = %s%s\n" %
                                  (arg[0], arg[0], classes_type[arg[1]][0]))
            if ret[0] != "void":
                classes.write("    ret = ");
            else:
                classes.write("    ");
            classes.write("libvirtmod.%s(" % name)
            n = 0
            for arg in args:
                if n != 0:
                    classes.write(", ");
                classes.write("%s" % arg[0])
                if classes_type.has_key(arg[1]):
                    classes.write("__o");
                n = n + 1
            classes.write(")\n");
            if ret[0] != "void":
                if classes_type.has_key(ret[0]):
                    #
                    # Raise an exception
                    #
                    if functions_noexcept.has_key(name):
                        classes.write("    if ret is None:return None\n");
                    else:
                        classes.write(
                            "    if ret is None:raise libvirtError('%s() failed')\n" %
                            (name))
                    classes.write("    return ");
                    classes.write(classes_type[ret[0]][1] % ("ret"));
                    classes.write("\n");
                # For functions returning an integral type there are
                # several things that we can do, depending on the
                # contents of functions_int_*:
                elif is_integral_type (ret[0]):
                    if not functions_noexcept.has_key (name):
                        if functions_int_exception_test.has_key (name):
                            test = functions_int_exception_test[name]
                        else:
                            test = functions_int_default_test
                        classes.write (("    if " + test +
                                        ": raise libvirtError ('%s() failed')\n") %
                                       ("ret", name))
                    classes.write("    return ret\n")
                elif is_list_type (ret[0]):
                    if not functions_noexcept.has_key (name):
                        if functions_list_exception_test.has_key (name):
                            test = functions_list_exception_test[name]
                        else:
                            test = functions_list_default_test
                        classes.write (("    if " + test +
                                        ": raise libvirtError ('%s() failed')\n") %
                                       ("ret", name))
                    classes.write("    return ret\n")
                else:
                    classes.write("    return ret\n")
            classes.write("\n");
    # Phase 2: one generated Python class per libvirt object type.
    for classname in classes_list:
        if classname == "None":
            pass
        else:
            if classes_ancestor.has_key(classname):
                classes.write("class %s(%s):\n" % (classname,
                              classes_ancestor[classname]))
                classes.write("    def __init__(self, _obj=None):\n")
                if reference_keepers.has_key(classname):
                    rlist = reference_keepers[classname]
                    for ref in rlist:
                        classes.write("        self.%s = None\n" % ref[1])
                classes.write("        self._o = _obj\n")
                classes.write("        %s.__init__(self, _obj=_obj)\n\n" % (
                              classes_ancestor[classname]))
            else:
                classes.write("class %s:\n" % (classname))
                if classname in [ "virDomain", "virNetwork", "virInterface", "virStoragePool",
                                  "virStorageVol", "virNodeDevice", "virSecret","virStream",
                                  "virNWFilter" ]:
                    classes.write("    def __init__(self, conn, _obj=None):\n")
                elif classname in [ 'virDomainSnapshot' ]:
                    classes.write("    def __init__(self, dom, _obj=None):\n")
                else:
                    classes.write("    def __init__(self, _obj=None):\n")
                if reference_keepers.has_key(classname):
                    list = reference_keepers[classname]
                    for ref in list:
                        classes.write("        self.%s = None\n" % ref[1])
                if classname in [ "virDomain", "virNetwork", "virInterface",
                                  "virNodeDevice", "virSecret", "virStream",
                                  "virNWFilter" ]:
                    classes.write("        self._conn = conn\n")
                elif classname in [ "virStorageVol", "virStoragePool" ]:
                    classes.write("        self._conn = conn\n" + \
                                  "        if not isinstance(conn, virConnect):\n" + \
                                  "            self._conn = conn._conn\n")
                elif classname in [ "virDomainSnapshot" ]:
                    classes.write("        self._dom = dom\n")
                classes.write("        if _obj != None:self._o = _obj;return\n")
                classes.write("        self._o = None\n\n");
            destruct=None
            if classes_destructors.has_key(classname):
                classes.write("    def __del__(self):\n")
                classes.write("        if self._o != None:\n")
                classes.write("            libvirtmod.%s(self._o)\n" %
                              classes_destructors[classname]);
                classes.write("        self._o = None\n\n");
                destruct=classes_destructors[classname]
            if not class_skip_connect_impl.has_key(classname):
                # Build python safe 'connect' method
                classes.write("    def connect(self):\n")
                classes.write("        return self._conn\n\n")
            if class_domain_impl.has_key(classname):
                classes.write("    def domain(self):\n")
                classes.write("        return self._dom\n\n")
            flist = function_classes[classname]
            flist.sort(functionCompare)
            oldfile = ""
            for info in flist:
                (index, func, name, ret, args, file) = info
                #
                # Do not provide as method the destructors for the class
                # to avoid double free
                #
                if name == destruct:
                    continue;
                if file != oldfile:
                    if file == "python_accessor":
                        classes.write("    # accessors for %s\n" % (classname))
                    else:
                        classes.write("    #\n")
                        classes.write("    # %s functions from module %s\n" % (
                                      classname, file))
                        classes.write("    #\n\n")
                    oldfile = file
                classes.write("    def %s(self" % func)
                n = 0
                # The argument at position `index` becomes `self`.
                for arg in args:
                    if n != index:
                        classes.write(", %s" % arg[0])
                    n = n + 1
                classes.write("):\n")
                writeDoc(name, args, '        ', classes);
                n = 0
                for arg in args:
                    if classes_type.has_key(arg[1]):
                        if n != index:
                            classes.write("        if %s is None: %s__o = None\n" %
                                          (arg[0], arg[0]))
                            classes.write("        else: %s__o = %s%s\n" %
                                          (arg[0], arg[0], classes_type[arg[1]][0]))
                    n = n + 1
                if ret[0] != "void":
                    classes.write("        ret = ");
                else:
                    classes.write("        ");
                classes.write("libvirtmod.%s(" % name)
                n = 0
                for arg in args:
                    if n != 0:
                        classes.write(", ");
                    if n != index:
                        classes.write("%s" % arg[0])
                        if classes_type.has_key(arg[1]):
                            classes.write("__o");
                    else:
                        classes.write("self");
                        if classes_type.has_key(arg[1]):
                            classes.write(classes_type[arg[1]][0])
                    n = n + 1
                classes.write(")\n");
                if name == "virConnectClose":
                    classes.write("        self._o = None\n")
                # For functions returning object types:
                if ret[0] != "void":
                    if classes_type.has_key(ret[0]):
                        #
                        # Raise an exception
                        #
                        if functions_noexcept.has_key(name):
                            classes.write(
                                "        if ret is None:return None\n");
                        else:
                            if classname == "virConnect":
                                classes.write(
                                    "        if ret is None:raise libvirtError('%s() failed', conn=self)\n" %
                                    (name))
                            elif classname == "virDomain":
                                classes.write(
                                    "        if ret is None:raise libvirtError('%s() failed', dom=self)\n" %
                                    (name))
                            elif classname == "virNetwork":
                                classes.write(
                                    "        if ret is None:raise libvirtError('%s() failed', net=self)\n" %
                                    (name))
                            elif classname == "virInterface":
                                classes.write(
                                    "        if ret is None:raise libvirtError('%s() failed', net=self)\n" %
                                    (name))
                            elif classname == "virStoragePool":
                                classes.write(
                                    "        if ret is None:raise libvirtError('%s() failed', pool=self)\n" %
                                    (name))
                            elif classname == "virStorageVol":
                                classes.write(
                                    "        if ret is None:raise libvirtError('%s() failed', vol=self)\n" %
                                    (name))
                            elif classname == "virDomainSnapshot":
                                classes.write(
                                    "        if ret is None:raise libvirtError('%s() failed', dom=self._dom)\n" %
                                    (name))
                            else:
                                classes.write(
                                    "        if ret is None:raise libvirtError('%s() failed')\n" %
                                    (name))
                        #
                        # generate the returned class wrapper for the object
                        #
                        classes.write("        __tmp = ");
                        classes.write(classes_type[ret[0]][1] % ("ret"));
                        classes.write("\n");
                        #
                        # Sometime one need to keep references of the source
                        # class in the returned class object.
                        # See reference_keepers for the list
                        #
                        tclass = classes_type[ret[0]][2]
                        if reference_keepers.has_key(tclass):
                            list = reference_keepers[tclass]
                            for pref in list:
                                if pref[0] == classname:
                                    classes.write("        __tmp.%s = self\n" %
                                                  pref[1])
                        # Post-processing - just before we return.
                        if function_post.has_key(name):
                            classes.write("        %s\n" %
                                          (function_post[name]));
                        #
                        # return the class
                        #
                        classes.write("        return __tmp\n");
                    elif converter_type.has_key(ret[0]):
                        #
                        # Raise an exception
                        #
                        if functions_noexcept.has_key(name):
                            classes.write(
                                "        if ret is None:return None");
                        # Post-processing - just before we return.
                        if function_post.has_key(name):
                            classes.write("        %s\n" %
                                          (function_post[name]));
                        classes.write("        return ");
                        classes.write(converter_type[ret[0]] % ("ret"));
                        classes.write("\n");
                    # For functions returning an integral type there
                    # are several things that we can do, depending on
                    # the contents of functions_int_*:
                    elif is_integral_type (ret[0]):
                        if not functions_noexcept.has_key (name):
                            if functions_int_exception_test.has_key (name):
                                test = functions_int_exception_test[name]
                            else:
                                test = functions_int_default_test
                            if classname == "virConnect":
                                classes.write (("        if " + test +
                                                ": raise libvirtError ('%s() failed', conn=self)\n") %
                                               ("ret", name))
                            elif classname == "virDomain":
                                classes.write (("        if " + test +
                                                ": raise libvirtError ('%s() failed', dom=self)\n") %
                                               ("ret", name))
                            elif classname == "virNetwork":
                                classes.write (("        if " + test +
                                                ": raise libvirtError ('%s() failed', net=self)\n") %
                                               ("ret", name))
                            elif classname == "virInterface":
                                classes.write (("        if " + test +
                                                ": raise libvirtError ('%s() failed', net=self)\n") %
                                               ("ret", name))
                            elif classname == "virStoragePool":
                                classes.write (("        if " + test +
                                                ": raise libvirtError ('%s() failed', pool=self)\n") %
                                               ("ret", name))
                            elif classname == "virStorageVol":
                                classes.write (("        if " + test +
                                                ": raise libvirtError ('%s() failed', vol=self)\n") %
                                               ("ret", name))
                            else:
                                classes.write (("        if " + test +
                                                ": raise libvirtError ('%s() failed')\n") %
                                               ("ret", name))
                        # Post-processing - just before we return.
                        if function_post.has_key(name):
                            classes.write("        %s\n" %
                                          (function_post[name]));
                        classes.write ("        return ret\n")
                    elif is_list_type (ret[0]):
                        if not functions_noexcept.has_key (name):
                            if functions_list_exception_test.has_key (name):
                                test = functions_list_exception_test[name]
                            else:
                                test = functions_list_default_test
                            if classname == "virConnect":
                                classes.write (("        if " + test +
                                                ": raise libvirtError ('%s() failed', conn=self)\n") %
                                               ("ret", name))
                            elif classname == "virDomain":
                                classes.write (("        if " + test +
                                                ": raise libvirtError ('%s() failed', dom=self)\n") %
                                               ("ret", name))
                            elif classname == "virNetwork":
                                classes.write (("        if " + test +
                                                ": raise libvirtError ('%s() failed', net=self)\n") %
                                               ("ret", name))
                            elif classname == "virInterface":
                                classes.write (("        if " + test +
                                                ": raise libvirtError ('%s() failed', net=self)\n") %
                                               ("ret", name))
                            elif classname == "virStoragePool":
                                classes.write (("        if " + test +
                                                ": raise libvirtError ('%s() failed', pool=self)\n") %
                                               ("ret", name))
                            elif classname == "virStorageVol":
                                classes.write (("        if " + test +
                                                ": raise libvirtError ('%s() failed', vol=self)\n") %
                                               ("ret", name))
                            else:
                                classes.write (("        if " + test +
                                                ": raise libvirtError ('%s() failed')\n") %
                                               ("ret", name))
                        # Post-processing - just before we return.
                        if function_post.has_key(name):
                            classes.write("        %s\n" %
                                          (function_post[name]));
                        classes.write ("        return ret\n")
                    else:
                        # Post-processing - just before we return.
                        if function_post.has_key(name):
                            classes.write("        %s\n" %
                                          (function_post[name]));
                        classes.write("        return ret\n");
                classes.write("\n");
            # Append "<classname>.py" to class def, iff it exists
            try:
                extra = open(os.path.join(srcPref,"libvirt-override-" + classname + ".py"), "r")
                classes.write ("    #\n")
                classes.write ("    # %s methods from %s.py (hand coded)\n" % (classname,classname))
                classes.write ("    #\n")
                classes.writelines(extra.readlines())
                classes.write("\n")
                extra.close()
            except:
                pass
    #
    # Generate enum constants
    #
    for type,enum in enums.items():
        classes.write("# %s\n" % type)
        items = enum.items()
        items.sort(lambda i1,i2: cmp(long(i1[1]),long(i2[1])))
        for name,value in items:
            classes.write("%s = %s\n" % (name,value))
        classes.write("\n");
    classes.close()
# Script entry: generate the C stub module first; abort on failure,
# otherwise write the Python wrapper module and exit successfully.
if buildStubs() < 0:
    sys.exit(1)
buildWrappers()
sys.exit(0)
|
#!/usr/bin/env python
from __future__ import print_function
import codecs
from itertools import izip, izip_longest
from math import isnan
from os import listdir, makedirs
from os.path import basename, dirname, isdir, join, splitext
from shutil import rmtree
import sys
from xlsxwriter import Workbook
# A hackish way to import the configuration
sys.path.append(dirname(__file__))
from configuration import *
#-------------------------------------------------------------------------------
ENCODING = 'ascii'
errors = { }
def main():
    """Run the whole comparison: prepare output dir, check every pair, report."""
    setup_spreadsheets_dir()
    passed = []
    for fname in files_to_check():
        if check(fname):
            passed.append(fname)
    show_summary(passed)
def setup_spreadsheets_dir():
    """Recreate SPREADSHEETS_DIR as an empty directory."""
    # Drop any leftovers from a previous run before starting fresh.
    if isdir(SPREADSHEETS_DIR):
        rmtree(SPREADSHEETS_DIR)
    makedirs(SPREADSHEETS_DIR)
# FIXME Add ignore.txt with files to ignore; finally, add warning if tests
# were skipped
def files_to_check():
    """Return the sorted names present in both the etalon and test dirs.

    Files found on only one side are logged as errors and excluded.
    """
    def csv_names(directory):
        return { name for name in listdir(directory) if name.endswith(EXTENSION) }
    etalons = csv_names(ETALON_DIR)
    tocomps = csv_names(TOCOMP_DIR)
    for name in etalons - tocomps:
        log_error(name, 'only etalon found but the test file is missing')
    for name in tocomps - etalons:
        log_error(name, 'the etalon is missing for this test file')
    return sorted(etalons & tocomps)
def check(filename):
    """Compare one etalon/test file pair; return True iff they fully match.

    On any failure an error is logged and None is returned; header and
    value mismatches additionally produce a highlighted xlsx sheet.
    """
    etalon = get_content(ETALON_DIR, filename, 'etalon')
    if etalon is None:
        return
    tocomp = get_content(TOCOMP_DIR, filename, 'test')
    if tocomp is None:
        return
    header_mismatch = compare_headers(etalon, tocomp)
    if header_mismatch:
        log_error(filename, 'mismatch in headers, excel sheet written')
        write_mismatch(filename, etalon, tocomp, header_mismatch)
        return
    n_etalon, n_tocomp = number_of_rows(etalon, tocomp)
    if n_etalon != n_tocomp:
        log_error(filename, 'number of rows: {}!={}'.format(n_etalon, n_tocomp))
        return
    value_mismatch = compare_values(etalon, tocomp)
    if value_mismatch:
        log_error(filename, 'mismatch, excel sheet written')
        write_mismatch(filename, etalon, tocomp, value_mismatch)
        return
    return True
def log_error(filename, msg):
    """Record the (single) failure reason for *filename* in the global errors map."""
    # Each file may fail for at most one reason; a second report is a bug
    # in the checking logic, hence the assert.
    assert filename not in errors, filename
    errors[filename] = msg
def get_content(directory, filename, kind):
    """Read and type-convert one csv; return (header, table) or None on error.

    *kind* ('etalon' or 'test') only labels error messages and spreadsheet
    names.
    """
    header, lines = read_csv(join(directory, filename))
    col_types, error_msg = get_col_types(header)
    if error_msg is not None:
        log_error(filename, '{}, header: {}'.format(kind, error_msg))
        return
    # FIXME check row length == header length!
    table, type_errors = convert(col_types, lines)
    if type_errors:
        log_error(filename,
                  '{}, type conversion errors, excel sheet written'.format(kind))
        xlsxname = '{}_{}_type_error.xlsx'.format(get_filebase(filename), kind)
        write_cell_errors(xlsxname, header, lines, type_errors)
        return
    return header, table
def read_csv(filename):
    """Read *filename* as SEP-separated ascii text.

    Returns (header_columns, data_lines); each entry is a list of raw
    string cells.
    """
    print()
    print('Trying to read file "{}"'.format(filename))
    with codecs.open(filename, 'r', ENCODING) as f:
        header = extract_first_line(f)
        lines = [split(line) for line in f]
    # bool(header) counts the header line itself when one was present.
    print('Read {} lines'.format(bool(header) + len(lines)))
    return header, lines
def extract_first_line(f):
    """Pop and split the header line of *f*; [] if the file is empty."""
    first = next(f, None)
    if not first:
        return []
    return split(first)
def split(line):
    """Strip the line terminator and split on the configured separator."""
    stripped = line.rstrip('\r\n')
    return stripped.split(SEP)
def get_col_types(header):
    """Map each header column to a type-converter callable.

    Returns (converters, None) on success or (None, error message); the
    type is encoded in the last character of each column name (TO_TYPE).
    """
    if not header:
        return None, 'missing'
    col_types = []
    for i, col in enumerate(header):
        converter = TO_TYPE.get(col[-1:])
        if converter is None:
            return None, 'unrecognized type in column {}: "{}"'.format(i+1, col)
        col_types.append(converter)
    assert len(col_types)==len(header)
    return col_types, None
def convert(col_types, lines):
    """Convert raw string *lines* into a typed 2D table.

    Returns (table, type_errors): *table* is the fully converted table,
    or [] when any cell failed to convert; *type_errors* lists the
    (row, col) indices (1-based rows, 0-based cols) where conversion
    raised.
    """
    table, type_errors = [ ], [ ]
    for i, line in enumerate(lines, 1):
        row = [ ]
        for j, col in enumerate(line):
            try:
                row.append( col_types[j](col) )
            # Was a bare `except:`; keep the best-effort behaviour but do
            # not swallow KeyboardInterrupt/SystemExit any more.
            except Exception:
                row.append( None )
                type_errors.append((i, j))
        # NOTE: rows longer/shorter than the header still abort here (see
        # the FIXME about row-length checking in get_content).
        assert len(row) == len(col_types)
        table.append(row)
    return (table if len(type_errors) == 0 else [ ]), type_errors
def get_filebase(path):
    """Return the file name of *path* without directory or extension."""
    root, _ext = splitext(basename(path))
    return root
def write_cell_errors(xlsxname, header, lines, cells_to_mark):
    """Dump the raw csv into an xlsx, highlighting the failing cells."""
    workbook = Workbook(join(SPREADSHEETS_DIR, xlsxname))
    highlight = workbook.add_format()
    highlight.set_bg_color('cyan')
    sheet = workbook.add_worksheet()
    write_sheet(sheet, highlight, header, lines, cells_to_mark)
    workbook.close()
def write_mismatch(filename, etalon, tocomp, mismatch):
    """Write test and etalon sheets into one xlsx, marking mismatched cells."""
    workbook = Workbook(join(SPREADSHEETS_DIR, get_filebase(filename) + '.xlsx'))
    highlight = workbook.add_format()
    highlight.set_bg_color('cyan')
    # The mismatch positions are only marked on the 'test' sheet.
    write_sheet(workbook.add_worksheet(name='test'), highlight,
                *tocomp, cells_to_mark=mismatch)
    write_sheet(workbook.add_worksheet(name='etalon'), highlight, *etalon)
    workbook.close()
def write_sheet(worksheet, cell_fmt, header, lines, cells_to_mark=()):
    """Write header and data rows to *worksheet*, formatting marked cells.

    *cells_to_mark* holds (row, col) indices; row 0 is the header row.
    The default is now an immutable tuple instead of the mutable `[]`
    (shared-default-argument trap); callers passing a list are unaffected.
    """
    formatter = { cell : cell_fmt for cell in cells_to_mark }
    for j, col_header in enumerate(header):
        worksheet.write(0, j, col_header, formatter.get((0,j), None))
    for i, line in enumerate(lines, 1):
        for j, item in enumerate(line):
            worksheet.write(i,j, replace_nan(item), formatter.get((i,j),None))
def replace_nan(item):
    """Return the string 'NaN' for float NaN values; anything else unchanged."""
    if isinstance(item, float) and isnan(item):
        return 'NaN'
    return item
def compare_headers(etalon, tocomp):
    """Return [(0, col), ...] for every header column that differs."""
    e_head = etalon[0]
    t_head = tocomp[0]
    # Pad the shorter header with '' so extra columns count as mismatches.
    pairs = izip_longest(e_head, t_head, fillvalue='')
    return [ (0, i) for i, (eh, th) in enumerate(pairs) if eh != th ]
def number_of_rows(etalon, tocomp):
    """Return the data-row counts of the two (header, table) pairs."""
    _, e_table = etalon
    _, t_table = tocomp
    return len(e_table), len(t_table)
def compare_values(etalon, tocomp):
    """Return [(row, col), ...] of cells where the two tables disagree."""
    e_table = etalon[1]
    t_table = tocomp[1]
    return [ (i, j)
             for i, (e_row, t_row) in enumerate(izip(e_table, t_table), 1)
             for j, (e_item, t_item) in enumerate(izip(e_row, t_row))
             if not equals(e_item, t_item) ]
def equals(e, t):
    """Compare two cells; floats get the tolerance/NaN-aware comparison."""
    if isinstance(e, float):
        return compare_floats(e, t)
    return e == t
def compare_floats(e, t):
    """Float equality: two NaNs match each other; otherwise abs/rel tolerance."""
    e_nan, t_nan = isnan(e), isnan(t)
    if e_nan or t_nan:
        # A single NaN never matches; a NaN pair always does.
        return e_nan and t_nan
    diff = abs(e-t)
    return diff < ABS_TOL or diff < REL_TOL*abs(e)
def show_summary(passed):
    """Print the final pass/fail report to stdout."""
    print('-------------------------------------------------------------------')
    if passed:
        print('Passed: {} tests'.format(len(passed)))
    if not errors:
        print('Tests PASSED!')
        return
    print('There were errors:')
    for fname, msg in sorted( errors.iteritems() ):
        print('  {} {}'.format(fname,msg))
    # FIXME write_errors into a log file to the results directory as well!
    # Write the etalon and test dirs on the top of that log file
    print('Tests FAILED!')
# Allow importing this module without triggering a comparison run.
if __name__ == '__main__':
    main()
Improved logging of errors
#!/usr/bin/env python
from __future__ import print_function
import codecs
from contextlib import closing
from cStringIO import StringIO
from itertools import izip, izip_longest
from math import isnan
from os import listdir, makedirs
from os.path import basename, dirname, isdir, join, splitext
from shutil import rmtree
import sys
from xlsxwriter import Workbook
# A hackish way to import the configuration
sys.path.append(dirname(__file__))
from configuration import *
#-------------------------------------------------------------------------------
ENCODING = 'ascii'
errors = { }
def main():
    """Run the whole comparison: prepare output dir, check every pair, report."""
    setup_spreadsheets_dir()
    passed = []
    for fname in files_to_check():
        if check(fname):
            passed.append(fname)
    show_summary(passed)
def setup_spreadsheets_dir():
    """Recreate SPREADSHEETS_DIR as an empty directory."""
    # Drop any leftovers from a previous run before starting fresh.
    if isdir(SPREADSHEETS_DIR):
        rmtree(SPREADSHEETS_DIR)
    makedirs(SPREADSHEETS_DIR)
# FIXME Add ignore.txt with files to ignore; finally, add warning if tests
# were skipped
def files_to_check():
    """Return the sorted names present in both the etalon and test dirs.

    Files found on only one side are logged as errors and excluded.
    """
    def csv_names(directory):
        return { name for name in listdir(directory) if name.endswith(EXTENSION) }
    etalons = csv_names(ETALON_DIR)
    tocomps = csv_names(TOCOMP_DIR)
    for name in etalons - tocomps:
        log_error(name, 'only etalon found but the test file is missing')
    for name in tocomps - etalons:
        log_error(name, 'the etalon is missing for this test file')
    return sorted(etalons & tocomps)
def check(filename):
    """Compare one etalon/test file pair; return True iff they fully match.

    On any failure an error is logged and None is returned; header and
    value mismatches additionally produce a highlighted xlsx sheet.
    """
    etalon = get_content(ETALON_DIR, filename, 'etalon')
    if etalon is None:
        return
    tocomp = get_content(TOCOMP_DIR, filename, 'test')
    if tocomp is None:
        return
    header_mismatch = compare_headers(etalon, tocomp)
    if header_mismatch:
        log_error(filename, 'mismatch in headers, excel sheet written')
        write_mismatch(filename, etalon, tocomp, header_mismatch)
        return
    n_etalon, n_tocomp = number_of_rows(etalon, tocomp)
    if n_etalon != n_tocomp:
        log_error(filename, 'number of rows: {}!={}'.format(n_etalon, n_tocomp))
        return
    value_mismatch = compare_values(etalon, tocomp)
    if value_mismatch:
        log_error(filename, 'mismatch, excel sheet written')
        write_mismatch(filename, etalon, tocomp, value_mismatch)
        return
    return True
def log_error(filename, msg):
    """Record the (single) failure reason for *filename* in the global errors map."""
    # Each file may fail for at most one reason; a second report is a bug
    # in the checking logic, hence the assert.
    assert filename not in errors, filename
    errors[filename] = msg
def get_content(directory, filename, kind):
    """Read and type-convert one csv; return (header, table) or None on error.

    *kind* ('etalon' or 'test') only labels error messages and spreadsheet
    names.
    """
    header, lines = read_csv(join(directory, filename))
    col_types, error_msg = get_col_types(header)
    if error_msg is not None:
        log_error(filename, '{}, header: {}'.format(kind, error_msg))
        return
    # FIXME check row length == header length!
    table, type_errors = convert(col_types, lines)
    if type_errors:
        log_error(filename,
                  '{}, type conversion errors, excel sheet written'.format(kind))
        xlsxname = '{}_{}_type_error.xlsx'.format(get_filebase(filename), kind)
        write_cell_errors(xlsxname, header, lines, type_errors)
        return
    return header, table
def read_csv(filename):
    """Read *filename* as SEP-separated ascii text.

    Returns (header_columns, data_lines); each entry is a list of raw
    string cells.
    """
    print()
    print('Trying to read file "{}"'.format(filename))
    with codecs.open(filename, 'r', ENCODING) as f:
        header = extract_first_line(f)
        lines = [split(line) for line in f]
    # bool(header) counts the header line itself when one was present.
    print('Read {} lines'.format(bool(header) + len(lines)))
    return header, lines
def extract_first_line(f):
    """Pop and split the header line of *f*; [] if the file is empty."""
    first = next(f, None)
    if not first:
        return []
    return split(first)
def split(line):
    """Strip the line terminator and split on the configured separator."""
    stripped = line.rstrip('\r\n')
    return stripped.split(SEP)
def get_col_types(header):
    """Map each header column to a type-converter callable.

    Returns (converters, None) on success or (None, error message); the
    type is encoded in the last character of each column name (TO_TYPE).
    """
    if not header:
        return None, 'missing'
    col_types = []
    for i, col in enumerate(header):
        converter = TO_TYPE.get(col[-1:])
        if converter is None:
            return None, 'unrecognized type in column {}: "{}"'.format(i+1, col)
        col_types.append(converter)
    assert len(col_types)==len(header)
    return col_types, None
def convert(col_types, lines):
    """Convert raw string *lines* into a typed 2D table.

    Returns (table, type_errors): *table* is the fully converted table,
    or [] when any cell failed to convert; *type_errors* lists the
    (row, col) indices (1-based rows, 0-based cols) where conversion
    raised.
    """
    table, type_errors = [ ], [ ]
    for i, line in enumerate(lines, 1):
        row = [ ]
        for j, col in enumerate(line):
            try:
                row.append( col_types[j](col) )
            # Was a bare `except:`; keep the best-effort behaviour but do
            # not swallow KeyboardInterrupt/SystemExit any more.
            except Exception:
                row.append( None )
                type_errors.append((i, j))
        # NOTE: rows longer/shorter than the header still abort here (see
        # the FIXME about row-length checking in get_content).
        assert len(row) == len(col_types)
        table.append(row)
    return (table if len(type_errors) == 0 else [ ]), type_errors
def get_filebase(path):
    """Return the file name of *path* without directory or extension."""
    root, _ext = splitext(basename(path))
    return root
def write_cell_errors(xlsxname, header, lines, cells_to_mark):
    """Dump the raw csv into an xlsx, highlighting the failing cells."""
    workbook = Workbook(join(SPREADSHEETS_DIR, xlsxname))
    highlight = workbook.add_format()
    highlight.set_bg_color('cyan')
    sheet = workbook.add_worksheet()
    write_sheet(sheet, highlight, header, lines, cells_to_mark)
    workbook.close()
def write_mismatch(filename, etalon, tocomp, mismatch):
    """Write test and etalon sheets into one xlsx, marking mismatched cells."""
    workbook = Workbook(join(SPREADSHEETS_DIR, get_filebase(filename) + '.xlsx'))
    highlight = workbook.add_format()
    highlight.set_bg_color('cyan')
    # The mismatch positions are only marked on the 'test' sheet.
    write_sheet(workbook.add_worksheet(name='test'), highlight,
                *tocomp, cells_to_mark=mismatch)
    write_sheet(workbook.add_worksheet(name='etalon'), highlight, *etalon)
    workbook.close()
def write_sheet(worksheet, cell_fmt, header, lines, cells_to_mark=()):
    """Write header and data rows to *worksheet*, formatting marked cells.

    *cells_to_mark* holds (row, col) indices; row 0 is the header row.
    The default is now an immutable tuple instead of the mutable `[]`
    (shared-default-argument trap); callers passing a list are unaffected.
    """
    formatter = { cell : cell_fmt for cell in cells_to_mark }
    for j, col_header in enumerate(header):
        worksheet.write(0, j, col_header, formatter.get((0,j), None))
    for i, line in enumerate(lines, 1):
        for j, item in enumerate(line):
            worksheet.write(i,j, replace_nan(item), formatter.get((i,j),None))
def replace_nan(item):
    """Return the string 'NaN' for float NaN values; anything else unchanged."""
    if isinstance(item, float) and isnan(item):
        return 'NaN'
    return item
def compare_headers(etalon, tocomp):
    """Return [(0, col), ...] for every header column that differs."""
    e_head = etalon[0]
    t_head = tocomp[0]
    # Pad the shorter header with '' so extra columns count as mismatches.
    pairs = izip_longest(e_head, t_head, fillvalue='')
    return [ (0, i) for i, (eh, th) in enumerate(pairs) if eh != th ]
def number_of_rows(etalon, tocomp):
    """Return the data-row counts of the two (header, table) pairs."""
    _, e_table = etalon
    _, t_table = tocomp
    return len(e_table), len(t_table)
def compare_values(etalon, tocomp):
    """Return [(row, col), ...] of cells where the two tables disagree."""
    e_table = etalon[1]
    t_table = tocomp[1]
    return [ (i, j)
             for i, (e_row, t_row) in enumerate(izip(e_table, t_table), 1)
             for j, (e_item, t_item) in enumerate(izip(e_row, t_row))
             if not equals(e_item, t_item) ]
def equals(e, t):
    """Compare two cells; floats get the tolerance/NaN-aware comparison."""
    if isinstance(e, float):
        return compare_floats(e, t)
    return e == t
def compare_floats(e, t):
    """Float equality: two NaNs match each other; otherwise abs/rel tolerance."""
    e_nan, t_nan = isnan(e), isnan(t)
    if e_nan or t_nan:
        # A single NaN never matches; a NaN pair always does.
        return e_nan and t_nan
    diff = abs(e-t)
    return diff < ABS_TOL or diff < REL_TOL*abs(e)
def show_summary(passed):
    """Print the final report; on failure also dump the error log to disk."""
    print('-------------------------------------------------------------------')
    print('Etalon directory:', ETALON_DIR)
    print('Compared against:', TOCOMP_DIR)
    if passed:
        print('Passed: {} files'.format(len(passed)))
    if not errors:
        print('Tests PASSED!')
    else:
        log = create_error_log()
        print(log)
        write_errors(log)
        print('Tests FAILED! Check "{}"'.format(SPREADSHEETS_DIR))
    # Comparing a directory to itself is almost certainly a misconfiguration.
    if ETALON_DIR==TOCOMP_DIR:
        print('WARNING: The etalon directory has been compared to itself!')
def create_error_log():
    """Render the collected errors as one multi-line string."""
    with closing(StringIO()) as out:
        out.write('There were errors:\n')
        # Sort by filename so the report order is deterministic.
        for fname, msg in sorted( errors.iteritems() ):
            out.write('  {} {}\n'.format(fname,msg))
        return out.getvalue()
def write_errors(log):
    """Persist the error log next to the generated spreadsheets."""
    with open(join(SPREADSHEETS_DIR, 'log.txt'), 'w') as f:
        f.write('Etalon directory: {}\n'.format(ETALON_DIR))
        f.write('Compared against: {}\n'.format(TOCOMP_DIR))
        f.write(log)
# Allow importing this module without triggering a comparison run.
if __name__ == '__main__':
    main()
|
# django imports
from django.core.exceptions import ObjectDoesNotExist
from django.core.cache import cache
from django.core.urlresolvers import reverse
# lfs imports
from lfs.cart.models import CartItem
from lfs.cart.models import Cart
from lfs.payment import utils as payment_utils
from lfs.shipping import utils as shipping_utils
def get_cart_max_delivery_time(request, cart):
    """Return the largest delivery time among all cart items (or None).

    The per-product delivery time honours the currently selected shipping
    method; the result backs the cart's "maximal delivery time" display.
    """
    slowest = None
    for item in cart.items():
        # Delivery time of this product under the selected shipping method.
        candidate = shipping_utils.get_product_delivery_time(
            request, item.product.slug, for_cart=True)
        if slowest is None:
            slowest = candidate
        elif candidate.as_hours() > slowest.as_hours():
            slowest = candidate
    return slowest
# TODO: Remove cart from signature?
def get_cart_price(request, cart, total=False):
    """Return only the "price" part of get_cart_costs()."""
    costs = get_cart_costs(request, cart, total)
    return costs["price"]
def get_cart_costs(request, cart, total=False):
    """Returns a dictionary with price and tax of the given cart:

        returns {
            "price" : the cart's price,
            "tax" : the cart's tax,
        }

    With total=True the costs of the selected shipping and payment methods
    are included - but only when the cart actually contains items.
    Results are cached per (total, cart id).
    """
    if cart is None:
        return {"price" : 0, "tax" : 0}
    cache_key = "cart-costs-%s-%s" % (total, cart.id)
    cart_costs = cache.get(cache_key)
    if cart_costs is None:
        items = cart.items()
        cart_price = 0
        cart_tax = 0
        for item in items:
            cart_price += item.get_price()
            cart_tax += item.get_tax()
        # Bugfix: an empty cart must not be charged shipping or payment
        # costs - only add them when there is at least one item.
        if len(items) > 0 and total:
            # Shipping
            shipping_method = shipping_utils.get_selected_shipping_method(request)
            shipping_costs = shipping_utils.get_shipping_costs(request, shipping_method)
            cart_price += shipping_costs["price"]
            cart_tax += shipping_costs["tax"]
            # Payment
            payment_method = payment_utils.get_selected_payment_method(request)
            payment_costs = payment_utils.get_payment_costs(request, payment_method)
            cart_price += payment_costs["price"]
            cart_tax += payment_costs["tax"]
        cart_costs = {"price" : cart_price, "tax" : cart_tax}
        cache.set(cache_key, cart_costs)
    return cart_costs
def get_or_create_cart(request):
    """Return the current customer's cart, creating a fresh one if needed."""
    existing = get_cart(request)
    if existing is not None:
        return existing
    return create_cart(request)
def create_cart(request):
    """Create and persist a new cart bound to the current session.

    The cart is additionally bound to the user when one is logged in.
    """
    new_cart = Cart(session=request.session.session_key)
    if request.user.is_authenticated():
        new_cart.user = request.user
    new_cart.save()
    return new_cart
def get_cart(request):
    """Returns the cart of the current customer or None.

    Authenticated customers are looked up by user, anonymous ones by
    session key; both paths go through the cache.
    """
    user = request.user
    if user.is_authenticated():
        return _get_cached_cart("cart-%s" % user, user = user)
    session_key = request.session.session_key
    return _get_cached_cart("cart-%s" % session_key, session = session_key)

def _get_cached_cart(cache_key, **lookup):
    """Fetch a cart by ORM lookup via the cache; None when it doesn't exist.

    Deduplicates the formerly copy-pasted user/session branches.
    """
    try:
        cart = cache.get(cache_key)
        if cart is None:
            cart = Cart.objects.get(**lookup)
            cache.set(cache_key, cart)
        return cart
    except ObjectDoesNotExist:
        return None
def get_go_on_shopping_url(request):
    """Calculate the "go on shopping" url.

    Prefers the category the customer visited last; falls back to the
    shop's front page.
    """
    last_category = request.session.get("last_category")
    if last_category:
        return last_category.get_absolute_url()
    return reverse("lfs_shop_view")
def update_cart_after_login(request):
    """Updates the cart after login.

    1. if there is no session cart, nothing has to be done.
    2. if there is a session cart and no user cart we assign the session cart
       to the current user.
    3. if there is a session cart and a user cart we add the session cart items
       to the user cart.
    """
    try:
        session_cart = Cart.objects.get(session = request.session.session_key)
        try:
            user_cart = Cart.objects.get(user = request.user)
        except ObjectDoesNotExist:
            # Case 2: no user cart yet - take over the session cart.
            session_cart.user = request.user
            session_cart.save()
        else:
            # Case 3: merge the session cart into the existing user cart.
            for session_cart_item in session_cart.items():
                try:
                    user_cart_item = CartItem.objects.get(cart = user_cart, product = session_cart_item.product)
                except ObjectDoesNotExist:
                    # Product not in the user cart yet - move the item over.
                    session_cart_item.cart = user_cart
                    session_cart_item.save()
                else:
                    # Product already present - just add up the amounts.
                    user_cart_item.amount += session_cart_item.amount
                    user_cart_item.save()
            session_cart.delete()
    except ObjectDoesNotExist:
        # Case 1: no session cart at all - nothing to do.
        pass
lfs.cart, bugfix: don't add shipping or payment price if the cart is empty
# django imports
from django.core.exceptions import ObjectDoesNotExist
from django.core.cache import cache
from django.core.urlresolvers import reverse
# lfs imports
from lfs.cart.models import CartItem
from lfs.cart.models import Cart
from lfs.payment import utils as payment_utils
from lfs.shipping import utils as shipping_utils
def get_cart_max_delivery_time(request, cart):
    """Return the largest delivery time among all cart items (or None).

    The per-product delivery time honours the currently selected shipping
    method; the result backs the cart's "maximal delivery time" display.
    """
    slowest = None
    for item in cart.items():
        # Delivery time of this product under the selected shipping method.
        candidate = shipping_utils.get_product_delivery_time(
            request, item.product.slug, for_cart=True)
        if slowest is None:
            slowest = candidate
        elif candidate.as_hours() > slowest.as_hours():
            slowest = candidate
    return slowest
# TODO: Remove cart from signature?
def get_cart_price(request, cart, total=False):
    """Return only the "price" part of get_cart_costs()."""
    costs = get_cart_costs(request, cart, total)
    return costs["price"]
def get_cart_costs(request, cart, total=False):
    """Returns a dictionary with price and tax of the given cart:

        returns {
            "price" : the cart's price,
            "tax" : the cart's tax,
        }

    If ``total`` is True, the costs of the selected shipping and payment
    methods are added on top of the item costs -- but only when the cart
    actually contains items (an empty cart must not carry shipping or
    payment costs).
    """
    if cart is None:
        return {"price" : 0, "tax" : 0}

    # Costs are cached per cart and per "total" flag.
    cache_key = "cart-costs-%s-%s" % (total, cart.id)
    cart_costs = cache.get(cache_key)
    if cart_costs is None:
        # Sum up price and tax over all items of the cart.
        items = cart.items()
        cart_price = 0
        cart_tax = 0
        for item in items:
            cart_price += item.get_price()
            cart_tax += item.get_tax()

        # Only a non-empty cart gets shipping and payment costs added.
        if len(items) > 0 and total:
            # Shipping
            shipping_method = shipping_utils.get_selected_shipping_method(request)
            shipping_costs = shipping_utils.get_shipping_costs(request, shipping_method)

            cart_price += shipping_costs["price"]
            cart_tax += shipping_costs["tax"]

            # Payment
            payment_method = payment_utils.get_selected_payment_method(request)
            payment_costs = payment_utils.get_payment_costs(request, payment_method)

            cart_price += payment_costs["price"]
            cart_tax += payment_costs["tax"]

        cart_costs = {"price" : cart_price, "tax" : cart_tax}
        cache.set(cache_key, cart_costs)

    return cart_costs
def get_or_create_cart(request):
    """Returns the cart of the current user. If no cart exists it creates a
    new one first.
    """
    existing = get_cart(request)
    return existing if existing is not None else create_cart(request)
def create_cart(request):
    """Creates a cart for the current session and/or user.

    The cart is always bound to the current session key; when the user is
    authenticated it is additionally bound to that user.
    """
    cart = Cart(session = request.session.session_key)
    if request.user.is_authenticated():
        cart.user = request.user

    cart.save()
    return cart
def get_cart(request):
    """Returns the cart of the current customer or None.

    Authenticated customers are looked up by user, anonymous customers by
    session key. Looked-up carts are stored in the cache under
    "cart-<user>" / "cart-<session_key>" respectively.
    """
    session_key = request.session.session_key
    user = request.user

    if user.is_authenticated():
        try:
            cart = cache.get("cart-%s" % user)
            if cart is None:
                cart = Cart.objects.get(user = user)
                cache.set("cart-%s" % user, cart)
            return cart
        except ObjectDoesNotExist:
            # No cart exists for this user yet.
            return None
    else:
        try:
            cart = cache.get("cart-%s" % session_key)
            if cart is None:
                cart = Cart.objects.get(session = session_key)
                cache.set("cart-%s" % session_key, cart)
            return cart
        except ObjectDoesNotExist:
            # No cart exists for this session yet.
            return None
def get_go_on_shopping_url(request):
    """Calculates the go on shopping url.

    Prefers the category the customer visited last; falls back to the
    shop's front page.
    """
    last_category = request.session.get("last_category")
    if not last_category:
        return reverse("lfs_shop_view")
    return last_category.get_absolute_url()
def update_cart_after_login(request):
    """Updates the cart after login.

    1. if there is no session cart, nothing has to be done.
    2. if there is a session cart and no user cart we assign the session cart
       to the current user.
    3. if there is a session cart and a user cart we add the session cart items
       to the user cart.
    """
    try:
        session_cart = Cart.objects.get(session = request.session.session_key)
        try:
            user_cart = Cart.objects.get(user = request.user)
        except ObjectDoesNotExist:
            # Case 2: no user cart yet -- hand the session cart over to the user.
            session_cart.user = request.user
            session_cart.save()
        else:
            # Case 3: merge -- move every session item into the user cart,
            # summing amounts for products that are already in it.
            for session_cart_item in session_cart.items():
                try:
                    user_cart_item = CartItem.objects.get(cart = user_cart, product = session_cart_item.product)
                except ObjectDoesNotExist:
                    session_cart_item.cart = user_cart
                    session_cart_item.save()
                else:
                    user_cart_item.amount += session_cart_item.amount
                    user_cart_item.save()
            # The session cart has been fully merged; drop it.
            session_cart.delete()
    except ObjectDoesNotExist:
        # Case 1: no session cart at all -- nothing to do.
        pass
986a8aae-2eae-11e5-8d1f-7831c1d44c14
98700b78-2eae-11e5-9469-7831c1d44c14
98700b78-2eae-11e5-9469-7831c1d44c14 |
b4737617-2eae-11e5-9ff0-7831c1d44c14
b47a15de-2eae-11e5-870d-7831c1d44c14
b47a15de-2eae-11e5-870d-7831c1d44c14 |
"""
genrel package for GR calculations
David Clark, Kai Smith
Case Western Reserve University
2014
"""
import numpy as np
import sympy as sp
#returns a rank 3 tensor that represents the Christoffel symbols
#first index corresponds to the upper index; the last two are lower
#Gamma^alpha_{beta gamma} = (1/2) g^{alpha delta} *
#    (g_{delta beta, gamma} + g_{delta gamma, beta} - g_{beta gamma, delta})
def christoffel_symbols(metric, metric_key):
    symbols = tensor(3)
    inverse = inverse_metric(metric)
    for alpha in range(4):
        for beta in range(4):
            for gamma in range(4):
                total = 0
                for delta in range(4):
                    total += inverse[alpha][delta] * (sp.diff(metric[delta][beta], metric_key[gamma])
                        + sp.diff(metric[delta][gamma], metric_key[beta])
                        - sp.diff(metric[beta][gamma], metric_key[delta]))
                # the factor of 1/2 from the definition is applied here
                symbols[alpha][beta][gamma] = sp.simplify(total/2)
    return symbols
#returns the rank 4 Reimann (Riemann) curvature tensor
#the first index corresponds to an upper index -- the rest are lower
#R^a_{bcd} = d_c Gamma^a_{bd} - d_d Gamma^a_{bc}
#          + Gamma^a_{ce} Gamma^e_{bd} - Gamma^a_{de} Gamma^e_{bc}
def reimann_tensor(chris_sym, metric_key):
    reimann = tensor(4)
    for alpha in range(4):
        for beta in range(4):
            for gamma in range(4):
                for delta in range(4):
                    total = 0
                    total += sp.diff(chris_sym[alpha][beta][delta], metric_key[gamma])
                    total -= sp.diff(chris_sym[alpha][beta][gamma], metric_key[delta])
                    for epsilon in range(4):
                        total += chris_sym[alpha][gamma][epsilon]*chris_sym[epsilon][beta][delta]
                        total -= chris_sym[alpha][delta][epsilon]*chris_sym[epsilon][beta][gamma]
                    reimann[alpha][beta][gamma][delta] = sp.cancel(total)
    return reimann
#returns the rank 2 Ricci curvature tensor (both indices are lower),
#obtained by contracting the Riemann tensor's first and third indices
def ricci_tensor(reimann):
    ricci = tensor(2)
    for mu in range(4):
        for nu in range(4):
            contraction = sum(reimann[lam][mu][lam][nu] for lam in range(4))
            ricci[mu][nu] = sp.cancel(contraction)
    return ricci
#returns the Ricci scalar (a sympy expression): g^{ab} R_{ab}
def ricci_scalar(ricci_t, metric):
    inverse = inverse_metric(metric)
    contraction = 0
    for mu in range(4):
        for nu in range(4):
            contraction += inverse[mu][nu] * ricci_t[mu][nu]
    return sp.cancel(contraction)
#returns the rank 2 Einstein tensor: G_ab = R_ab - (1/2) g_ab R
#both indices are lower
#think about whether you need to call raise_one_index before equating with a stress-energy tensor
def einstein_tensor(ricci_t, ricci_s, metric):
    einstein = tensor(2)
    for alpha in range(4):
        for beta in range(4):
            # Use an exact Rational(1, 2) instead of the float 0.5 so the
            # symbolic result stays exact -- the float would otherwise leak
            # inexact coefficients into every downstream expression.
            einstein[alpha][beta] = sp.cancel(ricci_t[alpha][beta] - sp.Rational(1, 2)*metric[alpha][beta]*ricci_s)
    return einstein
#runs through all parts of the program to find the Einstein tensor given only the metric and its key
def einstein_tensor_from_scratch(metric, metric_key, showprogress = False):
    def _report(message):
        #progress messages are only wanted when explicitly requested
        if showprogress:
            print(message)
    c_syms = christoffel_symbols(metric, metric_key)
    _report("Christoffel Symbols calculated")
    reimann_t = reimann_tensor(c_syms, metric_key)
    _report("Reimann Tensor calculated")
    ricci_t = ricci_tensor(reimann_t)
    _report("Ricci Tensor calculated")
    ricci_s = ricci_scalar(ricci_t, metric)
    _report("Ricci Scalar calculated")
    return einstein_tensor(ricci_t, ricci_s, metric)
#returns expressions which, when set equal to zero, give the Einstein
#equations G_ab - 8*pi*G*T_ab = 0 (zero and duplicate components dropped)
def einstein_equations(einstein_tensor, stress_energy_tensor):
    equations = []
    gravitational_constant = sp.Symbol('G')
    for mu in range(4):
        for nu in range(4):
            expr = sp.simplify(einstein_tensor[mu][nu] - 8*sp.pi*gravitational_constant*stress_energy_tensor[mu][nu])
            if expr != 0 and expr not in equations:
                equations.append(expr)
    return np.array(equations)
#returns the equations expressing local conservation of stress-energy
#(covariant divergence of the mixed stress-energy tensor; one expression
#per free index, zero and duplicate expressions dropped)
def conservation_equations(metric, metric_key, stress_energy_tensor):
    equations = []
    # work with one index raised so the divergence can be taken directly
    stress_energy_tensor = raise_one_index(stress_energy_tensor, metric)
    cs = christoffel_symbols(metric, metric_key)
    for u in range(4):
        eq = 0
        for v in range(4):
            eq += sp.diff(stress_energy_tensor[u][v], metric_key[v])
            for s in range(4):
                # Christoffel correction terms of the covariant divergence
                eq += stress_energy_tensor[s][v]*cs[u][s][v]
                eq += stress_energy_tensor[u][s]*cs[v][s][v]
        eq = sp.simplify(eq)
        if eq != 0 and eq not in equations:
            equations.append(eq)
    return np.array(equations)
#returns a 4 x 4 x ... x 4 object array able to hold sympy expressions,
#representing a tensor of the given rank (entries are uninitialized)
def tensor(rank):
    return np.empty((4,) * rank, dtype = type(sp.Symbol('')))
#returns a 4 x 4 x ... x 4 object array filled with zeros,
#representing a tensor of the given rank
def zerotensor(rank):
    return np.zeros((4,) * rank, dtype = type(sp.Symbol('')))
#returns the rank (number of indices) of a tensor stored as a numpy array
def rank(tensor):
    shape = tensor.shape
    return len(shape)
#returns the inverse of metric, computed exactly via sympy's Matrix.inv()
#and converted back to a numpy object array
def inverse_metric(metric):
    return np.array(sp.Matrix(metric).inv())
#matrix-multiplies the inverse metric and the tensor
#represents raising one index on a rank 2 tensor
#(index is the number of axes tensordot sums over; the default 1 is an
#ordinary matrix product)
def raise_one_index(tensor, metric, index = 1):
    return np.tensordot(inverse_metric(metric), tensor, index)
#contracts the metric with the tensor (an ordinary matrix product for the
#default index=1); represents lowering one index on a rank 2 tensor
def lower_one_index(tensor, metric, index = 1):
    return np.tensordot(metric, tensor, axes=index)
#the Kronecker delta: 1 when the two indices match, 0 otherwise
def kronecker_delta(a, b):
    return 1 if a == b else 0
#returns the metric perturbations with both indices raised, to first order:
#h^{uv} = -g^{u rho} g^{v sigma} h_{rho sigma}
def inverse_perturbations(metric, pert):
    h_inv = tensor(2)
    g_inv = inverse_metric(metric)
    for u in range(4):
        for v in range(4):
            total = 0
            for rho in range(4):
                for sigma in range(4):
                    # the minus sign comes from expanding the inverse of
                    # (g + h) to first order in h
                    total -= g_inv[u][rho]*g_inv[v][sigma]*pert[rho][sigma]
            h_inv[u][v] = sp.simplify(total)
    return h_inv
#returns the first-order perturbation of the Christoffel symbols
#First index corresponds to upper index
#christoffel may be passed in to avoid recomputing the background symbols
def perturbed_christoffel_symbols(metric, metric_key, perturbations, christoffel=None):
    perturbed_symbols = tensor(3)
    # bugfix: use "is None" -- comparing a numpy array to None with "=="
    # broadcasts elementwise and makes the condition ambiguous whenever a
    # precomputed christoffel array is actually passed in
    symbols = christoffel_symbols(metric, metric_key) if christoffel is None else christoffel
    inverse = inverse_metric(metric)
    for mu in range(4):
        for nu in range(4):
            for lamb in range(4):
                total = 0
                for rho in range(4):
                    for sigma in range(4):
                        total -= inverse[mu][rho]*perturbations[rho][sigma]*symbols[sigma][nu][lamb]
                    total += sp.Rational(1,2)*inverse[mu][rho]*(
                        sp.diff(perturbations[rho][nu], metric_key[lamb])
                        +sp.diff(perturbations[rho][lamb], metric_key[nu])
                        -sp.diff(perturbations[lamb][nu], metric_key[rho]))
                perturbed_symbols[mu][nu][lamb] = sp.simplify(total)
    return perturbed_symbols
#returns the first-order perturbation of the Ricci tensor (both indices lower)
#chris / dchris may be passed in to avoid recomputing them
def perturbed_ricci_tensor(metric, metric_key, pert, chris = None, dchris = None):
    dRicci = tensor(2)
    # bugfix: "== None" on a numpy array broadcasts elementwise, so the
    # "if" raises whenever precomputed arrays are supplied; "is None" is
    # the correct identity test for the default
    if chris is None:
        chris = christoffel_symbols(metric, metric_key)
    if dchris is None:
        dchris = perturbed_christoffel_symbols(metric, metric_key, pert, chris)
    for u in range(4):
        for k in range(4):
            total = 0
            for l in range(4):
                total += sp.diff(dchris[l][u][l], metric_key[k])
                total -= sp.diff(dchris[l][u][k], metric_key[l])
                for v in range(4):
                    for e in range(4):
                        total += dchris[e][u][v]*chris[v][k][e]
                        total += dchris[v][k][e]*chris[e][u][v]
                        total -= dchris[e][u][k]*chris[v][v][e]
                        total -= dchris[v][v][e]*chris[e][u][k]
            dRicci[u][k] = sp.simplify(total)
    return dRicci
#prints a tensor (or a sympy scalar) in a readable form
#zero entries are skipped; each non-zero entry is prefixed with its index
#path (position accumulates the indices during recursion)
def rprint(obj, position = []):
    if type(obj) != type(np.array([])):
        # scalar case: pretty-print unless it is zero
        if obj != 0:
            sp.pprint(sp.simplify(obj))
    else:
        for n, entry in enumerate(obj):
            if type(entry) != type(np.array([])) and entry != 0:
                print(str(position + [n]) + ": ")
                sp.pprint(sp.simplify(entry))
            else:
                # sub-array (or zero scalar): recurse with the index appended
                rprint(entry, position + [n])
#prints a tensor (or a sympy scalar) in LaTeX
#zero entries are skipped; each non-zero entry is prefixed with its index path
def lprint(obj, position = []):
    if type(obj) != type(np.array([])):
        if obj != 0:
            # bugfix: this branch referenced the undefined name "entry",
            # raising NameError for any non-zero scalar argument
            print(sp.latex(sp.simplify(obj)))
    else:
        for n, entry in enumerate(obj):
            if type(entry) != type(np.array([])) and entry != 0:
                print(str(position + [n]) + ": ")
                print(sp.latex(sp.simplify(entry)))
            else:
                lprint(entry, position + [n])
"""#Prints a sympy expression or expressions in a Mathematica ready form
def mprint(obj, position = []):
if type(obj) != type(np.array([])):
if obj != 0:
print(mathematicize(obj))
else:
for n, entry in enumerate(obj):
if type(entry) != type(np.array([])) and entry != 0:
print(str(position + [n]) + ": ")
print(mathematicize(entry))
else:
mprint(entry, position + [n])"""
#Prints a sympy expression or expressions in a Mathematica (Matrix) ready form,
#wrapping nested arrays in braces and separating siblings with commas
#NOTE(review): the recursive call passes eol = len(obj)-1 (an int, not a bool),
#so "if eol == True" is False for nested calls and the closing brace of the
#outermost structure is only printed at the top level -- confirm this is the
#intended bracket balancing before relying on the output
def mprint(obj, position = [], eol = True):
    if type(obj) != type(np.array([])):
        # scalar case: print the Mathematica form unless it is zero
        if obj != 0:
            print(mathematicize(obj))
    else:
        print('{')
        for n, entry in enumerate(obj):
            if type(entry) != type(np.array([])): #and entry != 0:
                #print(str(position + [n]) + ": ")
                print(mathematicize(entry))
                if n != len(obj)-1:
                    print(',')
            else:
                mprint(entry, eol = len(obj)-1) #, position + [n])
                print('}')
                if n != len(obj)-1:
                    print(',')
        if eol == True:
            print('}')
#Turns a single expression into Mathematica readable form
#NOTE: Program currently assumes that all functions are functions of time and all derivatives are with respect to time
def mathematicize(exp):
    text = str(exp)
    #Mathematica uses ^ for exponentiation
    text = text.replace('**', '^')
    #Rewrite each Derivative(f(t), t, ...) as f'[t], f''[t], ...
    while True:
        start = text.find('Derivative')
        if start == -1:
            break
        depth = 1      #paren nesting level, counting Derivative's own paren
        commas = 0     #commas seen at depth 1 == order of the derivative
        func_name = ""
        pos = start + len('Derivative(')
        while depth > 0:
            ch = text[pos]
            if ch == '(':
                depth += 1
            elif ch == ')':
                depth -= 1
            elif ch == ',':
                commas += 1
            elif depth == 1 and commas == 0:
                func_name += ch
            pos += 1
        text = text[:start] + func_name + '\''*commas + '[t]' + text[pos:]
    #Give remaining function calls square brackets
    return text.replace('(t)', '[t]')
if __name__ == "__main__":
#Defines commonly used variable and functions
t = sp.Symbol('t')
r = sp.Symbol('r')
theta = sp.Symbol('theta')
phi = sp.Symbol('phi')
x = sp.Symbol('x')
y = sp.Symbol('y')
z = sp.Symbol('z')
k = sp.Symbol('k')
pi = sp.pi
a = sp.Function('a')(t)
b = sp.Function('b')(t)
c = sp.Function('c')(t)
a0 = sp.Symbol('a0')
b0 = sp.Symbol('b0')
c0 = sp.Symbol('c0')
#w = sp.Rational(1, 3)#sp.Symbol('w')
#rho = sp.Function('rho')(t)
#p = w*rho
#G = sp.Symbol('G')
I0 = sp.Symbol('I0')
omega0 = sp.Symbol('Omega0')
rho0 = sp.Symbol('rho0')#I0*omega0/(8 * sp.pi * G)
p0 = sp.Symbol('p0')#w*rho0
#FRW metric
frw_metric, frw_metric_key = np.diag([-1, a**2/(1-k*r**2), a**2*r**2,a**2*r**2*sp.sin(theta)**2]), [t, r, theta, phi]
#Bianchi metric (currently flat, does not assume isotropy)
bc_metric, bc_metric_key = np.diag([-1, a**2, a**2, a**2]), [t, x, y, z]
#Bianchi with cylindrical curvarute
bcurve, bcurve_key = np.diag([-1, a**2/(1-k*r**2), a**2*r**2, b**2]), [t, r, theta, z]
#Generalized Schwartzchild metric
A, B = sp.Function('A')(r), sp.Function('B')(r)
sc_metric, sc_metric_ky = np.diag([B, A, r**2, r**2*sp.sin(theta)**2]), [t, r, theta, phi]
#FRW cartesian metric
frw_c_metric_key = [t, x, y, z]
frw_c_metric = zerotensor(2)
frw_c_metric[0][0] = -1
for i in range(1, 4):
for j in range(1, 4):
frw_c_metric[i][j] = a**2*(kronecker_delta(i, j) +
k*((frw_c_metric_key[i]*frw_c_metric_key[j])/(1-k*(x**2+y**2+z**2))))
#rprint(frw_c_metric)
#FRW cartesian metric generalized to Bianchi
b_c_metric_key = [t, x, y, z]
b_c_scale_factors = [-1,a,b,c]
b_c_metric = zerotensor(2)
b_c_metric[0][0] = -1
for i in range(1, 4):
for j in range(1, 4):
b_c_metric[i][j] = b_c_scale_factors[i]*b_c_scale_factors[j]*(kronecker_delta(i, j) +
k*((b_c_metric_key[i]*b_c_metric_key[j])/(1-k*(x**2+y**2+z**2))))
#mprint(b_c_metric)
perturbations = tensor(2)
for r in range(4):
for c in range(4):
perturbations[r][c] = sp.Function('h'+str(r)+str(c))(t, x, y, z)
rprint(perturbed_ricci_tensor(bc_metric, bc_metric_key, perturbations))
#T = np.diag([-rho0*(a0*b0*c0/(a*b*c))**sp.Rational(4, 3) - (3.0*k)/((a*b*c)**sp.Rational(2, 3)*8*pi*G) , p0*a0**2*b0*c0/(a**2*b*c) - k/(a**2*8*pi*G), p0*a0*b0**2*c0/(a*b**2*c) - k/(b**2*8*pi*G), p0*a0*b0*c0**2/(a*b*c**2) - k/(c**2*8*pi*G)])
#T = np.diag([-rho0*(a0/a)**4.0, (rho0*(a0/a)**4.0)/3.0, (rho0*(a0/a)**4.0)/3.0, (rho0*(a0/a)**4.0)/3.0])
#T = np.diag([0, 0, 0, 0])
#rho = sp.Symbol('rho')
#p = sp.Symbol('p')
#p1 = sp.Symbol('p1')
#p2 = sp.Symbol('p2')
#T = np.diag([-rho, p, p, p])
#einstein = raise_one_index(einstein_tensor_from_scratch(frw_metric, frw_metric_key, showprogress = True), frw_metric)
#rprint(einstein)
#print('Bianchi Spacetime Einstein Equations:')
#ein_eq = einstein_equations(einstein, T)
#rprint(einstein[1,1]*einstein[2,2]*einstein[3,3]/einstein[0,0]**3-(p0/rho0)**3)
#rprint(einstein)
#print(sp.simplify(-1*ein_eq[3] + sum(ein_eq[:3])))
#print('Conservation Equation for Bianchi Spacetime:')
#rprint(conservation_equations(frw_metric, frw_metric_key, T))
#einstein = raise_one_index(einstein_tensor_from_scratch(frw_c_metric, bc_metric_key), frw_c_metric, showprogress = True)
#print('FRW Spacetime Einstein Equations:')
#rprint(einstein_equations(einstein, np.diag([-rho, p, p, p])))
#print('FRW Equation for Bianchi Spacetime:')
#rprint(conservation_equations(frw_c_metric, frw_c_metric_key, np.diag([-rho, p, p, p])))
Added perturbed_einstein_equations
"""
genrel package for GR calculations
David Clark, Kai Smith
Case Western Reserve University
2014
"""
import numpy as np
import sympy as sp
#returns a rank 3 tensor that represents the Christoffel symbols
#first index corresponds to the upper index; the last two are lower
#Gamma^alpha_{beta gamma} = (1/2) g^{alpha delta} *
#    (g_{delta beta, gamma} + g_{delta gamma, beta} - g_{beta gamma, delta})
def christoffel_symbols(metric, metric_key):
    symbols = tensor(3)
    inverse = inverse_metric(metric)
    for alpha in range(4):
        for beta in range(4):
            for gamma in range(4):
                total = 0
                for delta in range(4):
                    total += inverse[alpha][delta] * (sp.diff(metric[delta][beta], metric_key[gamma])
                        + sp.diff(metric[delta][gamma], metric_key[beta])
                        - sp.diff(metric[beta][gamma], metric_key[delta]))
                # the factor of 1/2 from the definition is applied here
                symbols[alpha][beta][gamma] = sp.simplify(total/2)
    return symbols
#returns the rank 4 Reimann (Riemann) curvature tensor
#the first index corresponds to an upper index -- the rest are lower
#R^a_{bcd} = d_c Gamma^a_{bd} - d_d Gamma^a_{bc}
#          + Gamma^a_{ce} Gamma^e_{bd} - Gamma^a_{de} Gamma^e_{bc}
def reimann_tensor(chris_sym, metric_key):
    reimann = tensor(4)
    for alpha in range(4):
        for beta in range(4):
            for gamma in range(4):
                for delta in range(4):
                    total = 0
                    total += sp.diff(chris_sym[alpha][beta][delta], metric_key[gamma])
                    total -= sp.diff(chris_sym[alpha][beta][gamma], metric_key[delta])
                    for epsilon in range(4):
                        total += chris_sym[alpha][gamma][epsilon]*chris_sym[epsilon][beta][delta]
                        total -= chris_sym[alpha][delta][epsilon]*chris_sym[epsilon][beta][gamma]
                    reimann[alpha][beta][gamma][delta] = sp.cancel(total)
    return reimann
#returns the rank 2 Ricci curvature tensor (both indices are lower),
#obtained by contracting the Riemann tensor's first and third indices
def ricci_tensor(reimann):
    ricci = tensor(2)
    for mu in range(4):
        for nu in range(4):
            contraction = sum(reimann[lam][mu][lam][nu] for lam in range(4))
            ricci[mu][nu] = sp.cancel(contraction)
    return ricci
#returns the Ricci scalar (a sympy expression): g^{ab} R_{ab}
def ricci_scalar(ricci_t, metric):
    inverse = inverse_metric(metric)
    contraction = 0
    for mu in range(4):
        for nu in range(4):
            contraction += inverse[mu][nu] * ricci_t[mu][nu]
    return sp.cancel(contraction)
#returns the rank 2 Einstein tensor: G_ab = R_ab - (1/2) g_ab R
#both indices are lower
#think about whether you need to call raise_one_index before equating with a stress-energy tensor
def einstein_tensor(ricci_t, ricci_s, metric):
    einstein = tensor(2)
    for alpha in range(4):
        for beta in range(4):
            # Use an exact Rational(1, 2) instead of the float 0.5 so the
            # symbolic result stays exact -- the float would otherwise leak
            # inexact coefficients into every downstream expression.
            einstein[alpha][beta] = sp.cancel(ricci_t[alpha][beta] - sp.Rational(1, 2)*metric[alpha][beta]*ricci_s)
    return einstein
#runs through all parts of the program to find the Einstein tensor given only the metric and its key
def einstein_tensor_from_scratch(metric, metric_key, showprogress = False):
    def _report(message):
        #progress messages are only wanted when explicitly requested
        if showprogress:
            print(message)
    c_syms = christoffel_symbols(metric, metric_key)
    _report("Christoffel Symbols calculated")
    reimann_t = reimann_tensor(c_syms, metric_key)
    _report("Reimann Tensor calculated")
    ricci_t = ricci_tensor(reimann_t)
    _report("Ricci Tensor calculated")
    ricci_s = ricci_scalar(ricci_t, metric)
    _report("Ricci Scalar calculated")
    return einstein_tensor(ricci_t, ricci_s, metric)
#returns expressions which, when set equal to zero, give the Einstein
#equations G_ab - 8*pi*G*T_ab = 0 (zero and duplicate components dropped)
def einstein_equations(einstein_tensor, stress_energy_tensor):
    equations = []
    gravitational_constant = sp.Symbol('G')
    for mu in range(4):
        for nu in range(4):
            expr = sp.simplify(einstein_tensor[mu][nu] - 8*sp.pi*gravitational_constant*stress_energy_tensor[mu][nu])
            if expr != 0 and expr not in equations:
                equations.append(expr)
    return np.array(equations)
#returns the equations expressing local conservation of stress-energy
#(covariant divergence of the mixed stress-energy tensor; one expression
#per free index, zero and duplicate expressions dropped)
def conservation_equations(metric, metric_key, stress_energy_tensor):
    equations = []
    # work with one index raised so the divergence can be taken directly
    stress_energy_tensor = raise_one_index(stress_energy_tensor, metric)
    cs = christoffel_symbols(metric, metric_key)
    for u in range(4):
        eq = 0
        for v in range(4):
            eq += sp.diff(stress_energy_tensor[u][v], metric_key[v])
            for s in range(4):
                # Christoffel correction terms of the covariant divergence
                eq += stress_energy_tensor[s][v]*cs[u][s][v]
                eq += stress_energy_tensor[u][s]*cs[v][s][v]
        eq = sp.simplify(eq)
        if eq != 0 and eq not in equations:
            equations.append(eq)
    return np.array(equations)
#returns a 4 x 4 x ... x 4 object array able to hold sympy expressions,
#representing a tensor of the given rank (entries are uninitialized)
def tensor(rank):
    return np.empty((4,) * rank, dtype = type(sp.Symbol('')))
#returns a 4 x 4 x ... x 4 object array filled with zeros,
#representing a tensor of the given rank
def zerotensor(rank):
    return np.zeros((4,) * rank, dtype = type(sp.Symbol('')))
#returns the rank (number of indices) of a tensor stored as a numpy array
def rank(tensor):
    shape = tensor.shape
    return len(shape)
#returns the inverse of metric, computed exactly via sympy's Matrix.inv()
#and converted back to a numpy object array
def inverse_metric(metric):
    return np.array(sp.Matrix(metric).inv())
#matrix-multiplies the inverse metric and the tensor
#represents raising one index on a rank 2 tensor
#(index is the number of axes tensordot sums over; the default 1 is an
#ordinary matrix product)
def raise_one_index(tensor, metric, index = 1):
    return np.tensordot(inverse_metric(metric), tensor, index)
#contracts the metric with the tensor (an ordinary matrix product for the
#default index=1); represents lowering one index on a rank 2 tensor
def lower_one_index(tensor, metric, index = 1):
    return np.tensordot(metric, tensor, axes=index)
#the Kronecker delta: 1 when the two indices match, 0 otherwise
def kronecker_delta(a, b):
    return 1 if a == b else 0
#returns the metric perturbations with both indices raised, to first order:
#h^{uv} = -g^{u rho} g^{v sigma} h_{rho sigma}
def inverse_perturbations(metric, pert):
    h_inv = tensor(2)
    g_inv = inverse_metric(metric)
    for u in range(4):
        for v in range(4):
            total = 0
            for rho in range(4):
                for sigma in range(4):
                    # the minus sign comes from expanding the inverse of
                    # (g + h) to first order in h
                    total -= g_inv[u][rho]*g_inv[v][sigma]*pert[rho][sigma]
            h_inv[u][v] = sp.simplify(total)
    return h_inv
#returns the first-order perturbation of the Christoffel symbols
#First index corresponds to upper index
#christoffel may be passed in to avoid recomputing the background symbols
def perturbed_christoffel_symbols(metric, metric_key, perturbations, christoffel=None):
    perturbed_symbols = tensor(3)
    # bugfix: use "is None" -- comparing a numpy array to None with "=="
    # broadcasts elementwise and makes the condition ambiguous whenever a
    # precomputed christoffel array is actually passed in
    symbols = christoffel_symbols(metric, metric_key) if christoffel is None else christoffel
    inverse = inverse_metric(metric)
    for mu in range(4):
        for nu in range(4):
            for lamb in range(4):
                total = 0
                for rho in range(4):
                    for sigma in range(4):
                        total -= inverse[mu][rho]*perturbations[rho][sigma]*symbols[sigma][nu][lamb]
                    total += sp.Rational(1,2)*inverse[mu][rho]*(
                        sp.diff(perturbations[rho][nu], metric_key[lamb])
                        +sp.diff(perturbations[rho][lamb], metric_key[nu])
                        -sp.diff(perturbations[lamb][nu], metric_key[rho]))
                perturbed_symbols[mu][nu][lamb] = sp.simplify(total)
    return perturbed_symbols
#returns the first-order perturbation of the Ricci tensor (both indices lower)
#chris / dchris may be passed in to avoid recomputing them
def perturbed_ricci_tensor(metric, metric_key, pert, chris = None, dchris = None):
    dRicci = tensor(2)
    # bugfix: "== None" on a numpy array broadcasts elementwise, so the
    # "if" raises whenever precomputed arrays are supplied; "is None" is
    # the correct identity test for the default
    if chris is None:
        chris = christoffel_symbols(metric, metric_key)
    if dchris is None:
        dchris = perturbed_christoffel_symbols(metric, metric_key, pert, chris)
    for u in range(4):
        for k in range(4):
            total = 0
            for l in range(4):
                total += sp.diff(dchris[l][u][l], metric_key[k])
                total -= sp.diff(dchris[l][u][k], metric_key[l])
                for v in range(4):
                    for e in range(4):
                        total += dchris[e][u][v]*chris[v][k][e]
                        total += dchris[v][k][e]*chris[e][u][v]
                        total -= dchris[e][u][k]*chris[v][v][e]
                        total -= dchris[v][v][e]*chris[e][u][k]
            dRicci[u][k] = sp.simplify(total)
    return dRicci
#returns the perturbed source tensor dS_uv = dT_uv - (1/2) g_uv dT - (1/2) h_uv T
#(the trace-reversed form of the perturbed stress-energy tensor)
def perturbed_source_tensor(metric, stress_energy, perturbed_stress_energy, perturbations):
    perturbed_source = tensor(2)
    stress_energy_trace = sum(stress_energy[i][i] for i in range(4))
    perturbed_stress_energy_trace = sum(perturbed_stress_energy[i][i] for i in range(4))
    for mu in range(4):
        for nu in range(4):
            # bugfix: the two subtracted terms used to be written as separate
            # statements (no line continuation), so they were evaluated and
            # silently discarded; the whole expression is now parenthesized.
            # sp.simplify is also applied per entry -- calling it on the whole
            # numpy array (as the old "return sp.simplify(...)" did) fails.
            perturbed_source[mu][nu] = sp.simplify(
                perturbed_stress_energy[mu][nu]
                - sp.Rational(1, 2)*metric[mu][nu]*perturbed_stress_energy_trace
                - sp.Rational(1, 2)*perturbations[mu][nu]*stress_energy_trace)
    return perturbed_source
#returns expressions which, when set equal to zero, give the perturbed
#Einstein equations dR_uv + 8*pi*G*dS_uv = 0
#(zero and duplicate components are dropped)
def perturbed_einstein_equations(perturbed_ricci_tesor, perturbed_source_tesor):
    equations = []
    for mu in range(4):
        for nu in range(4):
            # NOTE(review): the sign convention here (+8*pi*G) differs from
            # einstein_equations (-8*pi*G) -- confirm this is intentional for
            # the trace-reversed source form
            eq = sp.simplify(perturbed_ricci_tesor[mu][nu]
                +8*sp.pi*sp.Symbol('G')*perturbed_source_tesor[mu][nu])
            if eq != 0 and eq not in equations:
                equations.append(eq)
    return np.array(equations)
#prints a tensor (or a sympy scalar) in a readable form
#zero entries are skipped; each non-zero entry is prefixed with its index
#path (position accumulates the indices during recursion)
def rprint(obj, position = []):
    if type(obj) != type(np.array([])):
        # scalar case: pretty-print unless it is zero
        if obj != 0:
            sp.pprint(sp.simplify(obj))
    else:
        for n, entry in enumerate(obj):
            if type(entry) != type(np.array([])) and entry != 0:
                print(str(position + [n]) + ": ")
                sp.pprint(sp.simplify(entry))
            else:
                # sub-array (or zero scalar): recurse with the index appended
                rprint(entry, position + [n])
#prints a tensor (or a sympy scalar) in LaTeX
#zero entries are skipped; each non-zero entry is prefixed with its index path
def lprint(obj, position = []):
    if type(obj) != type(np.array([])):
        if obj != 0:
            # bugfix: this branch referenced the undefined name "entry",
            # raising NameError for any non-zero scalar argument
            print(sp.latex(sp.simplify(obj)))
    else:
        for n, entry in enumerate(obj):
            if type(entry) != type(np.array([])) and entry != 0:
                print(str(position + [n]) + ": ")
                print(sp.latex(sp.simplify(entry)))
            else:
                lprint(entry, position + [n])
"""#Prints a sympy expression or expressions in a Mathematica ready form
def mprint(obj, position = []):
if type(obj) != type(np.array([])):
if obj != 0:
print(mathematicize(obj))
else:
for n, entry in enumerate(obj):
if type(entry) != type(np.array([])) and entry != 0:
print(str(position + [n]) + ": ")
print(mathematicize(entry))
else:
mprint(entry, position + [n])"""
#Prints a sympy expression or expressions in a Mathematica (Matrix) ready form,
#wrapping nested arrays in braces and separating siblings with commas
#NOTE(review): the recursive call passes eol = len(obj)-1 (an int, not a bool),
#so "if eol == True" is False for nested calls and the closing brace of the
#outermost structure is only printed at the top level -- confirm this is the
#intended bracket balancing before relying on the output
def mprint(obj, position = [], eol = True):
    if type(obj) != type(np.array([])):
        # scalar case: print the Mathematica form unless it is zero
        if obj != 0:
            print(mathematicize(obj))
    else:
        print('{')
        for n, entry in enumerate(obj):
            if type(entry) != type(np.array([])): #and entry != 0:
                #print(str(position + [n]) + ": ")
                print(mathematicize(entry))
                if n != len(obj)-1:
                    print(',')
            else:
                mprint(entry, eol = len(obj)-1) #, position + [n])
                print('}')
                if n != len(obj)-1:
                    print(',')
        if eol == True:
            print('}')
#Turns a single expression into Mathematica readable form
#NOTE: Program currently assumes that all functions are functions of time and all derivatives are with respect to time
def mathematicize(exp):
    text = str(exp)
    #Mathematica uses ^ for exponentiation
    text = text.replace('**', '^')
    #Rewrite each Derivative(f(t), t, ...) as f'[t], f''[t], ...
    while True:
        start = text.find('Derivative')
        if start == -1:
            break
        depth = 1      #paren nesting level, counting Derivative's own paren
        commas = 0     #commas seen at depth 1 == order of the derivative
        func_name = ""
        pos = start + len('Derivative(')
        while depth > 0:
            ch = text[pos]
            if ch == '(':
                depth += 1
            elif ch == ')':
                depth -= 1
            elif ch == ',':
                commas += 1
            elif depth == 1 and commas == 0:
                func_name += ch
            pos += 1
        text = text[:start] + func_name + '\''*commas + '[t]' + text[pos:]
    #Give remaining function calls square brackets
    return text.replace('(t)', '[t]')
if __name__ == "__main__":
#Defines commonly used variable and functions
t = sp.Symbol('t')
r = sp.Symbol('r')
theta = sp.Symbol('theta')
phi = sp.Symbol('phi')
x = sp.Symbol('x')
y = sp.Symbol('y')
z = sp.Symbol('z')
k = sp.Symbol('k')
pi = sp.pi
a = sp.Function('a')(t)
b = sp.Function('b')(t)
c = sp.Function('c')(t)
a0 = sp.Symbol('a0')
b0 = sp.Symbol('b0')
c0 = sp.Symbol('c0')
#w = sp.Rational(1, 3)#sp.Symbol('w')
#rho = sp.Function('rho')(t)
#p = w*rho
#G = sp.Symbol('G')
I0 = sp.Symbol('I0')
omega0 = sp.Symbol('Omega0')
rho0 = sp.Symbol('rho0')#I0*omega0/(8 * sp.pi * G)
p0 = sp.Symbol('p0')#w*rho0
#FRW metric
frw_metric, frw_metric_key = np.diag([-1, a**2/(1-k*r**2), a**2*r**2,a**2*r**2*sp.sin(theta)**2]), [t, r, theta, phi]
#Bianchi metric (currently flat, does not assume isotropy)
bc_metric, bc_metric_key = np.diag([-1, a**2, a**2, a**2]), [t, x, y, z]
#Bianchi with cylindrical curvarute
bcurve, bcurve_key = np.diag([-1, a**2/(1-k*r**2), a**2*r**2, b**2]), [t, r, theta, z]
#Generalized Schwartzchild metric
A, B = sp.Function('A')(r), sp.Function('B')(r)
sc_metric, sc_metric_ky = np.diag([B, A, r**2, r**2*sp.sin(theta)**2]), [t, r, theta, phi]
#FRW cartesian metric
frw_c_metric_key = [t, x, y, z]
frw_c_metric = zerotensor(2)
frw_c_metric[0][0] = -1
for i in range(1, 4):
for j in range(1, 4):
frw_c_metric[i][j] = a**2*(kronecker_delta(i, j) +
k*((frw_c_metric_key[i]*frw_c_metric_key[j])/(1-k*(x**2+y**2+z**2))))
#rprint(frw_c_metric)
#FRW cartesian metric generalized to Bianchi
b_c_metric_key = [t, x, y, z]
b_c_scale_factors = [-1,a,b,c]
b_c_metric = zerotensor(2)
b_c_metric[0][0] = -1
for i in range(1, 4):
for j in range(1, 4):
b_c_metric[i][j] = b_c_scale_factors[i]*b_c_scale_factors[j]*(kronecker_delta(i, j) +
k*((b_c_metric_key[i]*b_c_metric_key[j])/(1-k*(x**2+y**2+z**2))))
#mprint(b_c_metric)
perturbations = tensor(2)
for r in range(4):
for c in range(4):
perturbations[r][c] = sp.Function('h'+str(r)+str(c))(t, x, y, z)
dRicci = perturbed_ricci_tensor(bc_metric, bc_metric_key, perturbations)
rprint(dRicci)
#T = np.diag([-rho0*(a0*b0*c0/(a*b*c))**sp.Rational(4, 3) - (3.0*k)/((a*b*c)**sp.Rational(2, 3)*8*pi*G) , p0*a0**2*b0*c0/(a**2*b*c) - k/(a**2*8*pi*G), p0*a0*b0**2*c0/(a*b**2*c) - k/(b**2*8*pi*G), p0*a0*b0*c0**2/(a*b*c**2) - k/(c**2*8*pi*G)])
#T = np.diag([-rho0*(a0/a)**4.0, (rho0*(a0/a)**4.0)/3.0, (rho0*(a0/a)**4.0)/3.0, (rho0*(a0/a)**4.0)/3.0])
#T = np.diag([0, 0, 0, 0])
#rho = sp.Symbol('rho')
#p = sp.Symbol('p')
#p1 = sp.Symbol('p1')
#p2 = sp.Symbol('p2')
#T = np.diag([-rho, p, p, p])
#einstein = raise_one_index(einstein_tensor_from_scratch(frw_metric, frw_metric_key, showprogress = True), frw_metric)
#rprint(einstein)
#print('Bianchi Spacetime Einstein Equations:')
#ein_eq = einstein_equations(einstein, T)
#rprint(einstein[1,1]*einstein[2,2]*einstein[3,3]/einstein[0,0]**3-(p0/rho0)**3)
#rprint(einstein)
#print(sp.simplify(-1*ein_eq[3] + sum(ein_eq[:3])))
#print('Conservation Equation for Bianchi Spacetime:')
#rprint(conservation_equations(frw_metric, frw_metric_key, T))
#einstein = raise_one_index(einstein_tensor_from_scratch(frw_c_metric, bc_metric_key), frw_c_metric, showprogress = True)
#print('FRW Spacetime Einstein Equations:')
#rprint(einstein_equations(einstein, np.diag([-rho, p, p, p])))
#print('FRW Equation for Bianchi Spacetime:')
#rprint(conservation_equations(frw_c_metric, frw_c_metric_key, np.diag([-rho, p, p, p])))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.