repository_name
stringclasses 316
values | func_path_in_repository
stringlengths 6
223
| func_name
stringlengths 1
134
| language
stringclasses 1
value | func_code_string
stringlengths 57
65.5k
| func_documentation_string
stringlengths 1
46.3k
| split_name
stringclasses 1
value | func_code_url
stringlengths 91
315
| called_functions
listlengths 1
156
⌀ | enclosing_scope
stringlengths 2
1.48M
|
|---|---|---|---|---|---|---|---|---|---|
solocompt/plugs-mail
|
plugs_mail/utils.py
|
to_email
|
python
|
def to_email(email_class, email, language=None, **data):
if language:
email_class().send([email], language=language, **data)
else:
email_class().send([email], translation.get_language(), **data)
|
Send email to specified email address
|
train
|
https://github.com/solocompt/plugs-mail/blob/6139fa79ddb437562db1769d03bd3098c25a06fa/plugs_mail/utils.py#L8-L15
| null |
"""
Plugs Mail Utils
"""
from django.utils import translation
from django.contrib.auth import get_user_model
def to_user(email_class, user, **data):
"""
Email user
"""
try:
email_class().send([user.email], user.language, **data)
except AttributeError:
# this is a fallback in case the user model does not have the language field
email_class().send([user.email], translation.get_language(), **data)
def to_staff(email_class, **data):
"""
Email staff users
"""
for user in get_user_model().objects.filter(is_staff=True):
try:
email_class().send([user.email], user.language, **data)
except AttributeError:
email_class().send([user.email], translation.get_language(), **data)
def to_superuser(email_class, **data):
"""
Email superusers
"""
for user in get_user_model().objects.filter(is_superuser=True):
try:
email_class().send([user.email], user.language, **data)
except AttributeError:
email_class().send([user.email], translation.get_language(), **data)
|
solocompt/plugs-mail
|
plugs_mail/utils.py
|
to_user
|
python
|
def to_user(email_class, user, **data):
try:
email_class().send([user.email], user.language, **data)
except AttributeError:
# this is a fallback in case the user model does not have the language field
email_class().send([user.email], translation.get_language(), **data)
|
Email user
|
train
|
https://github.com/solocompt/plugs-mail/blob/6139fa79ddb437562db1769d03bd3098c25a06fa/plugs_mail/utils.py#L17-L25
| null |
"""
Plugs Mail Utils
"""
from django.utils import translation
from django.contrib.auth import get_user_model
def to_email(email_class, email, language=None, **data):
"""
Send email to specified email address
"""
if language:
email_class().send([email], language=language, **data)
else:
email_class().send([email], translation.get_language(), **data)
def to_staff(email_class, **data):
"""
Email staff users
"""
for user in get_user_model().objects.filter(is_staff=True):
try:
email_class().send([user.email], user.language, **data)
except AttributeError:
email_class().send([user.email], translation.get_language(), **data)
def to_superuser(email_class, **data):
"""
Email superusers
"""
for user in get_user_model().objects.filter(is_superuser=True):
try:
email_class().send([user.email], user.language, **data)
except AttributeError:
email_class().send([user.email], translation.get_language(), **data)
|
solocompt/plugs-mail
|
plugs_mail/utils.py
|
to_staff
|
python
|
def to_staff(email_class, **data):
for user in get_user_model().objects.filter(is_staff=True):
try:
email_class().send([user.email], user.language, **data)
except AttributeError:
email_class().send([user.email], translation.get_language(), **data)
|
Email staff users
|
train
|
https://github.com/solocompt/plugs-mail/blob/6139fa79ddb437562db1769d03bd3098c25a06fa/plugs_mail/utils.py#L27-L35
| null |
"""
Plugs Mail Utils
"""
from django.utils import translation
from django.contrib.auth import get_user_model
def to_email(email_class, email, language=None, **data):
"""
Send email to specified email address
"""
if language:
email_class().send([email], language=language, **data)
else:
email_class().send([email], translation.get_language(), **data)
def to_user(email_class, user, **data):
"""
Email user
"""
try:
email_class().send([user.email], user.language, **data)
except AttributeError:
# this is a fallback in case the user model does not have the language field
email_class().send([user.email], translation.get_language(), **data)
def to_superuser(email_class, **data):
"""
Email superusers
"""
for user in get_user_model().objects.filter(is_superuser=True):
try:
email_class().send([user.email], user.language, **data)
except AttributeError:
email_class().send([user.email], translation.get_language(), **data)
|
solocompt/plugs-mail
|
plugs_mail/utils.py
|
to_superuser
|
python
|
def to_superuser(email_class, **data):
for user in get_user_model().objects.filter(is_superuser=True):
try:
email_class().send([user.email], user.language, **data)
except AttributeError:
email_class().send([user.email], translation.get_language(), **data)
|
Email superusers
|
train
|
https://github.com/solocompt/plugs-mail/blob/6139fa79ddb437562db1769d03bd3098c25a06fa/plugs_mail/utils.py#L37-L45
| null |
"""
Plugs Mail Utils
"""
from django.utils import translation
from django.contrib.auth import get_user_model
def to_email(email_class, email, language=None, **data):
"""
Send email to specified email address
"""
if language:
email_class().send([email], language=language, **data)
else:
email_class().send([email], translation.get_language(), **data)
def to_user(email_class, user, **data):
"""
Email user
"""
try:
email_class().send([user.email], user.language, **data)
except AttributeError:
# this is a fallback in case the user model does not have the language field
email_class().send([user.email], translation.get_language(), **data)
def to_staff(email_class, **data):
"""
Email staff users
"""
for user in get_user_model().objects.filter(is_staff=True):
try:
email_class().send([user.email], user.language, **data)
except AttributeError:
email_class().send([user.email], translation.get_language(), **data)
|
solocompt/plugs-mail
|
plugs_mail/mail.py
|
PlugsMail.validate_context
|
python
|
def validate_context(self):
if self.context and len(self.context) != len(set(self.context)):
LOGGER.error('Cannot have duplicated context objects')
raise Exception('Cannot have duplicated context objects.')
|
Make sure there are no duplicate context objects
or we might end up with switched data
Converting the tuple to a set gets rid of the
eventual duplicate objects, comparing the length
of the original tuple and set tells us if we
have duplicates in the tuple or not
|
train
|
https://github.com/solocompt/plugs-mail/blob/6139fa79ddb437562db1769d03bd3098c25a06fa/plugs_mail/mail.py#L33-L45
| null |
class PlugsMail(object):
"""
Solo mail is the class responsible for
getting and validating the context and
prepare the email for sending
"""
template = None
context = None
context_data = {}
data = None
def __init__(self):
self.validate_context()
assert self.template, 'Must set template attribute on subclass.'
def get_instance_of(self, model_cls):
"""
Search the data to find a instance
of a model specified in the template
"""
for obj in self.data.values():
if isinstance(obj, model_cls):
return obj
LOGGER.error('Context Not Found')
raise Exception('Context Not Found')
def get_context(self):
"""
Create a dict with the context data
context is not required, but if it
is defined it should be a tuple
"""
if not self.context:
return
else:
assert isinstance(self.context, tuple), 'Expected a Tuple not {0}'.format(type(self.context))
for model in self.context:
model_cls = utils.get_model_class(model)
key = utils.camel_to_snake(model_cls.__name__)
self.context_data[key] = self.get_instance_of(model_cls)
def get_extra_context(self):
"""
Override this method if you want to provide
extra context. The extra_context must be a dict.
Be very careful no validation is being performed.
"""
return {}
def get_context_data(self):
"""
Context Data is equal to context + extra_context
Merge the dicts context_data and extra_context and
update state
"""
self.get_context()
self.context_data.update(self.get_extra_context())
return self.context_data
def send(self, to, language=None, **data):
"""
This is the method to be called
"""
self.data = data
self.get_context_data()
if app_settings['SEND_EMAILS']:
try:
if language:
mail.send(to, template=self.template, context=self.context_data, language=language)
else:
mail.send(to, template=self.template, context=self.context_data)
except EmailTemplate.DoesNotExist:
msg = 'Trying to use a non existent email template {0}'.format(self.template)
LOGGER.error('Trying to use a non existent email template {0}'.format(self.template))
|
solocompt/plugs-mail
|
plugs_mail/mail.py
|
PlugsMail.get_instance_of
|
python
|
def get_instance_of(self, model_cls):
for obj in self.data.values():
if isinstance(obj, model_cls):
return obj
LOGGER.error('Context Not Found')
raise Exception('Context Not Found')
|
Search the data to find a instance
of a model specified in the template
|
train
|
https://github.com/solocompt/plugs-mail/blob/6139fa79ddb437562db1769d03bd3098c25a06fa/plugs_mail/mail.py#L47-L56
| null |
class PlugsMail(object):
"""
Solo mail is the class responsible for
getting and validating the context and
prepare the email for sending
"""
template = None
context = None
context_data = {}
data = None
def __init__(self):
self.validate_context()
assert self.template, 'Must set template attribute on subclass.'
def validate_context(self):
"""
Make sure there are no duplicate context objects
or we might end up with switched data
Converting the tuple to a set gets rid of the
eventual duplicate objects, comparing the length
of the original tuple and set tells us if we
have duplicates in the tuple or not
"""
if self.context and len(self.context) != len(set(self.context)):
LOGGER.error('Cannot have duplicated context objects')
raise Exception('Cannot have duplicated context objects.')
def get_context(self):
"""
Create a dict with the context data
context is not required, but if it
is defined it should be a tuple
"""
if not self.context:
return
else:
assert isinstance(self.context, tuple), 'Expected a Tuple not {0}'.format(type(self.context))
for model in self.context:
model_cls = utils.get_model_class(model)
key = utils.camel_to_snake(model_cls.__name__)
self.context_data[key] = self.get_instance_of(model_cls)
def get_extra_context(self):
"""
Override this method if you want to provide
extra context. The extra_context must be a dict.
Be very careful no validation is being performed.
"""
return {}
def get_context_data(self):
"""
Context Data is equal to context + extra_context
Merge the dicts context_data and extra_context and
update state
"""
self.get_context()
self.context_data.update(self.get_extra_context())
return self.context_data
def send(self, to, language=None, **data):
"""
This is the method to be called
"""
self.data = data
self.get_context_data()
if app_settings['SEND_EMAILS']:
try:
if language:
mail.send(to, template=self.template, context=self.context_data, language=language)
else:
mail.send(to, template=self.template, context=self.context_data)
except EmailTemplate.DoesNotExist:
msg = 'Trying to use a non existent email template {0}'.format(self.template)
LOGGER.error('Trying to use a non existent email template {0}'.format(self.template))
|
solocompt/plugs-mail
|
plugs_mail/mail.py
|
PlugsMail.get_context
|
python
|
def get_context(self):
if not self.context:
return
else:
assert isinstance(self.context, tuple), 'Expected a Tuple not {0}'.format(type(self.context))
for model in self.context:
model_cls = utils.get_model_class(model)
key = utils.camel_to_snake(model_cls.__name__)
self.context_data[key] = self.get_instance_of(model_cls)
|
Create a dict with the context data
context is not required, but if it
is defined it should be a tuple
|
train
|
https://github.com/solocompt/plugs-mail/blob/6139fa79ddb437562db1769d03bd3098c25a06fa/plugs_mail/mail.py#L58-L71
|
[
"def get_instance_of(self, model_cls):\n \"\"\"\n Search the data to find a instance\n of a model specified in the template\n \"\"\"\n for obj in self.data.values():\n if isinstance(obj, model_cls):\n return obj\n LOGGER.error('Context Not Found')\n raise Exception('Context Not Found')\n"
] |
class PlugsMail(object):
"""
Solo mail is the class responsible for
getting and validating the context and
prepare the email for sending
"""
template = None
context = None
context_data = {}
data = None
def __init__(self):
self.validate_context()
assert self.template, 'Must set template attribute on subclass.'
def validate_context(self):
"""
Make sure there are no duplicate context objects
or we might end up with switched data
Converting the tuple to a set gets rid of the
eventual duplicate objects, comparing the length
of the original tuple and set tells us if we
have duplicates in the tuple or not
"""
if self.context and len(self.context) != len(set(self.context)):
LOGGER.error('Cannot have duplicated context objects')
raise Exception('Cannot have duplicated context objects.')
def get_instance_of(self, model_cls):
"""
Search the data to find a instance
of a model specified in the template
"""
for obj in self.data.values():
if isinstance(obj, model_cls):
return obj
LOGGER.error('Context Not Found')
raise Exception('Context Not Found')
def get_extra_context(self):
"""
Override this method if you want to provide
extra context. The extra_context must be a dict.
Be very careful no validation is being performed.
"""
return {}
def get_context_data(self):
"""
Context Data is equal to context + extra_context
Merge the dicts context_data and extra_context and
update state
"""
self.get_context()
self.context_data.update(self.get_extra_context())
return self.context_data
def send(self, to, language=None, **data):
"""
This is the method to be called
"""
self.data = data
self.get_context_data()
if app_settings['SEND_EMAILS']:
try:
if language:
mail.send(to, template=self.template, context=self.context_data, language=language)
else:
mail.send(to, template=self.template, context=self.context_data)
except EmailTemplate.DoesNotExist:
msg = 'Trying to use a non existent email template {0}'.format(self.template)
LOGGER.error('Trying to use a non existent email template {0}'.format(self.template))
|
solocompt/plugs-mail
|
plugs_mail/mail.py
|
PlugsMail.get_context_data
|
python
|
def get_context_data(self):
self.get_context()
self.context_data.update(self.get_extra_context())
return self.context_data
|
Context Data is equal to context + extra_context
Merge the dicts context_data and extra_context and
update state
|
train
|
https://github.com/solocompt/plugs-mail/blob/6139fa79ddb437562db1769d03bd3098c25a06fa/plugs_mail/mail.py#L82-L90
|
[
"def get_context(self):\n \"\"\"\n Create a dict with the context data\n context is not required, but if it\n is defined it should be a tuple\n \"\"\"\n if not self.context:\n return\n else:\n assert isinstance(self.context, tuple), 'Expected a Tuple not {0}'.format(type(self.context))\n for model in self.context:\n model_cls = utils.get_model_class(model)\n key = utils.camel_to_snake(model_cls.__name__)\n self.context_data[key] = self.get_instance_of(model_cls)\n",
"def get_extra_context(self):\n \"\"\"\n Override this method if you want to provide\n extra context. The extra_context must be a dict.\n Be very careful no validation is being performed.\n \"\"\"\n return {}\n"
] |
class PlugsMail(object):
"""
Solo mail is the class responsible for
getting and validating the context and
prepare the email for sending
"""
template = None
context = None
context_data = {}
data = None
def __init__(self):
self.validate_context()
assert self.template, 'Must set template attribute on subclass.'
def validate_context(self):
"""
Make sure there are no duplicate context objects
or we might end up with switched data
Converting the tuple to a set gets rid of the
eventual duplicate objects, comparing the length
of the original tuple and set tells us if we
have duplicates in the tuple or not
"""
if self.context and len(self.context) != len(set(self.context)):
LOGGER.error('Cannot have duplicated context objects')
raise Exception('Cannot have duplicated context objects.')
def get_instance_of(self, model_cls):
"""
Search the data to find a instance
of a model specified in the template
"""
for obj in self.data.values():
if isinstance(obj, model_cls):
return obj
LOGGER.error('Context Not Found')
raise Exception('Context Not Found')
def get_context(self):
"""
Create a dict with the context data
context is not required, but if it
is defined it should be a tuple
"""
if not self.context:
return
else:
assert isinstance(self.context, tuple), 'Expected a Tuple not {0}'.format(type(self.context))
for model in self.context:
model_cls = utils.get_model_class(model)
key = utils.camel_to_snake(model_cls.__name__)
self.context_data[key] = self.get_instance_of(model_cls)
def get_extra_context(self):
"""
Override this method if you want to provide
extra context. The extra_context must be a dict.
Be very careful no validation is being performed.
"""
return {}
def send(self, to, language=None, **data):
"""
This is the method to be called
"""
self.data = data
self.get_context_data()
if app_settings['SEND_EMAILS']:
try:
if language:
mail.send(to, template=self.template, context=self.context_data, language=language)
else:
mail.send(to, template=self.template, context=self.context_data)
except EmailTemplate.DoesNotExist:
msg = 'Trying to use a non existent email template {0}'.format(self.template)
LOGGER.error('Trying to use a non existent email template {0}'.format(self.template))
|
solocompt/plugs-mail
|
plugs_mail/mail.py
|
PlugsMail.send
|
python
|
def send(self, to, language=None, **data):
self.data = data
self.get_context_data()
if app_settings['SEND_EMAILS']:
try:
if language:
mail.send(to, template=self.template, context=self.context_data, language=language)
else:
mail.send(to, template=self.template, context=self.context_data)
except EmailTemplate.DoesNotExist:
msg = 'Trying to use a non existent email template {0}'.format(self.template)
LOGGER.error('Trying to use a non existent email template {0}'.format(self.template))
|
This is the method to be called
|
train
|
https://github.com/solocompt/plugs-mail/blob/6139fa79ddb437562db1769d03bd3098c25a06fa/plugs_mail/mail.py#L92-L106
|
[
"def get_context_data(self):\n \"\"\"\n Context Data is equal to context + extra_context\n Merge the dicts context_data and extra_context and\n update state\n \"\"\"\n self.get_context()\n self.context_data.update(self.get_extra_context())\n return self.context_data\n"
] |
class PlugsMail(object):
"""
Solo mail is the class responsible for
getting and validating the context and
prepare the email for sending
"""
template = None
context = None
context_data = {}
data = None
def __init__(self):
self.validate_context()
assert self.template, 'Must set template attribute on subclass.'
def validate_context(self):
"""
Make sure there are no duplicate context objects
or we might end up with switched data
Converting the tuple to a set gets rid of the
eventual duplicate objects, comparing the length
of the original tuple and set tells us if we
have duplicates in the tuple or not
"""
if self.context and len(self.context) != len(set(self.context)):
LOGGER.error('Cannot have duplicated context objects')
raise Exception('Cannot have duplicated context objects.')
def get_instance_of(self, model_cls):
"""
Search the data to find a instance
of a model specified in the template
"""
for obj in self.data.values():
if isinstance(obj, model_cls):
return obj
LOGGER.error('Context Not Found')
raise Exception('Context Not Found')
def get_context(self):
"""
Create a dict with the context data
context is not required, but if it
is defined it should be a tuple
"""
if not self.context:
return
else:
assert isinstance(self.context, tuple), 'Expected a Tuple not {0}'.format(type(self.context))
for model in self.context:
model_cls = utils.get_model_class(model)
key = utils.camel_to_snake(model_cls.__name__)
self.context_data[key] = self.get_instance_of(model_cls)
def get_extra_context(self):
"""
Override this method if you want to provide
extra context. The extra_context must be a dict.
Be very careful no validation is being performed.
"""
return {}
def get_context_data(self):
"""
Context Data is equal to context + extra_context
Merge the dicts context_data and extra_context and
update state
"""
self.get_context()
self.context_data.update(self.get_extra_context())
return self.context_data
|
solocompt/plugs-mail
|
plugs_mail/management/commands/load_email_templates.py
|
Command.override_default_templates
|
python
|
def override_default_templates(self):
if plugs_mail_settings['OVERRIDE_TEMPLATE_DIR']:
dir_ = plugs_mail_settings['OVERRIDE_TEMPLATE_DIR']
for file_ in os.listdir(dir_):
if file_.endswith(('.html', 'txt')):
self.overrides[file_] = dir_
|
Override the default emails already defined by other apps
|
train
|
https://github.com/solocompt/plugs-mail/blob/6139fa79ddb437562db1769d03bd3098c25a06fa/plugs_mail/management/commands/load_email_templates.py#L29-L37
| null |
class Command(BaseCommand):
overrides = {}
def handle(self, *args, **options):
self.override_default_templates()
templates = self.get_apps()
count = self.create_templates(templates)
if count:
self.stdout.write(self.style.SUCCESS('Successfully loaded %s email templates' % count))
else:
self.stdout.write(self.style.SUCCESS('No email templates to load'))
def get_apps(self):
"""
Get the list of installed apps
and return the apps that have
an emails module
"""
templates = []
for app in settings.INSTALLED_APPS:
try:
app = import_module(app + '.emails')
templates += self.get_plugs_mail_classes(app)
except ImportError:
pass
return templates
def get_members(self, app):
return inspect.getmembers(app)
def get_templates_files_in_dir(self, dir_):
return os.listdir(dir_)
def get_template_files(self, location, class_name):
"""
Multilanguage support means that for each template
we can have multiple templtate files, this methods
returns all the template (html and txt) files
that match the (class) template name
"""
template_name = utils.camel_to_snake(class_name)
dir_ = location[:-9] + 'templates/emails/'
files_ = []
for file_ in self.get_templates_files_in_dir(dir_):
if file_.startswith(template_name) and file_.endswith(('.html', '.txt')):
if file_ in self.overrides:
files_.append(self.overrides[file_] + file_)
else:
files_.append(dir_ + file_)
return files_
def get_plugs_mail_classes(self, app):
"""
Returns a list of tuples, but it should
return a list of dicts
"""
classes = []
members = self.get_members(app)
for member in members:
name, cls = member
if inspect.isclass(cls) and issubclass(cls, PlugsMail) and name != 'PlugsMail':
files_ = self.get_template_files(app.__file__, name)
for file_ in files_:
try:
description = cls.description
location = file_
language = self.get_template_language(location)
classes.append((name, location, description, language))
except AttributeError:
raise AttributeError('Email class must specify email description.')
return classes
def get_template_language(self, file_):
"""
Return the template language
Every template file must end in
with the language code, and the
code must match the ISO_6301 lang code
https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes
valid examples:
account_created_pt.html
payment_created_en.txt
"""
stem = Path(file_).stem
language_code = stem.split('_')[-1:][0]
if len(language_code) != 2:
# TODO naive and temp implementation
# check if the two chars correspond to one of the
# available languages
raise Exception('Template file `%s` must end in ISO_639-1 language code.' % file_)
return language_code.lower()
def get_subject(self, text):
"""
Email template subject is the first
line of the email template, we can optionally
add SUBJECT: to make it clearer
"""
first_line = text.splitlines(True)[0]
# TODO second line should be empty
if first_line.startswith('SUBJECT:'):
subject = first_line[len('SUBJECT:'):]
else:
subject = first_line
return subject.strip()
def get_html_content(self, text):
"""
Parse content and return html
"""
lines = text.splitlines(True)
return ''.join(lines[2:])
def create_templates(self, templates):
"""
Gets a list of templates to insert into the database
"""
count = 0
for template in templates:
if not self.template_exists_db(template):
name, location, description, language = template
text = self.open_file(location)
html_content = self.get_html_content(text)
data = {
'name': utils.camel_to_snake(name).upper(),
'html_content': html_content,
'content': self.text_version(html_content),
'subject': self.get_subject(text),
'description': description,
'language': language
}
if models.EmailTemplate.objects.create(**data):
count += 1
return count
def text_version(self, html):
"""
Uses util to create a text email template
from a html one
"""
return utils.html_to_text(html)
def open_file(self, file_):
"""
Receives a file path has input and returns a
string with the contents of the file
"""
with open(file_, 'r', encoding='utf-8') as file:
text = ''
for line in file:
text += line
return text
def template_exists_db(self, template):
"""
Receives a template and checks if it exists in the database
using the template name and language
"""
name = utils.camel_to_snake(template[0]).upper()
language = utils.camel_to_snake(template[3])
try:
models.EmailTemplate.objects.get(name=name, language=language)
except models.EmailTemplate.DoesNotExist:
return False
return True
|
solocompt/plugs-mail
|
plugs_mail/management/commands/load_email_templates.py
|
Command.get_apps
|
python
|
def get_apps(self):
templates = []
for app in settings.INSTALLED_APPS:
try:
app = import_module(app + '.emails')
templates += self.get_plugs_mail_classes(app)
except ImportError:
pass
return templates
|
Get the list of installed apps
and return the apps that have
an emails module
|
train
|
https://github.com/solocompt/plugs-mail/blob/6139fa79ddb437562db1769d03bd3098c25a06fa/plugs_mail/management/commands/load_email_templates.py#L39-L52
| null |
class Command(BaseCommand):
overrides = {}
def handle(self, *args, **options):
self.override_default_templates()
templates = self.get_apps()
count = self.create_templates(templates)
if count:
self.stdout.write(self.style.SUCCESS('Successfully loaded %s email templates' % count))
else:
self.stdout.write(self.style.SUCCESS('No email templates to load'))
def override_default_templates(self):
"""
Override the default emails already defined by other apps
"""
if plugs_mail_settings['OVERRIDE_TEMPLATE_DIR']:
dir_ = plugs_mail_settings['OVERRIDE_TEMPLATE_DIR']
for file_ in os.listdir(dir_):
if file_.endswith(('.html', 'txt')):
self.overrides[file_] = dir_
def get_members(self, app):
return inspect.getmembers(app)
def get_templates_files_in_dir(self, dir_):
return os.listdir(dir_)
def get_template_files(self, location, class_name):
"""
Multilanguage support means that for each template
we can have multiple templtate files, this methods
returns all the template (html and txt) files
that match the (class) template name
"""
template_name = utils.camel_to_snake(class_name)
dir_ = location[:-9] + 'templates/emails/'
files_ = []
for file_ in self.get_templates_files_in_dir(dir_):
if file_.startswith(template_name) and file_.endswith(('.html', '.txt')):
if file_ in self.overrides:
files_.append(self.overrides[file_] + file_)
else:
files_.append(dir_ + file_)
return files_
def get_plugs_mail_classes(self, app):
"""
Returns a list of tuples, but it should
return a list of dicts
"""
classes = []
members = self.get_members(app)
for member in members:
name, cls = member
if inspect.isclass(cls) and issubclass(cls, PlugsMail) and name != 'PlugsMail':
files_ = self.get_template_files(app.__file__, name)
for file_ in files_:
try:
description = cls.description
location = file_
language = self.get_template_language(location)
classes.append((name, location, description, language))
except AttributeError:
raise AttributeError('Email class must specify email description.')
return classes
def get_template_language(self, file_):
"""
Return the template language
Every template file must end in
with the language code, and the
code must match the ISO_6301 lang code
https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes
valid examples:
account_created_pt.html
payment_created_en.txt
"""
stem = Path(file_).stem
language_code = stem.split('_')[-1:][0]
if len(language_code) != 2:
# TODO naive and temp implementation
# check if the two chars correspond to one of the
# available languages
raise Exception('Template file `%s` must end in ISO_639-1 language code.' % file_)
return language_code.lower()
def get_subject(self, text):
"""
Email template subject is the first
line of the email template, we can optionally
add SUBJECT: to make it clearer
"""
first_line = text.splitlines(True)[0]
# TODO second line should be empty
if first_line.startswith('SUBJECT:'):
subject = first_line[len('SUBJECT:'):]
else:
subject = first_line
return subject.strip()
def get_html_content(self, text):
"""
Parse content and return html
"""
lines = text.splitlines(True)
return ''.join(lines[2:])
def create_templates(self, templates):
"""
Gets a list of templates to insert into the database
"""
count = 0
for template in templates:
if not self.template_exists_db(template):
name, location, description, language = template
text = self.open_file(location)
html_content = self.get_html_content(text)
data = {
'name': utils.camel_to_snake(name).upper(),
'html_content': html_content,
'content': self.text_version(html_content),
'subject': self.get_subject(text),
'description': description,
'language': language
}
if models.EmailTemplate.objects.create(**data):
count += 1
return count
def text_version(self, html):
"""
Uses util to create a text email template
from a html one
"""
return utils.html_to_text(html)
def open_file(self, file_):
"""
Receives a file path has input and returns a
string with the contents of the file
"""
with open(file_, 'r', encoding='utf-8') as file:
text = ''
for line in file:
text += line
return text
def template_exists_db(self, template):
"""
Receives a template and checks if it exists in the database
using the template name and language
"""
name = utils.camel_to_snake(template[0]).upper()
language = utils.camel_to_snake(template[3])
try:
models.EmailTemplate.objects.get(name=name, language=language)
except models.EmailTemplate.DoesNotExist:
return False
return True
|
solocompt/plugs-mail
|
plugs_mail/management/commands/load_email_templates.py
|
Command.get_template_files
|
python
|
def get_template_files(self, location, class_name):
template_name = utils.camel_to_snake(class_name)
dir_ = location[:-9] + 'templates/emails/'
files_ = []
for file_ in self.get_templates_files_in_dir(dir_):
if file_.startswith(template_name) and file_.endswith(('.html', '.txt')):
if file_ in self.overrides:
files_.append(self.overrides[file_] + file_)
else:
files_.append(dir_ + file_)
return files_
|
Multilanguage support means that for each template
we can have multiple templtate files, this methods
returns all the template (html and txt) files
that match the (class) template name
|
train
|
https://github.com/solocompt/plugs-mail/blob/6139fa79ddb437562db1769d03bd3098c25a06fa/plugs_mail/management/commands/load_email_templates.py#L60-L76
| null |
class Command(BaseCommand):
overrides = {}
def handle(self, *args, **options):
self.override_default_templates()
templates = self.get_apps()
count = self.create_templates(templates)
if count:
self.stdout.write(self.style.SUCCESS('Successfully loaded %s email templates' % count))
else:
self.stdout.write(self.style.SUCCESS('No email templates to load'))
def override_default_templates(self):
"""
Override the default emails already defined by other apps
"""
if plugs_mail_settings['OVERRIDE_TEMPLATE_DIR']:
dir_ = plugs_mail_settings['OVERRIDE_TEMPLATE_DIR']
for file_ in os.listdir(dir_):
if file_.endswith(('.html', 'txt')):
self.overrides[file_] = dir_
def get_apps(self):
"""
Get the list of installed apps
and return the apps that have
an emails module
"""
templates = []
for app in settings.INSTALLED_APPS:
try:
app = import_module(app + '.emails')
templates += self.get_plugs_mail_classes(app)
except ImportError:
pass
return templates
def get_members(self, app):
return inspect.getmembers(app)
def get_templates_files_in_dir(self, dir_):
return os.listdir(dir_)
def get_plugs_mail_classes(self, app):
"""
Returns a list of tuples, but it should
return a list of dicts
"""
classes = []
members = self.get_members(app)
for member in members:
name, cls = member
if inspect.isclass(cls) and issubclass(cls, PlugsMail) and name != 'PlugsMail':
files_ = self.get_template_files(app.__file__, name)
for file_ in files_:
try:
description = cls.description
location = file_
language = self.get_template_language(location)
classes.append((name, location, description, language))
except AttributeError:
raise AttributeError('Email class must specify email description.')
return classes
def get_template_language(self, file_):
"""
Return the template language
Every template file must end in
with the language code, and the
code must match the ISO_6301 lang code
https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes
valid examples:
account_created_pt.html
payment_created_en.txt
"""
stem = Path(file_).stem
language_code = stem.split('_')[-1:][0]
if len(language_code) != 2:
# TODO naive and temp implementation
# check if the two chars correspond to one of the
# available languages
raise Exception('Template file `%s` must end in ISO_639-1 language code.' % file_)
return language_code.lower()
def get_subject(self, text):
"""
Email template subject is the first
line of the email template, we can optionally
add SUBJECT: to make it clearer
"""
first_line = text.splitlines(True)[0]
# TODO second line should be empty
if first_line.startswith('SUBJECT:'):
subject = first_line[len('SUBJECT:'):]
else:
subject = first_line
return subject.strip()
def get_html_content(self, text):
"""
Parse content and return html
"""
lines = text.splitlines(True)
return ''.join(lines[2:])
def create_templates(self, templates):
"""
Gets a list of templates to insert into the database
"""
count = 0
for template in templates:
if not self.template_exists_db(template):
name, location, description, language = template
text = self.open_file(location)
html_content = self.get_html_content(text)
data = {
'name': utils.camel_to_snake(name).upper(),
'html_content': html_content,
'content': self.text_version(html_content),
'subject': self.get_subject(text),
'description': description,
'language': language
}
if models.EmailTemplate.objects.create(**data):
count += 1
return count
def text_version(self, html):
"""
Uses util to create a text email template
from a html one
"""
return utils.html_to_text(html)
def open_file(self, file_):
"""
Receives a file path has input and returns a
string with the contents of the file
"""
with open(file_, 'r', encoding='utf-8') as file:
text = ''
for line in file:
text += line
return text
def template_exists_db(self, template):
"""
Receives a template and checks if it exists in the database
using the template name and language
"""
name = utils.camel_to_snake(template[0]).upper()
language = utils.camel_to_snake(template[3])
try:
models.EmailTemplate.objects.get(name=name, language=language)
except models.EmailTemplate.DoesNotExist:
return False
return True
|
solocompt/plugs-mail
|
plugs_mail/management/commands/load_email_templates.py
|
Command.get_plugs_mail_classes
|
python
|
def get_plugs_mail_classes(self, app):
classes = []
members = self.get_members(app)
for member in members:
name, cls = member
if inspect.isclass(cls) and issubclass(cls, PlugsMail) and name != 'PlugsMail':
files_ = self.get_template_files(app.__file__, name)
for file_ in files_:
try:
description = cls.description
location = file_
language = self.get_template_language(location)
classes.append((name, location, description, language))
except AttributeError:
raise AttributeError('Email class must specify email description.')
return classes
|
Returns a list of tuples, but it should
return a list of dicts
|
train
|
https://github.com/solocompt/plugs-mail/blob/6139fa79ddb437562db1769d03bd3098c25a06fa/plugs_mail/management/commands/load_email_templates.py#L78-L97
| null |
class Command(BaseCommand):
overrides = {}
def handle(self, *args, **options):
self.override_default_templates()
templates = self.get_apps()
count = self.create_templates(templates)
if count:
self.stdout.write(self.style.SUCCESS('Successfully loaded %s email templates' % count))
else:
self.stdout.write(self.style.SUCCESS('No email templates to load'))
def override_default_templates(self):
"""
Override the default emails already defined by other apps
"""
if plugs_mail_settings['OVERRIDE_TEMPLATE_DIR']:
dir_ = plugs_mail_settings['OVERRIDE_TEMPLATE_DIR']
for file_ in os.listdir(dir_):
if file_.endswith(('.html', 'txt')):
self.overrides[file_] = dir_
def get_apps(self):
"""
Get the list of installed apps
and return the apps that have
an emails module
"""
templates = []
for app in settings.INSTALLED_APPS:
try:
app = import_module(app + '.emails')
templates += self.get_plugs_mail_classes(app)
except ImportError:
pass
return templates
def get_members(self, app):
return inspect.getmembers(app)
def get_templates_files_in_dir(self, dir_):
return os.listdir(dir_)
def get_template_files(self, location, class_name):
"""
Multilanguage support means that for each template
we can have multiple templtate files, this methods
returns all the template (html and txt) files
that match the (class) template name
"""
template_name = utils.camel_to_snake(class_name)
dir_ = location[:-9] + 'templates/emails/'
files_ = []
for file_ in self.get_templates_files_in_dir(dir_):
if file_.startswith(template_name) and file_.endswith(('.html', '.txt')):
if file_ in self.overrides:
files_.append(self.overrides[file_] + file_)
else:
files_.append(dir_ + file_)
return files_
def get_template_language(self, file_):
"""
Return the template language
Every template file must end in
with the language code, and the
code must match the ISO_6301 lang code
https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes
valid examples:
account_created_pt.html
payment_created_en.txt
"""
stem = Path(file_).stem
language_code = stem.split('_')[-1:][0]
if len(language_code) != 2:
# TODO naive and temp implementation
# check if the two chars correspond to one of the
# available languages
raise Exception('Template file `%s` must end in ISO_639-1 language code.' % file_)
return language_code.lower()
def get_subject(self, text):
"""
Email template subject is the first
line of the email template, we can optionally
add SUBJECT: to make it clearer
"""
first_line = text.splitlines(True)[0]
# TODO second line should be empty
if first_line.startswith('SUBJECT:'):
subject = first_line[len('SUBJECT:'):]
else:
subject = first_line
return subject.strip()
def get_html_content(self, text):
"""
Parse content and return html
"""
lines = text.splitlines(True)
return ''.join(lines[2:])
def create_templates(self, templates):
"""
Gets a list of templates to insert into the database
"""
count = 0
for template in templates:
if not self.template_exists_db(template):
name, location, description, language = template
text = self.open_file(location)
html_content = self.get_html_content(text)
data = {
'name': utils.camel_to_snake(name).upper(),
'html_content': html_content,
'content': self.text_version(html_content),
'subject': self.get_subject(text),
'description': description,
'language': language
}
if models.EmailTemplate.objects.create(**data):
count += 1
return count
def text_version(self, html):
"""
Uses util to create a text email template
from a html one
"""
return utils.html_to_text(html)
def open_file(self, file_):
"""
Receives a file path has input and returns a
string with the contents of the file
"""
with open(file_, 'r', encoding='utf-8') as file:
text = ''
for line in file:
text += line
return text
def template_exists_db(self, template):
"""
Receives a template and checks if it exists in the database
using the template name and language
"""
name = utils.camel_to_snake(template[0]).upper()
language = utils.camel_to_snake(template[3])
try:
models.EmailTemplate.objects.get(name=name, language=language)
except models.EmailTemplate.DoesNotExist:
return False
return True
|
solocompt/plugs-mail
|
plugs_mail/management/commands/load_email_templates.py
|
Command.get_template_language
|
python
|
def get_template_language(self, file_):
stem = Path(file_).stem
language_code = stem.split('_')[-1:][0]
if len(language_code) != 2:
# TODO naive and temp implementation
# check if the two chars correspond to one of the
# available languages
raise Exception('Template file `%s` must end in ISO_639-1 language code.' % file_)
return language_code.lower()
|
Return the template language
Every template file must end in
with the language code, and the
code must match the ISO_6301 lang code
https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes
valid examples:
account_created_pt.html
payment_created_en.txt
|
train
|
https://github.com/solocompt/plugs-mail/blob/6139fa79ddb437562db1769d03bd3098c25a06fa/plugs_mail/management/commands/load_email_templates.py#L99-L118
| null |
class Command(BaseCommand):
overrides = {}
def handle(self, *args, **options):
self.override_default_templates()
templates = self.get_apps()
count = self.create_templates(templates)
if count:
self.stdout.write(self.style.SUCCESS('Successfully loaded %s email templates' % count))
else:
self.stdout.write(self.style.SUCCESS('No email templates to load'))
def override_default_templates(self):
"""
Override the default emails already defined by other apps
"""
if plugs_mail_settings['OVERRIDE_TEMPLATE_DIR']:
dir_ = plugs_mail_settings['OVERRIDE_TEMPLATE_DIR']
for file_ in os.listdir(dir_):
if file_.endswith(('.html', 'txt')):
self.overrides[file_] = dir_
def get_apps(self):
"""
Get the list of installed apps
and return the apps that have
an emails module
"""
templates = []
for app in settings.INSTALLED_APPS:
try:
app = import_module(app + '.emails')
templates += self.get_plugs_mail_classes(app)
except ImportError:
pass
return templates
def get_members(self, app):
return inspect.getmembers(app)
def get_templates_files_in_dir(self, dir_):
return os.listdir(dir_)
def get_template_files(self, location, class_name):
"""
Multilanguage support means that for each template
we can have multiple templtate files, this methods
returns all the template (html and txt) files
that match the (class) template name
"""
template_name = utils.camel_to_snake(class_name)
dir_ = location[:-9] + 'templates/emails/'
files_ = []
for file_ in self.get_templates_files_in_dir(dir_):
if file_.startswith(template_name) and file_.endswith(('.html', '.txt')):
if file_ in self.overrides:
files_.append(self.overrides[file_] + file_)
else:
files_.append(dir_ + file_)
return files_
def get_plugs_mail_classes(self, app):
"""
Returns a list of tuples, but it should
return a list of dicts
"""
classes = []
members = self.get_members(app)
for member in members:
name, cls = member
if inspect.isclass(cls) and issubclass(cls, PlugsMail) and name != 'PlugsMail':
files_ = self.get_template_files(app.__file__, name)
for file_ in files_:
try:
description = cls.description
location = file_
language = self.get_template_language(location)
classes.append((name, location, description, language))
except AttributeError:
raise AttributeError('Email class must specify email description.')
return classes
def get_subject(self, text):
"""
Email template subject is the first
line of the email template, we can optionally
add SUBJECT: to make it clearer
"""
first_line = text.splitlines(True)[0]
# TODO second line should be empty
if first_line.startswith('SUBJECT:'):
subject = first_line[len('SUBJECT:'):]
else:
subject = first_line
return subject.strip()
def get_html_content(self, text):
"""
Parse content and return html
"""
lines = text.splitlines(True)
return ''.join(lines[2:])
def create_templates(self, templates):
"""
Gets a list of templates to insert into the database
"""
count = 0
for template in templates:
if not self.template_exists_db(template):
name, location, description, language = template
text = self.open_file(location)
html_content = self.get_html_content(text)
data = {
'name': utils.camel_to_snake(name).upper(),
'html_content': html_content,
'content': self.text_version(html_content),
'subject': self.get_subject(text),
'description': description,
'language': language
}
if models.EmailTemplate.objects.create(**data):
count += 1
return count
def text_version(self, html):
"""
Uses util to create a text email template
from a html one
"""
return utils.html_to_text(html)
def open_file(self, file_):
"""
Receives a file path has input and returns a
string with the contents of the file
"""
with open(file_, 'r', encoding='utf-8') as file:
text = ''
for line in file:
text += line
return text
def template_exists_db(self, template):
"""
Receives a template and checks if it exists in the database
using the template name and language
"""
name = utils.camel_to_snake(template[0]).upper()
language = utils.camel_to_snake(template[3])
try:
models.EmailTemplate.objects.get(name=name, language=language)
except models.EmailTemplate.DoesNotExist:
return False
return True
|
solocompt/plugs-mail
|
plugs_mail/management/commands/load_email_templates.py
|
Command.get_subject
|
python
|
def get_subject(self, text):
first_line = text.splitlines(True)[0]
# TODO second line should be empty
if first_line.startswith('SUBJECT:'):
subject = first_line[len('SUBJECT:'):]
else:
subject = first_line
return subject.strip()
|
Email template subject is the first
line of the email template, we can optionally
add SUBJECT: to make it clearer
|
train
|
https://github.com/solocompt/plugs-mail/blob/6139fa79ddb437562db1769d03bd3098c25a06fa/plugs_mail/management/commands/load_email_templates.py#L120-L132
| null |
class Command(BaseCommand):
overrides = {}
def handle(self, *args, **options):
self.override_default_templates()
templates = self.get_apps()
count = self.create_templates(templates)
if count:
self.stdout.write(self.style.SUCCESS('Successfully loaded %s email templates' % count))
else:
self.stdout.write(self.style.SUCCESS('No email templates to load'))
def override_default_templates(self):
"""
Override the default emails already defined by other apps
"""
if plugs_mail_settings['OVERRIDE_TEMPLATE_DIR']:
dir_ = plugs_mail_settings['OVERRIDE_TEMPLATE_DIR']
for file_ in os.listdir(dir_):
if file_.endswith(('.html', 'txt')):
self.overrides[file_] = dir_
def get_apps(self):
"""
Get the list of installed apps
and return the apps that have
an emails module
"""
templates = []
for app in settings.INSTALLED_APPS:
try:
app = import_module(app + '.emails')
templates += self.get_plugs_mail_classes(app)
except ImportError:
pass
return templates
def get_members(self, app):
return inspect.getmembers(app)
def get_templates_files_in_dir(self, dir_):
return os.listdir(dir_)
def get_template_files(self, location, class_name):
"""
Multilanguage support means that for each template
we can have multiple templtate files, this methods
returns all the template (html and txt) files
that match the (class) template name
"""
template_name = utils.camel_to_snake(class_name)
dir_ = location[:-9] + 'templates/emails/'
files_ = []
for file_ in self.get_templates_files_in_dir(dir_):
if file_.startswith(template_name) and file_.endswith(('.html', '.txt')):
if file_ in self.overrides:
files_.append(self.overrides[file_] + file_)
else:
files_.append(dir_ + file_)
return files_
def get_plugs_mail_classes(self, app):
"""
Returns a list of tuples, but it should
return a list of dicts
"""
classes = []
members = self.get_members(app)
for member in members:
name, cls = member
if inspect.isclass(cls) and issubclass(cls, PlugsMail) and name != 'PlugsMail':
files_ = self.get_template_files(app.__file__, name)
for file_ in files_:
try:
description = cls.description
location = file_
language = self.get_template_language(location)
classes.append((name, location, description, language))
except AttributeError:
raise AttributeError('Email class must specify email description.')
return classes
def get_template_language(self, file_):
"""
Return the template language
Every template file must end in
with the language code, and the
code must match the ISO_6301 lang code
https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes
valid examples:
account_created_pt.html
payment_created_en.txt
"""
stem = Path(file_).stem
language_code = stem.split('_')[-1:][0]
if len(language_code) != 2:
# TODO naive and temp implementation
# check if the two chars correspond to one of the
# available languages
raise Exception('Template file `%s` must end in ISO_639-1 language code.' % file_)
return language_code.lower()
def get_html_content(self, text):
"""
Parse content and return html
"""
lines = text.splitlines(True)
return ''.join(lines[2:])
def create_templates(self, templates):
"""
Gets a list of templates to insert into the database
"""
count = 0
for template in templates:
if not self.template_exists_db(template):
name, location, description, language = template
text = self.open_file(location)
html_content = self.get_html_content(text)
data = {
'name': utils.camel_to_snake(name).upper(),
'html_content': html_content,
'content': self.text_version(html_content),
'subject': self.get_subject(text),
'description': description,
'language': language
}
if models.EmailTemplate.objects.create(**data):
count += 1
return count
def text_version(self, html):
"""
Uses util to create a text email template
from a html one
"""
return utils.html_to_text(html)
def open_file(self, file_):
"""
Receives a file path has input and returns a
string with the contents of the file
"""
with open(file_, 'r', encoding='utf-8') as file:
text = ''
for line in file:
text += line
return text
def template_exists_db(self, template):
"""
Receives a template and checks if it exists in the database
using the template name and language
"""
name = utils.camel_to_snake(template[0]).upper()
language = utils.camel_to_snake(template[3])
try:
models.EmailTemplate.objects.get(name=name, language=language)
except models.EmailTemplate.DoesNotExist:
return False
return True
|
solocompt/plugs-mail
|
plugs_mail/management/commands/load_email_templates.py
|
Command.create_templates
|
python
|
def create_templates(self, templates):
count = 0
for template in templates:
if not self.template_exists_db(template):
name, location, description, language = template
text = self.open_file(location)
html_content = self.get_html_content(text)
data = {
'name': utils.camel_to_snake(name).upper(),
'html_content': html_content,
'content': self.text_version(html_content),
'subject': self.get_subject(text),
'description': description,
'language': language
}
if models.EmailTemplate.objects.create(**data):
count += 1
return count
|
Gets a list of templates to insert into the database
|
train
|
https://github.com/solocompt/plugs-mail/blob/6139fa79ddb437562db1769d03bd3098c25a06fa/plugs_mail/management/commands/load_email_templates.py#L141-L161
| null |
class Command(BaseCommand):
overrides = {}
def handle(self, *args, **options):
self.override_default_templates()
templates = self.get_apps()
count = self.create_templates(templates)
if count:
self.stdout.write(self.style.SUCCESS('Successfully loaded %s email templates' % count))
else:
self.stdout.write(self.style.SUCCESS('No email templates to load'))
def override_default_templates(self):
"""
Override the default emails already defined by other apps
"""
if plugs_mail_settings['OVERRIDE_TEMPLATE_DIR']:
dir_ = plugs_mail_settings['OVERRIDE_TEMPLATE_DIR']
for file_ in os.listdir(dir_):
if file_.endswith(('.html', 'txt')):
self.overrides[file_] = dir_
def get_apps(self):
"""
Get the list of installed apps
and return the apps that have
an emails module
"""
templates = []
for app in settings.INSTALLED_APPS:
try:
app = import_module(app + '.emails')
templates += self.get_plugs_mail_classes(app)
except ImportError:
pass
return templates
def get_members(self, app):
return inspect.getmembers(app)
def get_templates_files_in_dir(self, dir_):
return os.listdir(dir_)
def get_template_files(self, location, class_name):
"""
Multilanguage support means that for each template
we can have multiple templtate files, this methods
returns all the template (html and txt) files
that match the (class) template name
"""
template_name = utils.camel_to_snake(class_name)
dir_ = location[:-9] + 'templates/emails/'
files_ = []
for file_ in self.get_templates_files_in_dir(dir_):
if file_.startswith(template_name) and file_.endswith(('.html', '.txt')):
if file_ in self.overrides:
files_.append(self.overrides[file_] + file_)
else:
files_.append(dir_ + file_)
return files_
def get_plugs_mail_classes(self, app):
"""
Returns a list of tuples, but it should
return a list of dicts
"""
classes = []
members = self.get_members(app)
for member in members:
name, cls = member
if inspect.isclass(cls) and issubclass(cls, PlugsMail) and name != 'PlugsMail':
files_ = self.get_template_files(app.__file__, name)
for file_ in files_:
try:
description = cls.description
location = file_
language = self.get_template_language(location)
classes.append((name, location, description, language))
except AttributeError:
raise AttributeError('Email class must specify email description.')
return classes
def get_template_language(self, file_):
"""
Return the template language
Every template file must end in
with the language code, and the
code must match the ISO_6301 lang code
https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes
valid examples:
account_created_pt.html
payment_created_en.txt
"""
stem = Path(file_).stem
language_code = stem.split('_')[-1:][0]
if len(language_code) != 2:
# TODO naive and temp implementation
# check if the two chars correspond to one of the
# available languages
raise Exception('Template file `%s` must end in ISO_639-1 language code.' % file_)
return language_code.lower()
def get_subject(self, text):
"""
Email template subject is the first
line of the email template, we can optionally
add SUBJECT: to make it clearer
"""
first_line = text.splitlines(True)[0]
# TODO second line should be empty
if first_line.startswith('SUBJECT:'):
subject = first_line[len('SUBJECT:'):]
else:
subject = first_line
return subject.strip()
def get_html_content(self, text):
"""
Parse content and return html
"""
lines = text.splitlines(True)
return ''.join(lines[2:])
def text_version(self, html):
"""
Uses util to create a text email template
from a html one
"""
return utils.html_to_text(html)
def open_file(self, file_):
"""
Receives a file path has input and returns a
string with the contents of the file
"""
with open(file_, 'r', encoding='utf-8') as file:
text = ''
for line in file:
text += line
return text
def template_exists_db(self, template):
"""
Receives a template and checks if it exists in the database
using the template name and language
"""
name = utils.camel_to_snake(template[0]).upper()
language = utils.camel_to_snake(template[3])
try:
models.EmailTemplate.objects.get(name=name, language=language)
except models.EmailTemplate.DoesNotExist:
return False
return True
|
solocompt/plugs-mail
|
plugs_mail/management/commands/load_email_templates.py
|
Command.open_file
|
python
|
def open_file(self, file_):
with open(file_, 'r', encoding='utf-8') as file:
text = ''
for line in file:
text += line
return text
|
Receives a file path has input and returns a
string with the contents of the file
|
train
|
https://github.com/solocompt/plugs-mail/blob/6139fa79ddb437562db1769d03bd3098c25a06fa/plugs_mail/management/commands/load_email_templates.py#L171-L180
| null |
class Command(BaseCommand):
overrides = {}
def handle(self, *args, **options):
self.override_default_templates()
templates = self.get_apps()
count = self.create_templates(templates)
if count:
self.stdout.write(self.style.SUCCESS('Successfully loaded %s email templates' % count))
else:
self.stdout.write(self.style.SUCCESS('No email templates to load'))
def override_default_templates(self):
"""
Override the default emails already defined by other apps
"""
if plugs_mail_settings['OVERRIDE_TEMPLATE_DIR']:
dir_ = plugs_mail_settings['OVERRIDE_TEMPLATE_DIR']
for file_ in os.listdir(dir_):
if file_.endswith(('.html', 'txt')):
self.overrides[file_] = dir_
def get_apps(self):
"""
Get the list of installed apps
and return the apps that have
an emails module
"""
templates = []
for app in settings.INSTALLED_APPS:
try:
app = import_module(app + '.emails')
templates += self.get_plugs_mail_classes(app)
except ImportError:
pass
return templates
def get_members(self, app):
return inspect.getmembers(app)
def get_templates_files_in_dir(self, dir_):
return os.listdir(dir_)
def get_template_files(self, location, class_name):
"""
Multilanguage support means that for each template
we can have multiple templtate files, this methods
returns all the template (html and txt) files
that match the (class) template name
"""
template_name = utils.camel_to_snake(class_name)
dir_ = location[:-9] + 'templates/emails/'
files_ = []
for file_ in self.get_templates_files_in_dir(dir_):
if file_.startswith(template_name) and file_.endswith(('.html', '.txt')):
if file_ in self.overrides:
files_.append(self.overrides[file_] + file_)
else:
files_.append(dir_ + file_)
return files_
def get_plugs_mail_classes(self, app):
"""
Returns a list of tuples, but it should
return a list of dicts
"""
classes = []
members = self.get_members(app)
for member in members:
name, cls = member
if inspect.isclass(cls) and issubclass(cls, PlugsMail) and name != 'PlugsMail':
files_ = self.get_template_files(app.__file__, name)
for file_ in files_:
try:
description = cls.description
location = file_
language = self.get_template_language(location)
classes.append((name, location, description, language))
except AttributeError:
raise AttributeError('Email class must specify email description.')
return classes
def get_template_language(self, file_):
"""
Return the template language
Every template file must end in
with the language code, and the
code must match the ISO_6301 lang code
https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes
valid examples:
account_created_pt.html
payment_created_en.txt
"""
stem = Path(file_).stem
language_code = stem.split('_')[-1:][0]
if len(language_code) != 2:
# TODO naive and temp implementation
# check if the two chars correspond to one of the
# available languages
raise Exception('Template file `%s` must end in ISO_639-1 language code.' % file_)
return language_code.lower()
def get_subject(self, text):
"""
Email template subject is the first
line of the email template, we can optionally
add SUBJECT: to make it clearer
"""
first_line = text.splitlines(True)[0]
# TODO second line should be empty
if first_line.startswith('SUBJECT:'):
subject = first_line[len('SUBJECT:'):]
else:
subject = first_line
return subject.strip()
def get_html_content(self, text):
"""
Parse content and return html
"""
lines = text.splitlines(True)
return ''.join(lines[2:])
def create_templates(self, templates):
"""
Gets a list of templates to insert into the database
"""
count = 0
for template in templates:
if not self.template_exists_db(template):
name, location, description, language = template
text = self.open_file(location)
html_content = self.get_html_content(text)
data = {
'name': utils.camel_to_snake(name).upper(),
'html_content': html_content,
'content': self.text_version(html_content),
'subject': self.get_subject(text),
'description': description,
'language': language
}
if models.EmailTemplate.objects.create(**data):
count += 1
return count
def text_version(self, html):
"""
Uses util to create a text email template
from a html one
"""
return utils.html_to_text(html)
def template_exists_db(self, template):
"""
Receives a template and checks if it exists in the database
using the template name and language
"""
name = utils.camel_to_snake(template[0]).upper()
language = utils.camel_to_snake(template[3])
try:
models.EmailTemplate.objects.get(name=name, language=language)
except models.EmailTemplate.DoesNotExist:
return False
return True
|
solocompt/plugs-mail
|
plugs_mail/management/commands/load_email_templates.py
|
Command.template_exists_db
|
python
|
def template_exists_db(self, template):
name = utils.camel_to_snake(template[0]).upper()
language = utils.camel_to_snake(template[3])
try:
models.EmailTemplate.objects.get(name=name, language=language)
except models.EmailTemplate.DoesNotExist:
return False
return True
|
Receives a template and checks if it exists in the database
using the template name and language
|
train
|
https://github.com/solocompt/plugs-mail/blob/6139fa79ddb437562db1769d03bd3098c25a06fa/plugs_mail/management/commands/load_email_templates.py#L183-L194
| null |
class Command(BaseCommand):
overrides = {}
def handle(self, *args, **options):
self.override_default_templates()
templates = self.get_apps()
count = self.create_templates(templates)
if count:
self.stdout.write(self.style.SUCCESS('Successfully loaded %s email templates' % count))
else:
self.stdout.write(self.style.SUCCESS('No email templates to load'))
def override_default_templates(self):
"""
Override the default emails already defined by other apps
"""
if plugs_mail_settings['OVERRIDE_TEMPLATE_DIR']:
dir_ = plugs_mail_settings['OVERRIDE_TEMPLATE_DIR']
for file_ in os.listdir(dir_):
if file_.endswith(('.html', 'txt')):
self.overrides[file_] = dir_
def get_apps(self):
"""
Get the list of installed apps
and return the apps that have
an emails module
"""
templates = []
for app in settings.INSTALLED_APPS:
try:
app = import_module(app + '.emails')
templates += self.get_plugs_mail_classes(app)
except ImportError:
pass
return templates
def get_members(self, app):
return inspect.getmembers(app)
def get_templates_files_in_dir(self, dir_):
return os.listdir(dir_)
def get_template_files(self, location, class_name):
"""
Multilanguage support means that for each template
we can have multiple templtate files, this methods
returns all the template (html and txt) files
that match the (class) template name
"""
template_name = utils.camel_to_snake(class_name)
dir_ = location[:-9] + 'templates/emails/'
files_ = []
for file_ in self.get_templates_files_in_dir(dir_):
if file_.startswith(template_name) and file_.endswith(('.html', '.txt')):
if file_ in self.overrides:
files_.append(self.overrides[file_] + file_)
else:
files_.append(dir_ + file_)
return files_
def get_plugs_mail_classes(self, app):
"""
Returns a list of tuples, but it should
return a list of dicts
"""
classes = []
members = self.get_members(app)
for member in members:
name, cls = member
if inspect.isclass(cls) and issubclass(cls, PlugsMail) and name != 'PlugsMail':
files_ = self.get_template_files(app.__file__, name)
for file_ in files_:
try:
description = cls.description
location = file_
language = self.get_template_language(location)
classes.append((name, location, description, language))
except AttributeError:
raise AttributeError('Email class must specify email description.')
return classes
def get_template_language(self, file_):
"""
Return the template language
Every template file must end in
with the language code, and the
code must match the ISO_6301 lang code
https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes
valid examples:
account_created_pt.html
payment_created_en.txt
"""
stem = Path(file_).stem
language_code = stem.split('_')[-1:][0]
if len(language_code) != 2:
# TODO naive and temp implementation
# check if the two chars correspond to one of the
# available languages
raise Exception('Template file `%s` must end in ISO_639-1 language code.' % file_)
return language_code.lower()
def get_subject(self, text):
"""
Email template subject is the first
line of the email template, we can optionally
add SUBJECT: to make it clearer
"""
first_line = text.splitlines(True)[0]
# TODO second line should be empty
if first_line.startswith('SUBJECT:'):
subject = first_line[len('SUBJECT:'):]
else:
subject = first_line
return subject.strip()
def get_html_content(self, text):
"""
Parse content and return html
"""
lines = text.splitlines(True)
return ''.join(lines[2:])
def create_templates(self, templates):
"""
Gets a list of templates to insert into the database
"""
count = 0
for template in templates:
if not self.template_exists_db(template):
name, location, description, language = template
text = self.open_file(location)
html_content = self.get_html_content(text)
data = {
'name': utils.camel_to_snake(name).upper(),
'html_content': html_content,
'content': self.text_version(html_content),
'subject': self.get_subject(text),
'description': description,
'language': language
}
if models.EmailTemplate.objects.create(**data):
count += 1
return count
def text_version(self, html):
"""
Uses util to create a text email template
from a html one
"""
return utils.html_to_text(html)
def open_file(self, file_):
"""
Receives a file path has input and returns a
string with the contents of the file
"""
with open(file_, 'r', encoding='utf-8') as file:
text = ''
for line in file:
text += line
return text
|
tetframework/Tonnikala
|
tonnikala/ir/generate.py
|
BaseIRGenerator.merge_text_nodes_on
|
python
|
def merge_text_nodes_on(self, node):
if not isinstance(node, ContainerNode) or not node.children:
return
new_children = []
text_run = []
for i in node.children:
if isinstance(i, Text) and not i.translatable:
text_run.append(i.escaped())
else:
if text_run:
new_children.append(EscapedText(''.join(text_run)))
text_run = []
new_children.append(i)
if text_run:
new_children.append(EscapedText(''.join(text_run)))
node.children = new_children
for i in node.children:
self.merge_text_nodes_on(i)
|
Merges all consecutive non-translatable text nodes into one
|
train
|
https://github.com/tetframework/Tonnikala/blob/99d168657da1b2372ff898254f80808ea8d1b83f/tonnikala/ir/generate.py#L57-L80
|
[
"def merge_text_nodes_on(self, node):\n \"\"\"Merges all consecutive non-translatable text nodes into one\"\"\"\n\n if not isinstance(node, ContainerNode) or not node.children:\n return\n\n new_children = []\n text_run = []\n for i in node.children:\n if isinstance(i, Text) and not i.translatable:\n text_run.append(i.escaped())\n else:\n if text_run:\n new_children.append(EscapedText(''.join(text_run)))\n text_run = []\n\n new_children.append(i)\n\n if text_run:\n new_children.append(EscapedText(''.join(text_run)))\n\n node.children = new_children\n for i in node.children:\n self.merge_text_nodes_on(i)\n"
] |
class BaseIRGenerator(object):
def __init__(self, filename=None, source=None, *a, **kw):
super(BaseIRGenerator, self).__init__(*a, **kw)
self.filename = filename
self.source = source
self.states = [{}]
self.tree = IRTree()
def syntax_error(self, message, lineno=None):
raise TemplateSyntaxError(
message,
lineno,
source=self.source,
filename=self.filename)
def merge_text_nodes(self, tree):
root = tree.root
self.merge_text_nodes_on(root)
return tree
@property
def state(self):
"""
Return the current state from the state stack
"""
return self.states[-1]
def push_state(self):
"""
Push a copy of the topmost state on top of the state stack,
returns the new top.
"""
new = dict(self.states[-1])
self.states.append(new)
return self.state
def pop_state(self):
"""
Pop the topmost state from the state stack, return
the *new* top
"""
self.states.pop()
return self.state
|
tetframework/Tonnikala
|
tonnikala/ir/generate.py
|
BaseIRGenerator.push_state
|
python
|
def push_state(self):
new = dict(self.states[-1])
self.states.append(new)
return self.state
|
Push a copy of the topmost state on top of the state stack,
returns the new top.
|
train
|
https://github.com/tetframework/Tonnikala/blob/99d168657da1b2372ff898254f80808ea8d1b83f/tonnikala/ir/generate.py#L95-L103
| null |
class BaseIRGenerator(object):
def __init__(self, filename=None, source=None, *a, **kw):
super(BaseIRGenerator, self).__init__(*a, **kw)
self.filename = filename
self.source = source
self.states = [{}]
self.tree = IRTree()
def syntax_error(self, message, lineno=None):
raise TemplateSyntaxError(
message,
lineno,
source=self.source,
filename=self.filename)
def merge_text_nodes_on(self, node):
"""Merges all consecutive non-translatable text nodes into one"""
if not isinstance(node, ContainerNode) or not node.children:
return
new_children = []
text_run = []
for i in node.children:
if isinstance(i, Text) and not i.translatable:
text_run.append(i.escaped())
else:
if text_run:
new_children.append(EscapedText(''.join(text_run)))
text_run = []
new_children.append(i)
if text_run:
new_children.append(EscapedText(''.join(text_run)))
node.children = new_children
for i in node.children:
self.merge_text_nodes_on(i)
def merge_text_nodes(self, tree):
root = tree.root
self.merge_text_nodes_on(root)
return tree
@property
def state(self):
"""
Return the current state from the state stack
"""
return self.states[-1]
def pop_state(self):
"""
Pop the topmost state from the state stack, return
the *new* top
"""
self.states.pop()
return self.state
|
tetframework/Tonnikala
|
tonnikala/ir/generate.py
|
BaseDOMIRGenerator.enter_node
|
python
|
def enter_node(self, ir_node):
this_is_cdata = (isinstance(ir_node, Element)
and ir_node.name in self.cdata_elements)
self.state['is_cdata'] = bool(self.state.get('is_cdata')) or this_is_cdata
|
Enter the given element; keeps track of `cdata`;
subclasses may extend by overriding
|
train
|
https://github.com/tetframework/Tonnikala/blob/99d168657da1b2372ff898254f80808ea8d1b83f/tonnikala/ir/generate.py#L162-L169
| null |
class BaseDOMIRGenerator(BaseIRGenerator):
def __init__(self, document=None, mode='html5', *a, **kw):
super(BaseDOMIRGenerator, self).__init__(*a, **kw)
self.dom_document = document
self.mode = mode
if mode in ['html', 'html5', 'xhtml']:
self.empty_elements = html5_empty_tags
self.cdata_elements = html5_cdata_elements
self.empty_tag_closing_string = ' />'
elif mode == 'xml':
self.empty_elements = all_set()
self.empty_tag_closing_string = '/>'
self.cdata_elements = set()
else: # pragma: no cover
raise ValueError("Unknown render mode '%s'" % mode)
def child_iter(self, node):
current = node.firstChild
while current:
yield current
current = current.nextSibling
def generate_attributes(self, ir_node, attrs=[], dynamic_attrs=None):
if dynamic_attrs:
ir_node.set_dynamic_attrs(dynamic_attrs)
for name, value in attrs:
ir_node.set_attribute(name, value)
def generate_ir_node(self, dom_node): # pragma: no cover
raise NotImplementedError('abstract method not implemented')
def exit_node(self, ir_node):
pass
def add_children(self, children, ir_node):
self.push_state()
self.enter_node(ir_node)
for dom_node in children:
node = self.generate_ir_node(dom_node)
if node:
ir_node.add_child(node)
self.exit_node(ir_node)
self.pop_state()
def render_constant_attributes(self, element):
cattr = element.get_constant_attributes()
code = []
for name, value in cattr.items():
if isinstance(value, EmptyAttrVal):
code.append(' ')
code.append(name)
else:
code.append(' %s="%s"' % (name, value.escaped()))
return ''.join(code)
def get_start_tag_nodes(self, element):
start_tag_nodes = []
pre_text_node = '<%s' % element.name
if element.attributes:
pre_text_node += self.render_constant_attributes(element)
start_tag_nodes.append(EscapedText(pre_text_node))
if element.mutable_attributes:
for n, v in element.mutable_attributes.items():
start_tag_nodes.append(MutableAttribute(n, v))
if element.dynamic_attrs:
start_tag_nodes.append(DynamicAttributes(element.dynamic_attrs))
return start_tag_nodes
def flatten_element_nodes_on(self, node):
new_children = []
recurse = False
for i in node.children:
if not isinstance(i, (Element, Comment)):
new_children.append(i)
continue
elif isinstance(i, Comment):
new_children.append(EscapedText('<!--'))
new_children.append(EscapedText(i.escaped()))
new_children.append(EscapedText('-->'))
else:
# this is complicated because of the stupid strip syntax :)
start_tag_nodes = self.get_start_tag_nodes(i)
end_tag_nodes = []
# if no children, then 1 guard is enough
if not i.children:
if i.name in self.empty_elements:
start_tag_nodes.append(
EscapedText(self.empty_tag_closing_string))
else:
start_tag_nodes.append(EscapedText('></%s>' % i.name))
else:
start_tag_nodes.append(EscapedText('>'))
end_tag_nodes = [EscapedText('</%s>' % i.name)]
child_nodes = []
for j in i.children:
child_nodes.append(j)
if isinstance(j, Element):
recurse = True
# if there is a guard...
guard = i.get_guard_expression()
if guard is not None:
start_tag = Unless(guard)
start_tag.children = start_tag_nodes
start_tag_nodes = [start_tag]
if end_tag_nodes:
end_tag = Unless(guard)
end_tag.children = end_tag_nodes
end_tag_nodes = [end_tag]
new_children.extend(start_tag_nodes)
new_children.extend(child_nodes)
new_children.extend(end_tag_nodes)
node.children = new_children
if recurse:
self.flatten_element_nodes_on(node)
for i in node.children:
if hasattr(i, 'children') and i.children:
self.flatten_element_nodes_on(i)
def flatten_element_nodes(self, tree):
root = tree.root
self.flatten_element_nodes_on(root)
return tree
def generate_tree(self):
root = Root()
self.tree.add_child(root)
self.add_children(self.child_iter(self.dom_document), root)
validator = Validator(self)
root.validate(validator)
return self.tree
|
tetframework/Tonnikala
|
tonnikala/i18n/__init__.py
|
extract_tonnikala
|
python
|
def extract_tonnikala(fileobj, keywords, comment_tags, options):
extractor = TonnikalaExtractor()
for msg in extractor(filename=None, fileobj=fileobj, options=Options()):
msgid = msg.msgid,
prefix = ''
if msg.msgid_plural:
msgid = (msg.msgid_plural,) + msgid
prefix = 'n'
if msg.msgctxt:
msgid = (msg.msgctxt,) + msgid
prefix += 'p'
yield (msg.location[1], prefix + 'gettext', msgid, msg.comment)
|
Extract messages from Tonnikala files.
:param fileobj: the file-like object the messages should be extracted
from
:param keywords: a list of keywords (i.e. function names) that should
be recognized as translation functions
:param comment_tags: a list of translator tags to search for and
include in the results
:param options: a dictionary of additional options (optional)
:return: an iterator over ``(lineno, funcname, message, comments)``
tuples
:rtype: ``iterator``
|
train
|
https://github.com/tetframework/Tonnikala/blob/99d168657da1b2372ff898254f80808ea8d1b83f/tonnikala/i18n/__init__.py#L57-L84
| null |
import io
from lingua.extractors import Extractor
from lingua.extractors import Message
from lingua.extractors.python import _extract_python
from tonnikala.ir.nodes import TranslatableText, Expression
from tonnikala.loader import parsers
class TonnikalaExtractor(Extractor):
"Extract strings from tonnikala templates, defaulting to Python expressions"
extensions = ['.tk']
syntax = 'tonnikala'
def parse_python(self, node, filename, lineno, options):
start_line = (node.position[0] or 1) + lineno
for message in _extract_python(
filename,
node.expression,
options,
start_line
):
yield Message(*message[:6],
location=(
filename,
lineno +
message.location[1]))
def __call__(self, filename, options, fileobj=None, lineno=0):
self.filename = filename
if fileobj is None:
fileobj = io.open(filename, encoding='utf-8')
parser_func = parsers.get(self.syntax)
source = fileobj.read()
if isinstance(source, bytes):
source = source.decode('UTF-8')
tree = parser_func(filename, source, translatable=True)
for node in tree:
if isinstance(node, TranslatableText):
yield Message(None, node.text, None, [], u'', u'',
(filename, lineno + (node.position[0] or 1)))
elif isinstance(node, Expression):
for m in self.parse_python(node, filename, lineno, options):
yield m
class Options:
keywords = {}
comment_tag = None
domain = None
|
tetframework/Tonnikala
|
tonnikala/languages/javascript/generator.py
|
FreeVariableAnalyzerVisitor.visit_Identifier
|
python
|
def visit_Identifier(self, node):
if not self._is_mangle_candidate(node):
return
name = node.value
symbol = node.scope.resolve(node.value)
if symbol is None:
self.free_variables.add(name)
|
Mangle names.
|
train
|
https://github.com/tetframework/Tonnikala/blob/99d168657da1b2372ff898254f80808ea8d1b83f/tonnikala/languages/javascript/generator.py#L49-L58
| null |
class FreeVariableAnalyzerVisitor(Visitor):
"""Mangles names.
Walks over a parsed tree and changes ID values to corresponding
mangled names.
"""
def __init__(self):
self.free_variables = set()
@staticmethod
def _is_mangle_candidate(id_node):
"""Return True if Identifier node is a candidate for mangling.
There are 5 cases when Identifier is a mangling candidate:
1. Function declaration identifier
2. Function expression identifier
3. Function declaration/expression parameter
4. Variable declaration identifier
5. Identifier is a part of an expression (primary_expr_no_brace rule)
"""
return getattr(id_node, '_mangle_candidate', False)
|
tetframework/Tonnikala
|
tonnikala/ir/nodes.py
|
Extends.add_child
|
python
|
def add_child(self, child):
if isinstance(child, Comment):
return
# ignore Text nodes with whitespace-only content
if isinstance(child, Text) and not child.text.strip():
return
super(Extends, self).add_child(child)
|
Add a child to the tree. Extends discards all comments
and whitespace Text. On non-whitespace Text, and any
other nodes, raise a syntax error.
|
train
|
https://github.com/tetframework/Tonnikala/blob/99d168657da1b2372ff898254f80808ea8d1b83f/tonnikala/ir/nodes.py#L334-L348
|
[
"def add_child(self, child):\n \"\"\"\n Add a child to the tree. Subclasses may raise SyntaxError\n \"\"\"\n self.children.append(child)\n"
] |
class Extends(ContainerNode):
def __init__(self, href):
super(Extends, self).__init__()
self.href = href
def __str__(self): # pragma: no cover
children = str(self.children)
return ', '.join([("(%s)" % self.expression), children])
def validate(self, validator):
for child in self.children:
if isinstance(child, Text):
validator.syntax_error(
"No Text allowed within an Extends block", node=child)
if not isinstance(child, (Block, Define, Import)):
validator.syntax_error(
"Only nodes of type Block, Import or Define "
"allowed within an Extends block, not %s" %
child.__class__.__name__,
child
)
super(Extends, self).validate(validator)
|
tetframework/Tonnikala
|
tonnikala/expr.py
|
_strip_dollars_fast
|
python
|
def _strip_dollars_fast(text):
def _sub(m):
if m.group(0) == '$$':
return '$'
raise HasExprException()
return _dollar_strip_re.sub(_sub, text)
|
Replace `$$` with `$`. raise immediately
if `$` starting an interpolated expression is found.
@param text: the source text
@return: the text with dollars replaced, or raise
HasExprException if there are interpolated expressions
|
train
|
https://github.com/tetframework/Tonnikala/blob/99d168657da1b2372ff898254f80808ea8d1b83f/tonnikala/expr.py#L17-L32
| null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
"""Tonnikala compiler. Produces source code from XML."""
import re
from tonnikala.ir.nodes import Text, DynamicText, TranslatableText
from tonnikala.languages import python
_dollar_strip_re = re.compile(r'\$[a-zA-Z_{$]')
class HasExprException(Exception):
pass
_expr_find_code = re.compile(r"""
([^$]+) # match any chars except \n or $ (group 1)
| (\$\$) # match double dollars (group 2)
| (\$[{a-zA-Z_]) # match beginning of expressions (group 3)
| (\$)
""", re.VERBOSE | re.DOTALL)
_strip_ws_re = re.compile(r"""
(\s*)
(.*?)
(\s*)$
""", re.VERBOSE | re.DOTALL)
def partition_translatable_text(text):
m = _strip_ws_re.match(text)
return m.groups()
def create_text_nodes(text, is_cdata=False, translatable=False):
if not translatable:
rv = Text(text)
rv.is_cdata = is_cdata
return rv
prefix, this, suffix = partition_translatable_text(text)
rv = []
if prefix:
rv.append(Text(prefix, is_cdata=is_cdata))
if this:
rv.append(TranslatableText(this, is_cdata=is_cdata))
if suffix:
rv.append(Text(suffix, is_cdata=is_cdata))
if len(rv) == 1:
return rv[0]
node = DynamicText()
for i in rv:
node.add_child(i)
return node
def handle_text_node(text, expr_parser=python.parse_expression, is_cdata=False,
translatable=False,
whole_translatable=False):
try:
text = _strip_dollars_fast(text)
return create_text_nodes(text, is_cdata=is_cdata,
translatable=translatable)
except HasExprException:
pass
nodes = []
stringrun = []
max_index = len(text)
pos = 0
while pos < len(text):
m = _expr_find_code.match(text, pos)
pos = m.end()
if m.group(1) != None: # any
stringrun.append(m.group(1))
elif m.group(2): # $$
stringrun.append('$')
elif m.group(3):
if stringrun:
nodes.append(create_text_nodes(''.join(stringrun),
translatable=translatable))
stringrun = []
expr = expr_parser(text, m.start(3))
pos = m.start(3) + len(expr.string)
nodes.append(expr)
else: # group 4, a sole $
stringrun.append('$')
if stringrun:
nodes.append(
create_text_nodes(''.join(stringrun), translatable=translatable))
if len(nodes) == 1:
return nodes[0]
node = DynamicText()
for i in nodes:
node.add_child(i)
node.is_cdata = is_cdata
for i in nodes:
i.is_cdata = is_cdata
i.translatable = translatable
if whole_translatable:
node.translatable = True
return node
|
tetframework/Tonnikala
|
tonnikala/languages/python/generator.py
|
adjust_locations
|
python
|
def adjust_locations(ast_node, first_lineno, first_offset):
line_delta = first_lineno - 1
def _fix(node):
if 'lineno' in node._attributes:
lineno = node.lineno
col = node.col_offset
# adjust the offset on the first line
if lineno == 1:
col += first_offset
lineno += line_delta
node.lineno = lineno
node.col_offset = col
for child in iter_child_nodes(node):
_fix(child)
_fix(ast_node)
|
Adjust the locations of the ast nodes, offsetting them
to the new lineno and column offset
|
train
|
https://github.com/tetframework/Tonnikala/blob/99d168657da1b2372ff898254f80808ea8d1b83f/tonnikala/languages/python/generator.py#L72-L97
|
[
"def _fix(node):\n if 'lineno' in node._attributes:\n lineno = node.lineno\n col = node.col_offset\n\n # adjust the offset on the first line\n if lineno == 1:\n col += first_offset\n\n lineno += line_delta\n\n node.lineno = lineno\n node.col_offset = col\n\n for child in iter_child_nodes(node):\n _fix(child)\n"
] |
# -*- coding: utf-8 -*-
# notice: this module cannot be sanely written to take use of
# unicode_literals, bc some of the arguments need to be str on
# both python2 and 3
from __future__ import absolute_import, division, print_function
import ast
from ast import *
from collections import Iterable
from .astalyzer import FreeVarFinder
from ..base import LanguageNode, ComplexNode, BaseGenerator
from ...compat import string_types, PY2
from ...helpers import StringWithLocation
from ...runtime.debug import TemplateSyntaxError
try: # pragma: no cover
import sysconfig
HAS_ASSERT = bool(sysconfig.get_config_var('Py_DEBUG'))
del sysconfig
except ImportError: # pragma: no cover
HAS_ASSERT = False
name_counter = 0
ALWAYS_BUILTINS = '''
False
True
None
'''.split()
def simple_call(func, args=None):
return Call(func=func, args=args or [], keywords=[], starargs=None,
kwargs=None)
if PY2: # pragma: python2
def create_argument_list(arguments):
return [Name(id=id, ctx=Param()) for id in arguments]
else: # pragma: python 3
def create_argument_list(arguments):
return [arg(arg=id, annotation=None) for id in arguments]
def simple_function_def(name, arguments=()):
arguments = create_argument_list(arguments)
return FunctionDef(
name=name,
args=ast.arguments(
args=arguments,
vararg=None,
varargannotation=None,
kwonlyargs=[],
kwarg=None,
kwargannotation=None,
defaults=[],
kw_defaults=[]),
body=[Pass()],
decorator_list=[],
returns=None
)
def NameX(id, store=False):
return Name(id=id, ctx=Load() if not store else Store())
def get_fragment_ast(expression, mode='eval', adjust=(0, 0)):
if not isinstance(expression, string_types):
return expression
t = None
position = getattr(expression, 'position', (1, 0))
position = position[0] + adjust[0], position[1] + adjust[1]
try:
exp = expression
if expression[-1:] != '\n':
exp = expression + '\n'
tree = ast.parse(exp, mode=mode)
except SyntaxError as e:
lineno = e.lineno
lineno += position[0] - 1
t = TemplateSyntaxError(e.msg, lineno=lineno)
if t:
raise t
adjust_locations(tree, position[0], position[1])
return tree.body
def gen_name(typename=None):
global name_counter
name_counter += 1
if typename:
return "__TK__typed__%s__%d__" % (typename, name_counter)
else:
return "__TK_%d__" % (name_counter)
def static_eval(expr):
if isinstance(expr, UnaryOp) and isinstance(expr.op, Not):
return not static_eval(expr.operand)
return literal_eval(expr)
def static_expr_to_bool(expr):
try:
return bool(static_eval(expr))
except:
return None
class PythonNode(LanguageNode):
is_top_level = False
def generate_output_ast(self, code, generator, parent, escape=False,
position=None):
func = Name(id='__TK__output', ctx=Load())
if not isinstance(code, list):
code = [code]
rv = []
for i in code:
if position is not None:
i.lineno, i.col_offset = position
e = Expr(simple_call(func, [i]))
e.output_args = [i]
rv.append(e)
return rv
def make_buffer_frame(self, body):
new_body = []
new_body.append(Assign(
targets=[
NameX('__TK__output', store=True),
],
value=simple_call(
NameX('__TK__mkbuffer')
)
))
new_body.extend(body)
new_body.append(Return(value=NameX('__TK__output')))
return new_body
def make_function(self, name, body, add_buffer=False, arguments=()):
# ensure that the function name is an str
func = simple_function_def(str(name), arguments=arguments)
new_body = func.body = []
if add_buffer:
new_body.extend(self.make_buffer_frame(body))
else:
new_body.extend(body)
if not new_body:
new_body.append(Pass())
return func
def generate_varscope(self, body):
name = gen_name('variable_scope')
rv = [
self.make_function(name, body,
arguments=['__TK__output', '__TK__escape']),
Expr(simple_call(NameX(name),
[NameX('__TK__output'), NameX('__TK__escape')]))
]
return rv
class PyOutputNode(PythonNode):
def __init__(self, text):
super(PyOutputNode, self).__init__()
self.text = text
def get_expressions(self):
return [self.get_expression()]
def get_expression(self):
return Str(s=self.text)
def generate_ast(self, generator, parent):
return self.generate_output_ast(self.get_expression(), generator,
parent)
class PyTranslatableOutputNode(PyOutputNode):
def __init__(self, text, needs_escape=False):
super(PyTranslatableOutputNode, self).__init__(text)
self.needs_escape = needs_escape
def get_expressions(self):
return [self.get_expression()]
def get_expression(self):
name = 'gettext'
if self.needs_escape:
name = 'egettext'
expr = simple_call(
NameX(name),
[Str(s=self.text)],
)
return expr
class PyExpressionNode(PythonNode):
def __init__(self, expression):
super(PyExpressionNode, self).__init__()
self.expr = expression
def get_expressions(self):
return [self.get_expression()]
def get_expression(self):
return simple_call(
NameX('__TK__escape'),
[self.get_unescaped_expression()]
)
def get_unescaped_expression(self):
return get_fragment_ast(self.expr)
def generate_ast(self, generator, parent):
return self.generate_output_ast(self.get_expression(), generator,
parent)
class PyCodeNode(PythonNode):
def __init__(self, source):
super(PyCodeNode, self).__init__()
self.source = source
def generate_ast(self, generator, parent):
return get_fragment_ast(self.source, mode='exec')
def coalesce_strings(args):
rv = []
str_on = None
for i in args:
if isinstance(i, Str):
if str_on:
str_on.s += i.s
continue
str_on = i
else:
str_on = None
rv.append(i)
return rv
class PyComplexNode(ComplexNode, PythonNode):
def generate_child_ast(self, generator, parent_for_children):
rv = []
for i in self.children:
rv.extend(i.generate_ast(generator, parent_for_children))
return rv
class PyIfNode(PyComplexNode):
def __init__(self, expression):
super(PyIfNode, self).__init__()
self.expression = expression
def generate_ast(self, generator, parent):
test = get_fragment_ast(self.expression)
boolean = static_expr_to_bool(test)
if boolean == False:
return []
if boolean == True:
return self.generate_child_ast(generator, parent)
node = If(
test=test,
body=self.generate_child_ast(generator, self),
orelse=[]
)
return [node]
def PyUnlessNode(self, expression):
expression = get_fragment_ast(expression)
expression = UnaryOp(op=Not(), operand=expression)
return PyIfNode(expression)
class PyImportNode(PythonNode):
def __init__(self, href, alias):
super(PyImportNode, self).__init__()
self.href = str(href)
self.alias = str(alias)
def generate_ast(self, generator, parent):
node = Assign(
targets=[NameX(str(self.alias), store=True)],
value=
simple_call(
func=
Attribute(value=NameX('__TK__runtime', store=False),
attr='import_defs', ctx=Load()),
args=[
NameX('__TK__original_context'),
Str(s=self.href)
]
)
)
if parent.is_top_level:
generator.add_top_level_import(str(self.alias), node)
return []
return [node]
class PyAttributeNode(PyComplexNode):
def __init__(self, name, value):
super(PyAttributeNode, self).__init__()
self.name = name
def get_expressions(self):
rv = []
for i in self.children:
rv.extend(i.get_expressions())
return rv
def generate_ast(self, generator, parent):
if len(self.children) == 1 and \
isinstance(self.children[0], PyExpressionNode):
# special case, the attribute contains a single
# expression, these are handled by
# __TK__output.output_boolean_attr,
# given the name, and unescaped expression!
return [Expr(simple_call(
func=Attribute(
value=NameX('__TK__output'),
attr='output_boolean_attr',
ctx=Load()
),
args=[
Str(s=self.name),
self.children[0].get_unescaped_expression()
]
))]
# otherwise just return the output for the attribute code
# like before
return self.generate_output_ast(
[Str(s=' %s="' % self.name)] +
self.get_expressions() +
[Str(s='"')],
generator, parent
)
class PyAttrsNode(PythonNode):
def __init__(self, expression):
super(PyAttrsNode, self).__init__()
self.expression = expression
def generate_ast(self, generator, parent):
expression = get_fragment_ast(self.expression)
output = simple_call(
NameX('__TK__output_attrs'),
args=[expression]
)
return self.generate_output_ast(output, generator, parent)
class PyForNode(PyComplexNode):
def __init__(self, target_and_expression, parts):
super(PyForNode, self).__init__()
self.target_and_expression = target_and_expression
def generate_contents(self, generator, parent):
lineno, col = getattr(self.target_and_expression, 'position', (1, 0))
body = get_fragment_ast(
StringWithLocation('for %s: pass' % self.target_and_expression,
lineno, col - 4),
'exec',
)
for_node = body[0]
for_node.body = self.generate_child_ast(generator, self)
return [for_node]
def generate_ast(self, generator, parent):
# TODO: this could be needed to be reinstantiated
# return self.generate_varscope(self.generate_contents())
return self.generate_contents(generator, parent)
class PyDefineNode(PyComplexNode):
def __init__(self, funcspec):
super(PyDefineNode, self).__init__()
self.position = getattr(funcspec, 'position', (1, 0))
if '(' not in funcspec:
funcspec += '()'
self.funcspec = funcspec
def generate_ast(self, generator, parent):
body = get_fragment_ast(
StringWithLocation('def %s: pass' % self.funcspec,
self.position[0], self.position[1] - 4),
"exec"
)
def_node = body[0]
name = self.funcspec.partition('(')[0]
def_node.body = self.make_buffer_frame(
self.generate_child_ast(generator, self)
)
# move the function out of the closure
if parent.is_top_level:
generator.add_top_def(def_node.name, def_node)
return []
return [def_node]
class PyComplexExprNode(PyComplexNode):
def get_expressions(self):
rv = []
for i in self.children:
if hasattr(i, 'get_expression'):
rv.append(i.get_expression())
else:
rv.extend(i.get_expressions())
return rv
def generate_ast(self, generator, parent=None):
return self.generate_output_ast(self.get_expressions(),
generator, parent)
class PyBlockNode(PyComplexNode):
def __init__(self, name):
super(PyBlockNode, self).__init__()
self.name = name
def generate_ast(self, generator, parent):
is_extended = isinstance(parent, PyExtendsNode)
name = self.name
blockfunc_name = '__TK__block__%s' % name
position = getattr(name, 'position', (1, 0))
body = get_fragment_ast(
StringWithLocation(
'def %s():pass' % blockfunc_name,
position[0], position[1] - 4),
'exec'
)
def_node = body[0]
def_node.body = self.make_buffer_frame(
self.generate_child_ast(generator, self)
)
if not isinstance(name, str): # pragma: python2
name = name.encode('UTF-8')
generator.add_block(str(name), def_node, blockfunc_name)
if not is_extended:
# call the block in place
return self.generate_output_ast(
[simple_call(NameX(str(self.name)), [])],
self,
parent,
position=position
)
else:
return []
class PyWithNode(PyComplexNode):
def __init__(self, vars):
super(PyWithNode, self).__init__()
self.vars = vars
def generate_ast(self, generator, parent=None):
var_defs = get_fragment_ast(self.vars, 'exec')
body = var_defs + self.generate_child_ast(generator, self)
return self.generate_varscope(body)
class PyExtendsNode(PyComplexNode):
is_top_level = True
def __init__(self, href):
super(PyExtendsNode, self).__init__()
self.href = href
def generate_ast(self, generator, parent=None):
generator.make_extended_template(self.href)
return self.generate_child_ast(generator, self)
def ast_equals(tree1, tree2):
x1 = ast.dump(tree1)
x2 = ast.dump(tree2)
return x1 == x2
def coalesce_outputs(tree):
"""
Coalesce the constant output expressions
__output__('foo')
__output__('bar')
__output__(baz)
__output__('xyzzy')
into
__output__('foobar', baz, 'xyzzy')
"""
coalesce_all_outputs = True
if coalesce_all_outputs:
should_coalesce = lambda n: True
else:
should_coalesce = lambda n: n.output_args[0].__class__ is Str
class OutputCoalescer(NodeVisitor):
def visit(self, node):
# if - else expression also has a body! it is not we want, though.
if hasattr(node, 'body') and isinstance(node.body, Iterable):
# coalesce continuous string output nodes
new_body = []
output_node = None
def coalesce_strs():
if output_node:
output_node.value.args[:] = \
coalesce_strings(output_node.value.args)
for i in node.body:
if hasattr(i, 'output_args') and should_coalesce(i):
if output_node:
if len(output_node.value.args) + len(i.output_args) > 250:
coalesce_strs()
output_node = i
else:
output_node.value.args.extend(i.output_args)
continue
output_node = i
else:
coalesce_strs()
output_node = None
new_body.append(i)
coalesce_strs()
node.body[:] = new_body
NodeVisitor.visit(self, node)
def check(self, node):
"""
Coalesce __TK__output(__TK__escape(literal(x))) into
__TK__output(x).
"""
if not ast_equals(node.func, NameX('__TK__output')):
return
for i in range(len(node.args)):
arg1 = node.args[i]
if not arg1.__class__.__name__ == 'Call':
continue
if not ast_equals(arg1.func, NameX('__TK__escape')):
continue
if len(arg1.args) != 1:
continue
arg2 = arg1.args[0]
if not arg2.__class__.__name__ == 'Call':
continue
if not ast_equals(arg2.func, NameX('literal')):
continue
if len(arg2.args) != 1:
continue
node.args[i] = arg2.args[0]
def visit_Call(self, node):
self.check(node)
self.generic_visit(node)
OutputCoalescer().visit(tree)
def remove_locations(node):
"""
Removes locations from the given AST tree completely
"""
def fix(node):
if 'lineno' in node._attributes and hasattr(node, 'lineno'):
del node.lineno
if 'col_offset' in node._attributes and hasattr(node, 'col_offset'):
del node.col_offset
for child in iter_child_nodes(node):
fix(child)
fix(node)
class PyRootNode(PyComplexNode):
    """Root of the IR tree; generates the whole compiled template module."""

    def __init__(self):
        super(PyRootNode, self).__init__()

    # the root is a top-level scope: defs/blocks/imports hang off it directly
    is_top_level = True

    def generate_ast(self, generator, parent=None):
        """Build the module AST: a __TK__binder function that wraps __main__,
        blocks and top-level defs, and injects free variables from context."""
        main_body = self.generate_child_ast(generator, self)
        extended = generator.extended_href
        toplevel_funcs = generator.blocks + generator.top_defs
        # do not generate __main__ for extended templates
        if not extended:
            main_func = self.make_function('__main__', main_body,
                add_buffer=True)
            generator.add_bind_decorator(main_func)
            toplevel_funcs = [main_func] + toplevel_funcs
        # analyze the set of free variables
        free_variables = set()
        for i in toplevel_funcs:
            fv_info = FreeVarFinder.for_ast(i)
            free_variables.update(fv_info.get_free_variables())
        # discard __TK__ variables, always builtin names True, False, None
        # from free variables.
        for i in list(free_variables):
            if i.startswith('__TK__') or i in ALWAYS_BUILTINS:
                free_variables.discard(i)
        # discard the names of toplevel funcs from free variables
        free_variables.difference_update(generator.top_level_names)
        # build the binder prologue as source text, then parse it to AST
        code = '__TK__mkbuffer = __TK__runtime.Buffer\n'
        code += '__TK__escape = __TK__escape_g = __TK__runtime.escape\n'
        code += '__TK__output_attrs = __TK__runtime.output_attrs\n'
        if extended:
            code += '__TK__parent_template = __TK__runtime.load(%r)\n' % \
                extended
        code += 'def __TK__binder(__TK__context):\n'
        code += ' __TK__original_context = __TK__context.copy()\n'
        code += ' __TK__bind = __TK__runtime.bind(__TK__context)\n'
        code += ' __TK__bindblock = __TK__runtime.bind(__TK__context, ' \
            'block=True)\n'
        # bind gettext early!
        for i in ['egettext']:
            if i in free_variables:
                free_variables.add('gettext')
                free_variables.discard(i)
        if 'gettext' in free_variables:
            code += ' def egettext(msg):\n'
            code += ' return __TK__escape(gettext(msg))\n'
            code += ' gettext = __TK__context["gettext"]\n'
            free_variables.discard('gettext')
        code += ' raise\n'  # a placeholder, replaced by the real funcs below
        if extended:
            # an extended template does not have a __main__ (it is inherited)
            code += ' __TK__parent_template.binder_func(__TK__context)\n'
        for i in free_variables:
            code += ' if "%s" in __TK__context:\n' % i
            code += ' %s = __TK__context["%s"]\n' % (i, i)
        code += ' return __TK__context\n'
        tree = ast.parse(code)
        remove_locations(tree)

        class LocatorAndTransformer(ast.NodeTransformer):
            # remembers the first __TK__binder FunctionDef encountered
            binder = None

            def visit_FunctionDef(self, node):
                if node.name == '__TK__binder' and not self.binder:
                    self.binder = node
                self.generic_visit(node)
                return node

        locator = LocatorAndTransformer()
        locator.visit(tree)
        # inject the other top level funcs in the binder
        binder = locator.binder
        # find the Raise placeholder and splice the real functions over it
        for i, e in enumerate(binder.body):
            if isinstance(e, Raise):
                break
        binder.body[i:i + 1] = toplevel_funcs
        binder.body[i:i] = generator.imports
        coalesce_outputs(tree)
        return tree
# noinspection PyProtectedMember
class LocationMapper(object):
    """Renumbers AST locations into a compact line sequence, recording a
    map from new (mapped) line numbers back to original ones."""

    def __init__(self):
        # mapped lineno -> original lineno; line 1 always maps to itself
        self.lineno_map = {1: 1}
        self.prev_original_line = 1
        self.prev_mapped_line = 1
        self.prev_column = 0

    def map_linenos(self, node):
        """Recursively rewrite node.lineno (and track col_offset) in place."""
        if 'lineno' in node._attributes:
            if hasattr(node, 'lineno'):
                # a new mapped line starts only when the original line changes
                if node.lineno != self.prev_original_line:
                    self.prev_mapped_line += 1
                    self.lineno_map[self.prev_mapped_line] = node.lineno
                    self.prev_original_line = node.lineno
                node.lineno = self.prev_mapped_line
        if 'col_offset' in node._attributes:
            if hasattr(node, 'col_offset'):
                # NOTE(review): col_offset is reassigned to itself; only the
                # prev_column bookkeeping has an effect here — confirm intent
                self.prev_column = node.col_offset
                node.col_offset = self.prev_column
        for child in iter_child_nodes(node):
            self.map_linenos(child)
class Generator(BaseGenerator):
    """Python code generator: maps IR node kinds to their Py* classes and
    accumulates top-level defs, blocks and imports during generation."""

    # IR node kind -> Python-emitting node class
    OutputNode = PyOutputNode
    TranslatableOutputNode = PyTranslatableOutputNode
    IfNode = PyIfNode
    ForNode = PyForNode
    DefineNode = PyDefineNode
    ComplexExprNode = PyComplexExprNode
    ExpressionNode = PyExpressionNode
    ImportNode = PyImportNode
    RootNode = PyRootNode
    AttributeNode = PyAttributeNode
    AttrsNode = PyAttrsNode
    UnlessNode = PyUnlessNode
    ExtendsNode = PyExtendsNode
    BlockNode = PyBlockNode
    CodeNode = PyCodeNode
    WithNode = PyWithNode

    def __init__(self, ir_tree):
        super(Generator, self).__init__(ir_tree)
        self.blocks = []  # block FunctionDefs hoisted into the binder
        self.top_defs = []  # top-level def FunctionDefs hoisted into the binder
        self.top_level_names = set()  # names excluded from free-variable scan
        self.extended_href = None  # href of the parent template, if extending
        self.imports = []  # import-defs assignments for the binder
        self.lnotab = None  # mapped-line -> original-line table

    def add_bind_decorator(self, func, block=True):
        # decorate func with __TK__bind / __TK__bindblock inside the binder
        binder_call = NameX('__TK__bind' + ('block' if block else ''))
        decors = [binder_call]
        func.decorator_list = decors

    def add_block(self, name, blockfunc, blockfunc_name):
        """Register a named, overridable template block function."""
        self.top_level_names.add(blockfunc_name)
        self.add_bind_decorator(blockfunc, block=True)
        self.blocks.append(blockfunc)

    def add_top_def(self, name, defblock):
        """Register a top-level template def function."""
        self.top_level_names.add(name)
        self.add_bind_decorator(defblock)
        self.top_defs.append(defblock)

    def add_top_level_import(self, name, node):
        """Register a top-level import-defs alias assignment."""
        self.top_level_names.add(name)
        self.imports.append(node)

    def make_extended_template(self, href):
        # mark this template as extending (inheriting from) another
        self.extended_href = href

    def lnotab_info(self):
        """Return the generated-line -> original-line map."""
        return self.lnotab

    def generate_ast(self):
        tree = super(Generator, self).generate_ast()
        loc_mapper = LocationMapper()
        loc_mapper.map_linenos(tree)
        self.lnotab = loc_mapper.lineno_map
        return tree
|
tetframework/Tonnikala
|
tonnikala/languages/python/generator.py
|
coalesce_outputs
|
python
|
def coalesce_outputs(tree):
coalesce_all_outputs = True
if coalesce_all_outputs:
should_coalesce = lambda n: True
else:
should_coalesce = lambda n: n.output_args[0].__class__ is Str
class OutputCoalescer(NodeVisitor):
def visit(self, node):
# if - else expression also has a body! it is not we want, though.
if hasattr(node, 'body') and isinstance(node.body, Iterable):
# coalesce continuous string output nodes
new_body = []
output_node = None
def coalesce_strs():
if output_node:
output_node.value.args[:] = \
coalesce_strings(output_node.value.args)
for i in node.body:
if hasattr(i, 'output_args') and should_coalesce(i):
if output_node:
if len(output_node.value.args) + len(i.output_args) > 250:
coalesce_strs()
output_node = i
else:
output_node.value.args.extend(i.output_args)
continue
output_node = i
else:
coalesce_strs()
output_node = None
new_body.append(i)
coalesce_strs()
node.body[:] = new_body
NodeVisitor.visit(self, node)
def check(self, node):
"""
Coalesce __TK__output(__TK__escape(literal(x))) into
__TK__output(x).
"""
if not ast_equals(node.func, NameX('__TK__output')):
return
for i in range(len(node.args)):
arg1 = node.args[i]
if not arg1.__class__.__name__ == 'Call':
continue
if not ast_equals(arg1.func, NameX('__TK__escape')):
continue
if len(arg1.args) != 1:
continue
arg2 = arg1.args[0]
if not arg2.__class__.__name__ == 'Call':
continue
if not ast_equals(arg2.func, NameX('literal')):
continue
if len(arg2.args) != 1:
continue
node.args[i] = arg2.args[0]
def visit_Call(self, node):
self.check(node)
self.generic_visit(node)
OutputCoalescer().visit(tree)
|
Coalesce the constant output expressions
__output__('foo')
__output__('bar')
__output__(baz)
__output__('xyzzy')
into
__output__('foobar', baz, 'xyzzy')
|
train
|
https://github.com/tetframework/Tonnikala/blob/99d168657da1b2372ff898254f80808ea8d1b83f/tonnikala/languages/python/generator.py#L560-L651
| null |
# -*- coding: utf-8 -*-
# notice: this module cannot be sanely written to take use of
# unicode_literals, bc some of the arguments need to be str on
# both python2 and 3
from __future__ import absolute_import, division, print_function
import ast
from ast import *
from collections import Iterable
from .astalyzer import FreeVarFinder
from ..base import LanguageNode, ComplexNode, BaseGenerator
from ...compat import string_types, PY2
from ...helpers import StringWithLocation
from ...runtime.debug import TemplateSyntaxError
# Py_DEBUG builds enable C-level asserts; used to toggle debug-only behavior.
try:  # pragma: no cover
    import sysconfig
    HAS_ASSERT = bool(sysconfig.get_config_var('Py_DEBUG'))
    del sysconfig
except ImportError:  # pragma: no cover
    HAS_ASSERT = False

# monotonically increasing suffix for generated internal names (see gen_name)
name_counter = 0

# builtin constant names that are never treated as template context variables
ALWAYS_BUILTINS = '''
False
True
None
'''.split()
def simple_call(func, args=None):
    """Build a Call node for ``func(*args)`` with no keyword arguments."""
    positional = args or []
    return Call(func=func, args=positional, keywords=[],
                starargs=None, kwargs=None)
if PY2:  # pragma: python2
    def create_argument_list(arguments):
        """Signature arg nodes for the given names (py2: Name with Param)."""
        return [Name(id=id, ctx=Param()) for id in arguments]
else:  # pragma: python 3
    def create_argument_list(arguments):
        """Signature arg nodes for the given names (py3: dedicated arg)."""
        return [arg(arg=id, annotation=None) for id in arguments]
def simple_function_def(name, arguments=()):
    """Build a FunctionDef with simple positional args and a pass body."""
    arg_nodes = create_argument_list(arguments)
    # varargannotation/kwargannotation are py2-era attributes kept for compat
    signature = ast.arguments(
        args=arg_nodes,
        vararg=None,
        varargannotation=None,
        kwonlyargs=[],
        kwarg=None,
        kwargannotation=None,
        defaults=[],
        kw_defaults=[])
    return FunctionDef(
        name=name,
        args=signature,
        body=[Pass()],
        decorator_list=[],
        returns=None
    )
def NameX(id, store=False):
    """Shorthand Name node: Load context by default, Store when requested."""
    ctx = Store() if store else Load()
    return Name(id=id, ctx=ctx)
def adjust_locations(ast_node, first_lineno, first_offset):
    """
    Shift every node location so the fragment starts at
    (first_lineno, first_offset); the column offset applies to
    first-line nodes only.
    """
    line_delta = first_lineno - 1

    def _shift(node):
        if 'lineno' in node._attributes:
            new_col = node.col_offset
            # only nodes on the fragment's first line move horizontally
            if node.lineno == 1:
                new_col += first_offset
            node.lineno = node.lineno + line_delta
            node.col_offset = new_col
        for child in iter_child_nodes(node):
            _shift(child)

    _shift(ast_node)
def get_fragment_ast(expression, mode='eval', adjust=(0, 0)):
    """Parse a template expression string into an AST fragment.

    Non-strings are assumed to be AST already and returned unchanged.
    Node locations are shifted to the expression's position within the
    template (``expression.position`` when present) plus ``adjust``.
    Raises TemplateSyntaxError with a template-relative line number.
    """
    if not isinstance(expression, string_types):
        return expression

    t = None
    position = getattr(expression, 'position', (1, 0))
    position = position[0] + adjust[0], position[1] + adjust[1]
    try:
        exp = expression
        # ast.parse wants a trailing newline
        if expression[-1:] != '\n':
            exp = expression + '\n'

        tree = ast.parse(exp, mode=mode)
    except SyntaxError as e:
        lineno = e.lineno
        lineno += position[0] - 1
        t = TemplateSyntaxError(e.msg, lineno=lineno)

    # raised outside the except block, presumably so the SyntaxError does
    # not become the chained context of the TemplateSyntaxError — confirm
    if t:
        raise t

    adjust_locations(tree, position[0], position[1])
    return tree.body
def gen_name(typename=None):
    """Return a fresh unique internal identifier, optionally type-tagged."""
    global name_counter
    name_counter += 1
    if not typename:
        return "__TK_%d__" % (name_counter)
    return "__TK__typed__%s__%d__" % (typename, name_counter)
def static_eval(expr):
    """literal_eval extended to evaluate unary ``not`` recursively."""
    is_negation = isinstance(expr, UnaryOp) and isinstance(expr.op, Not)
    if is_negation:
        return not static_eval(expr.operand)
    return literal_eval(expr)
def static_expr_to_bool(expr):
    """Truth value of a statically evaluable expression, else None.

    Returns True/False when ``expr`` can be evaluated by static_eval;
    returns None when it cannot (value only known at render time).
    """
    try:
        return bool(static_eval(expr))
    except Exception:
        # narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
        # are not swallowed while probing for static evaluability
        return None
class PythonNode(LanguageNode):
    """Base class for all Python-emitting IR nodes."""

    # only root/extends scopes host top-level defs, blocks and imports
    is_top_level = False

    def generate_output_ast(self, code, generator, parent, escape=False,
                            position=None):
        """Wrap each given expression in a ``__TK__output(...)`` statement."""
        func = Name(id='__TK__output', ctx=Load())
        if not isinstance(code, list):
            code = [code]
        rv = []
        for i in code:
            if position is not None:
                i.lineno, i.col_offset = position
            e = Expr(simple_call(func, [i]))
            # remembered so OutputCoalescer can merge adjacent output calls
            e.output_args = [i]
            rv.append(e)
        return rv

    def make_buffer_frame(self, body):
        """Prefix body with a fresh output buffer; suffix with its return."""
        new_body = []
        new_body.append(Assign(
            targets=[
                NameX('__TK__output', store=True),
            ],
            value=simple_call(
                NameX('__TK__mkbuffer')
            )
        ))
        new_body.extend(body)
        new_body.append(Return(value=NameX('__TK__output')))
        return new_body

    def make_function(self, name, body, add_buffer=False, arguments=()):
        """Build a FunctionDef named ``name`` holding ``body``."""
        # ensure that the function name is an str
        func = simple_function_def(str(name), arguments=arguments)
        new_body = func.body = []
        if add_buffer:
            new_body.extend(self.make_buffer_frame(body))
        else:
            new_body.extend(body)
        if not new_body:
            new_body.append(Pass())
        return func

    def generate_varscope(self, body):
        """Run body inside a nested function to get a private name scope."""
        name = gen_name('variable_scope')
        rv = [
            self.make_function(name, body,
                               arguments=['__TK__output', '__TK__escape']),
            Expr(simple_call(NameX(name),
                             [NameX('__TK__output'), NameX('__TK__escape')]))
        ]
        return rv
class PyOutputNode(PythonNode):
    """Node that emits a constant text chunk to the output buffer."""

    def __init__(self, text):
        super(PyOutputNode, self).__init__()
        self.text = text

    def get_expression(self):
        # the literal text as a Str AST node
        return Str(s=self.text)

    def get_expressions(self):
        return [self.get_expression()]

    def generate_ast(self, generator, parent):
        literal = self.get_expression()
        return self.generate_output_ast(literal, generator, parent)
class PyTranslatableOutputNode(PyOutputNode):
    """Constant text routed through gettext, optionally escaped."""

    def __init__(self, text, needs_escape=False):
        super(PyTranslatableOutputNode, self).__init__(text)
        self.needs_escape = needs_escape

    def get_expression(self):
        # egettext is the escaping variant defined in the binder prologue
        func_name = 'egettext' if self.needs_escape else 'gettext'
        return simple_call(NameX(func_name), [Str(s=self.text)])

    def get_expressions(self):
        return [self.get_expression()]
class PyExpressionNode(PythonNode):
    """An interpolated template expression, escaped on output."""

    def __init__(self, expression):
        super(PyExpressionNode, self).__init__()
        self.expr = expression

    def get_unescaped_expression(self):
        # the raw parsed expression, without the escape wrapper
        return get_fragment_ast(self.expr)

    def get_expression(self):
        return simple_call(NameX('__TK__escape'),
                           [self.get_unescaped_expression()])

    def get_expressions(self):
        return [self.get_expression()]

    def generate_ast(self, generator, parent):
        escaped = self.get_expression()
        return self.generate_output_ast(escaped, generator, parent)
class PyCodeNode(PythonNode):
    """Verbatim code embedded in the template, emitted as statements."""

    def __init__(self, source):
        super(PyCodeNode, self).__init__()
        self.source = source

    def generate_ast(self, generator, parent):
        # parse as statements ('exec'), not as a single expression
        return get_fragment_ast(self.source, mode='exec')
def coalesce_strings(args):
    """Merge runs of consecutive Str nodes in args into single Str nodes."""
    merged = []
    current = None  # the Str node currently accumulating text, if any
    for node in args:
        if isinstance(node, Str):
            if current is not None:
                # extend the running Str in place and drop this node
                current.s += node.s
                continue
            current = node
        else:
            current = None
        merged.append(node)
    return merged
class PyComplexNode(ComplexNode, PythonNode):
    """A Python node with children; generates their statements in order."""

    def generate_child_ast(self, generator, parent_for_children):
        """Concatenate the generated statement lists of all child nodes."""
        rv = []
        for i in self.children:
            rv.extend(i.generate_ast(generator, parent_for_children))
        return rv
class PyIfNode(PyComplexNode):
    """Conditional section; statically folds constant conditions."""

    def __init__(self, expression):
        super(PyIfNode, self).__init__()
        self.expression = expression

    def generate_ast(self, generator, parent):
        test = get_fragment_ast(self.expression)
        # True/False when statically decidable, None otherwise;
        # use identity tests (PEP 8 E712) on the tri-state result
        boolean = static_expr_to_bool(test)

        if boolean is False:
            # statically false: drop the whole section
            return []

        if boolean is True:
            # statically true: inline children without an If node
            return self.generate_child_ast(generator, parent)

        node = If(
            test=test,
            body=self.generate_child_ast(generator, self),
            orelse=[]
        )
        return [node]
def PyUnlessNode(self, expression):
    """Factory: build a PyIfNode testing the negation of expression."""
    condition = get_fragment_ast(expression)
    negated = UnaryOp(op=Not(), operand=condition)
    return PyIfNode(negated)
class PyImportNode(PythonNode):
    """Template import: binds another template's defs to a local alias."""

    def __init__(self, href, alias):
        super(PyImportNode, self).__init__()
        self.href = str(href)
        self.alias = str(alias)

    def generate_ast(self, generator, parent):
        # emits: alias = __TK__runtime.import_defs(__TK__original_context, href)
        node = Assign(
            targets=[NameX(str(self.alias), store=True)],
            value=
            simple_call(
                func=
                Attribute(value=NameX('__TK__runtime', store=False),
                          attr='import_defs', ctx=Load()),
                args=[
                    NameX('__TK__original_context'),
                    Str(s=self.href)
                ]
            )
        )
        if parent.is_top_level:
            # hoisted to binder level; nothing to emit in place
            generator.add_top_level_import(str(self.alias), node)
            return []

        return [node]
class PyAttributeNode(PyComplexNode):
    """An element attribute whose value is assembled from child nodes."""

    def __init__(self, name, value):
        super(PyAttributeNode, self).__init__()
        self.name = name
        # NOTE(review): `value` is accepted but never stored; the attribute
        # value apparently arrives via child nodes instead — confirm.

    def get_expressions(self):
        """All child expressions composing the attribute value."""
        rv = []
        for i in self.children:
            rv.extend(i.get_expressions())
        return rv

    def generate_ast(self, generator, parent):
        if len(self.children) == 1 and \
                isinstance(self.children[0], PyExpressionNode):

            # special case, the attribute contains a single
            # expression, these are handled by
            # __TK__output.output_boolean_attr,
            # given the name, and unescaped expression!
            return [Expr(simple_call(
                func=Attribute(
                    value=NameX('__TK__output'),
                    attr='output_boolean_attr',
                    ctx=Load()
                ),
                args=[
                    Str(s=self.name),
                    self.children[0].get_unescaped_expression()
                ]
            ))]

        # otherwise just return the output for the attribute code
        # like before
        return self.generate_output_ast(
            [Str(s=' %s="' % self.name)] +
            self.get_expressions() +
            [Str(s='"')],
            generator, parent
        )
class PyAttrsNode(PythonNode):
    """Outputs a whole attribute mapping computed from an expression."""

    def __init__(self, expression):
        super(PyAttrsNode, self).__init__()
        self.expression = expression

    def generate_ast(self, generator, parent):
        attrs_expr = get_fragment_ast(self.expression)
        # __TK__output_attrs renders the mapping as attribute text
        rendered = simple_call(NameX('__TK__output_attrs'),
                               args=[attrs_expr])
        return self.generate_output_ast(rendered, generator, parent)
class PyForNode(PyComplexNode):
    """A for-loop section iterating the node's children."""

    def __init__(self, target_and_expression, parts):
        super(PyForNode, self).__init__()
        # NOTE(review): `parts` is accepted but unused here — confirm intent
        self.target_and_expression = target_and_expression

    def generate_contents(self, generator, parent):
        lineno, col = getattr(self.target_and_expression, 'position', (1, 0))
        # reuse Python's parser on "for <spec>: pass";
        # col - 4 compensates for the prepended "for " text
        body = get_fragment_ast(
            StringWithLocation('for %s: pass' % self.target_and_expression,
                               lineno, col - 4),
            'exec',
        )
        for_node = body[0]
        for_node.body = self.generate_child_ast(generator, self)
        return [for_node]

    def generate_ast(self, generator, parent):
        # TODO: this could be needed to be reinstantiated
        # return self.generate_varscope(self.generate_contents())
        return self.generate_contents(generator, parent)
class PyDefineNode(PyComplexNode):
    """A template def: a function returning its rendered body."""

    def __init__(self, funcspec):
        super(PyDefineNode, self).__init__()
        self.position = getattr(funcspec, 'position', (1, 0))
        # allow a bare name; normalize to a call signature
        if '(' not in funcspec:
            funcspec += '()'

        self.funcspec = funcspec

    def generate_ast(self, generator, parent):
        # reuse Python's parser on "def <funcspec>: pass";
        # col - 4 compensates for the prepended "def " text
        body = get_fragment_ast(
            StringWithLocation('def %s: pass' % self.funcspec,
                               self.position[0], self.position[1] - 4),
            "exec"
        )
        def_node = body[0]
        def_node.body = self.make_buffer_frame(
            self.generate_child_ast(generator, self)
        )

        # (dead local `name = self.funcspec.partition('(')[0]` removed;
        # the parsed def_node.name is the authoritative function name)

        # move the function out of the closure
        if parent.is_top_level:
            generator.add_top_def(def_node.name, def_node)
            return []

        return [def_node]
class PyComplexExprNode(PyComplexNode):
    """An output expression assembled from several child expressions."""

    def get_expressions(self):
        collected = []
        for child in self.children:
            if hasattr(child, 'get_expression'):
                collected.append(child.get_expression())
            else:
                collected.extend(child.get_expressions())
        return collected

    def generate_ast(self, generator, parent=None):
        exprs = self.get_expressions()
        return self.generate_output_ast(exprs, generator, parent)
class PyBlockNode(PyComplexNode):
    """A named, overridable section (block) of the template."""

    def __init__(self, name):
        super(PyBlockNode, self).__init__()
        self.name = name

    def generate_ast(self, generator, parent):
        # blocks directly under an extends node are inherited, not called here
        is_extended = isinstance(parent, PyExtendsNode)
        name = self.name
        blockfunc_name = '__TK__block__%s' % name
        position = getattr(name, 'position', (1, 0))
        # col - 4 compensates for the prepended "def " text
        body = get_fragment_ast(
            StringWithLocation(
                'def %s():pass' % blockfunc_name,
                position[0], position[1] - 4),
            'exec'
        )
        def_node = body[0]
        def_node.body = self.make_buffer_frame(
            self.generate_child_ast(generator, self)
        )
        if not isinstance(name, str):  # pragma: python2
            name = name.encode('UTF-8')

        generator.add_block(str(name), def_node, blockfunc_name)
        if not is_extended:
            # call the block in place
            return self.generate_output_ast(
                [simple_call(NameX(str(self.name)), [])],
                self,
                parent,
                position=position
            )
        else:
            return []
class PyWithNode(PyComplexNode):
    """A section with locally bound variables, isolated in its own scope."""

    def __init__(self, vars):
        super(PyWithNode, self).__init__()
        self.vars = vars

    def generate_ast(self, generator, parent=None):
        bindings = get_fragment_ast(self.vars, 'exec')
        scoped_body = bindings + self.generate_child_ast(generator, self)
        return self.generate_varscope(scoped_body)
class PyExtendsNode(PyComplexNode):
    """Marks this template as inheriting from another template."""

    # children of an extends node (blocks, defs, imports) live at top level
    is_top_level = True

    def __init__(self, href):
        super(PyExtendsNode, self).__init__()
        self.href = href

    def generate_ast(self, generator, parent=None):
        generator.make_extended_template(self.href)
        return self.generate_child_ast(generator, self)
def ast_equals(tree1, tree2):
    """Structural equality of two AST trees via their dump representation."""
    return ast.dump(tree1) == ast.dump(tree2)
def remove_locations(node):
    """
    Removes locations from the given AST tree completely
    """

    def fix(node):
        # delete lineno/col_offset outright so later passes can renumber
        if 'lineno' in node._attributes and hasattr(node, 'lineno'):
            del node.lineno

        if 'col_offset' in node._attributes and hasattr(node, 'col_offset'):
            del node.col_offset

        for child in iter_child_nodes(node):
            fix(child)

    fix(node)
class PyRootNode(PyComplexNode):
    """Root of the IR tree; generates the whole compiled template module."""

    def __init__(self):
        super(PyRootNode, self).__init__()

    # the root is a top-level scope: defs/blocks/imports hang off it directly
    is_top_level = True

    def generate_ast(self, generator, parent=None):
        """Build the module AST: a __TK__binder function that wraps __main__,
        blocks and top-level defs, and injects free variables from context."""
        main_body = self.generate_child_ast(generator, self)
        extended = generator.extended_href
        toplevel_funcs = generator.blocks + generator.top_defs
        # do not generate __main__ for extended templates
        if not extended:
            main_func = self.make_function('__main__', main_body,
                add_buffer=True)
            generator.add_bind_decorator(main_func)
            toplevel_funcs = [main_func] + toplevel_funcs
        # analyze the set of free variables
        free_variables = set()
        for i in toplevel_funcs:
            fv_info = FreeVarFinder.for_ast(i)
            free_variables.update(fv_info.get_free_variables())
        # discard __TK__ variables, always builtin names True, False, None
        # from free variables.
        for i in list(free_variables):
            if i.startswith('__TK__') or i in ALWAYS_BUILTINS:
                free_variables.discard(i)
        # discard the names of toplevel funcs from free variables
        free_variables.difference_update(generator.top_level_names)
        # build the binder prologue as source text, then parse it to AST
        code = '__TK__mkbuffer = __TK__runtime.Buffer\n'
        code += '__TK__escape = __TK__escape_g = __TK__runtime.escape\n'
        code += '__TK__output_attrs = __TK__runtime.output_attrs\n'
        if extended:
            code += '__TK__parent_template = __TK__runtime.load(%r)\n' % \
                extended
        code += 'def __TK__binder(__TK__context):\n'
        code += ' __TK__original_context = __TK__context.copy()\n'
        code += ' __TK__bind = __TK__runtime.bind(__TK__context)\n'
        code += ' __TK__bindblock = __TK__runtime.bind(__TK__context, ' \
            'block=True)\n'
        # bind gettext early!
        for i in ['egettext']:
            if i in free_variables:
                free_variables.add('gettext')
                free_variables.discard(i)
        if 'gettext' in free_variables:
            code += ' def egettext(msg):\n'
            code += ' return __TK__escape(gettext(msg))\n'
            code += ' gettext = __TK__context["gettext"]\n'
            free_variables.discard('gettext')
        code += ' raise\n'  # a placeholder, replaced by the real funcs below
        if extended:
            # an extended template does not have a __main__ (it is inherited)
            code += ' __TK__parent_template.binder_func(__TK__context)\n'
        for i in free_variables:
            code += ' if "%s" in __TK__context:\n' % i
            code += ' %s = __TK__context["%s"]\n' % (i, i)
        code += ' return __TK__context\n'
        tree = ast.parse(code)
        remove_locations(tree)

        class LocatorAndTransformer(ast.NodeTransformer):
            # remembers the first __TK__binder FunctionDef encountered
            binder = None

            def visit_FunctionDef(self, node):
                if node.name == '__TK__binder' and not self.binder:
                    self.binder = node
                self.generic_visit(node)
                return node

        locator = LocatorAndTransformer()
        locator.visit(tree)
        # inject the other top level funcs in the binder
        binder = locator.binder
        # find the Raise placeholder and splice the real functions over it
        for i, e in enumerate(binder.body):
            if isinstance(e, Raise):
                break
        binder.body[i:i + 1] = toplevel_funcs
        binder.body[i:i] = generator.imports
        coalesce_outputs(tree)
        return tree
# noinspection PyProtectedMember
class LocationMapper(object):
    """Renumbers AST locations into a compact line sequence, recording a
    map from new (mapped) line numbers back to original ones."""

    def __init__(self):
        # mapped lineno -> original lineno; line 1 always maps to itself
        self.lineno_map = {1: 1}
        self.prev_original_line = 1
        self.prev_mapped_line = 1
        self.prev_column = 0

    def map_linenos(self, node):
        """Recursively rewrite node.lineno (and track col_offset) in place."""
        if 'lineno' in node._attributes:
            if hasattr(node, 'lineno'):
                # a new mapped line starts only when the original line changes
                if node.lineno != self.prev_original_line:
                    self.prev_mapped_line += 1
                    self.lineno_map[self.prev_mapped_line] = node.lineno
                    self.prev_original_line = node.lineno
                node.lineno = self.prev_mapped_line
        if 'col_offset' in node._attributes:
            if hasattr(node, 'col_offset'):
                # NOTE(review): col_offset is reassigned to itself; only the
                # prev_column bookkeeping has an effect here — confirm intent
                self.prev_column = node.col_offset
                node.col_offset = self.prev_column
        for child in iter_child_nodes(node):
            self.map_linenos(child)
class Generator(BaseGenerator):
    """Python code generator: maps IR node kinds to their Py* classes and
    accumulates top-level defs, blocks and imports during generation."""

    # IR node kind -> Python-emitting node class
    OutputNode = PyOutputNode
    TranslatableOutputNode = PyTranslatableOutputNode
    IfNode = PyIfNode
    ForNode = PyForNode
    DefineNode = PyDefineNode
    ComplexExprNode = PyComplexExprNode
    ExpressionNode = PyExpressionNode
    ImportNode = PyImportNode
    RootNode = PyRootNode
    AttributeNode = PyAttributeNode
    AttrsNode = PyAttrsNode
    UnlessNode = PyUnlessNode
    ExtendsNode = PyExtendsNode
    BlockNode = PyBlockNode
    CodeNode = PyCodeNode
    WithNode = PyWithNode

    def __init__(self, ir_tree):
        super(Generator, self).__init__(ir_tree)
        self.blocks = []  # block FunctionDefs hoisted into the binder
        self.top_defs = []  # top-level def FunctionDefs hoisted into the binder
        self.top_level_names = set()  # names excluded from free-variable scan
        self.extended_href = None  # href of the parent template, if extending
        self.imports = []  # import-defs assignments for the binder
        self.lnotab = None  # mapped-line -> original-line table

    def add_bind_decorator(self, func, block=True):
        # decorate func with __TK__bind / __TK__bindblock inside the binder
        binder_call = NameX('__TK__bind' + ('block' if block else ''))
        decors = [binder_call]
        func.decorator_list = decors

    def add_block(self, name, blockfunc, blockfunc_name):
        """Register a named, overridable template block function."""
        self.top_level_names.add(blockfunc_name)
        self.add_bind_decorator(blockfunc, block=True)
        self.blocks.append(blockfunc)

    def add_top_def(self, name, defblock):
        """Register a top-level template def function."""
        self.top_level_names.add(name)
        self.add_bind_decorator(defblock)
        self.top_defs.append(defblock)

    def add_top_level_import(self, name, node):
        """Register a top-level import-defs alias assignment."""
        self.top_level_names.add(name)
        self.imports.append(node)

    def make_extended_template(self, href):
        # mark this template as extending (inheriting from) another
        self.extended_href = href

    def lnotab_info(self):
        """Return the generated-line -> original-line map."""
        return self.lnotab

    def generate_ast(self):
        tree = super(Generator, self).generate_ast()
        loc_mapper = LocationMapper()
        loc_mapper.map_linenos(tree)
        self.lnotab = loc_mapper.lineno_map
        return tree
|
tetframework/Tonnikala
|
tonnikala/languages/python/generator.py
|
remove_locations
|
python
|
def remove_locations(node):
def fix(node):
if 'lineno' in node._attributes and hasattr(node, 'lineno'):
del node.lineno
if 'col_offset' in node._attributes and hasattr(node, 'col_offset'):
del node.col_offset
for child in iter_child_nodes(node):
fix(child)
fix(node)
|
Removes locations from the given AST tree completely
|
train
|
https://github.com/tetframework/Tonnikala/blob/99d168657da1b2372ff898254f80808ea8d1b83f/tonnikala/languages/python/generator.py#L654-L669
|
[
"def fix(node):\n if 'lineno' in node._attributes and hasattr(node, 'lineno'):\n del node.lineno\n\n if 'col_offset' in node._attributes and hasattr(node, 'col_offset'):\n del node.col_offset\n\n for child in iter_child_nodes(node):\n fix(child)\n"
] |
# -*- coding: utf-8 -*-
# notice: this module cannot be sanely written to take use of
# unicode_literals, bc some of the arguments need to be str on
# both python2 and 3
from __future__ import absolute_import, division, print_function
import ast
from ast import *
from collections import Iterable
from .astalyzer import FreeVarFinder
from ..base import LanguageNode, ComplexNode, BaseGenerator
from ...compat import string_types, PY2
from ...helpers import StringWithLocation
from ...runtime.debug import TemplateSyntaxError
# Py_DEBUG builds enable C-level asserts; used to toggle debug-only behavior.
try:  # pragma: no cover
    import sysconfig
    HAS_ASSERT = bool(sysconfig.get_config_var('Py_DEBUG'))
    del sysconfig
except ImportError:  # pragma: no cover
    HAS_ASSERT = False

# monotonically increasing suffix for generated internal names (see gen_name)
name_counter = 0

# builtin constant names that are never treated as template context variables
ALWAYS_BUILTINS = '''
False
True
None
'''.split()
def simple_call(func, args=None):
    """Build a Call node invoking ``func`` with positional args only."""
    return Call(func=func, args=args or [], keywords=[], starargs=None,
                kwargs=None)
if PY2:  # pragma: python2
    def create_argument_list(arguments):
        """Signature arg nodes for the given names (py2: Name with Param)."""
        return [Name(id=id, ctx=Param()) for id in arguments]
else:  # pragma: python 3
    def create_argument_list(arguments):
        """Signature arg nodes for the given names (py3: dedicated arg)."""
        return [arg(arg=id, annotation=None) for id in arguments]
def simple_function_def(name, arguments=()):
    """Build a FunctionDef with simple positional args and a pass body."""
    arguments = create_argument_list(arguments)
    # varargannotation/kwargannotation are py2-era attributes kept for compat
    return FunctionDef(
        name=name,
        args=ast.arguments(
            args=arguments,
            vararg=None,
            varargannotation=None,
            kwonlyargs=[],
            kwarg=None,
            kwargannotation=None,
            defaults=[],
            kw_defaults=[]),
        body=[Pass()],
        decorator_list=[],
        returns=None
    )
def NameX(id, store=False):
    """Name node shorthand: Load context by default, Store when store=True."""
    return Name(id=id, ctx=Load() if not store else Store())
def adjust_locations(ast_node, first_lineno, first_offset):
    """
    Adjust the locations of the ast nodes, offsetting them
    to the new lineno and column offset; the column offset applies
    to first-line nodes only.
    """
    line_delta = first_lineno - 1

    def _fix(node):
        if 'lineno' in node._attributes:
            lineno = node.lineno
            col = node.col_offset

            # adjust the offset on the first line
            if lineno == 1:
                col += first_offset

            lineno += line_delta
            node.lineno = lineno
            node.col_offset = col

        for child in iter_child_nodes(node):
            _fix(child)

    _fix(ast_node)
def get_fragment_ast(expression, mode='eval', adjust=(0, 0)):
    """Parse a template expression string into an AST fragment.

    Non-strings are assumed to be AST already and returned unchanged.
    Node locations are shifted to the expression's position within the
    template (``expression.position`` when present) plus ``adjust``.
    Raises TemplateSyntaxError with a template-relative line number.
    """
    if not isinstance(expression, string_types):
        return expression

    t = None
    position = getattr(expression, 'position', (1, 0))
    position = position[0] + adjust[0], position[1] + adjust[1]
    try:
        exp = expression
        # ast.parse wants a trailing newline
        if expression[-1:] != '\n':
            exp = expression + '\n'

        tree = ast.parse(exp, mode=mode)
    except SyntaxError as e:
        lineno = e.lineno
        lineno += position[0] - 1
        t = TemplateSyntaxError(e.msg, lineno=lineno)

    # raised outside the except block, presumably so the SyntaxError does
    # not become the chained context of the TemplateSyntaxError — confirm
    if t:
        raise t

    adjust_locations(tree, position[0], position[1])
    return tree.body
def gen_name(typename=None):
    """Return a unique internal identifier, optionally tagged with typename."""
    global name_counter
    name_counter += 1
    if typename:
        return "__TK__typed__%s__%d__" % (typename, name_counter)
    else:
        # NOTE(review): this form lacks the double underscore after TK, so
        # it is not matched by the '__TK__' prefix checks elsewhere — confirm
        return "__TK_%d__" % (name_counter)
def static_eval(expr):
    """Statically evaluate expr: literal_eval plus recursive unary ``not``."""
    if isinstance(expr, UnaryOp) and isinstance(expr.op, Not):
        return not static_eval(expr.operand)
    return literal_eval(expr)
def static_expr_to_bool(expr):
    """Truth value of a statically evaluable expression, else None.

    Returns True/False when ``expr`` can be evaluated by static_eval;
    returns None when it cannot (value only known at render time).
    """
    try:
        return bool(static_eval(expr))
    except Exception:
        # narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
        # are not swallowed while probing for static evaluability
        return None
class PythonNode(LanguageNode):
    """Base class for all Python-emitting IR nodes."""

    # only root/extends scopes host top-level defs, blocks and imports
    is_top_level = False

    def generate_output_ast(self, code, generator, parent, escape=False,
                            position=None):
        """Wrap each given expression in a ``__TK__output(...)`` statement."""
        func = Name(id='__TK__output', ctx=Load())
        if not isinstance(code, list):
            code = [code]
        rv = []
        for i in code:
            if position is not None:
                i.lineno, i.col_offset = position
            e = Expr(simple_call(func, [i]))
            # remembered so OutputCoalescer can merge adjacent output calls
            e.output_args = [i]
            rv.append(e)
        return rv

    def make_buffer_frame(self, body):
        """Prefix body with a fresh output buffer; suffix with its return."""
        new_body = []
        new_body.append(Assign(
            targets=[
                NameX('__TK__output', store=True),
            ],
            value=simple_call(
                NameX('__TK__mkbuffer')
            )
        ))
        new_body.extend(body)
        new_body.append(Return(value=NameX('__TK__output')))
        return new_body

    def make_function(self, name, body, add_buffer=False, arguments=()):
        """Build a FunctionDef named ``name`` holding ``body``."""
        # ensure that the function name is an str
        func = simple_function_def(str(name), arguments=arguments)
        new_body = func.body = []
        if add_buffer:
            new_body.extend(self.make_buffer_frame(body))
        else:
            new_body.extend(body)
        if not new_body:
            new_body.append(Pass())
        return func

    def generate_varscope(self, body):
        """Run body inside a nested function to get a private name scope."""
        name = gen_name('variable_scope')
        rv = [
            self.make_function(name, body,
                               arguments=['__TK__output', '__TK__escape']),
            Expr(simple_call(NameX(name),
                             [NameX('__TK__output'), NameX('__TK__escape')]))
        ]
        return rv
class PyOutputNode(PythonNode):
    """Node that emits a constant text chunk to the output buffer."""

    def __init__(self, text):
        super(PyOutputNode, self).__init__()
        self.text = text

    def get_expressions(self):
        return [self.get_expression()]

    def get_expression(self):
        # the literal text as a Str AST node
        return Str(s=self.text)

    def generate_ast(self, generator, parent):
        return self.generate_output_ast(self.get_expression(), generator,
                                        parent)
class PyTranslatableOutputNode(PyOutputNode):
    """Constant text routed through gettext, optionally escaped."""

    def __init__(self, text, needs_escape=False):
        super(PyTranslatableOutputNode, self).__init__(text)
        self.needs_escape = needs_escape

    def get_expressions(self):
        return [self.get_expression()]

    def get_expression(self):
        # egettext is the escaping variant defined in the binder prologue
        name = 'gettext'
        if self.needs_escape:
            name = 'egettext'

        expr = simple_call(
            NameX(name),
            [Str(s=self.text)],
        )
        return expr
class PyExpressionNode(PythonNode):
    """An interpolated template expression, escaped on output."""

    def __init__(self, expression):
        super(PyExpressionNode, self).__init__()
        self.expr = expression

    def get_expressions(self):
        return [self.get_expression()]

    def get_expression(self):
        # wrap the raw expression in __TK__escape(...)
        return simple_call(
            NameX('__TK__escape'),
            [self.get_unescaped_expression()]
        )

    def get_unescaped_expression(self):
        # the raw parsed expression, without the escape wrapper
        return get_fragment_ast(self.expr)

    def generate_ast(self, generator, parent):
        return self.generate_output_ast(self.get_expression(), generator,
                                        parent)
class PyCodeNode(PythonNode):
    """Verbatim code embedded in the template, emitted as statements."""

    def __init__(self, source):
        super(PyCodeNode, self).__init__()
        self.source = source

    def generate_ast(self, generator, parent):
        # parse as statements ('exec'), not as a single expression
        return get_fragment_ast(self.source, mode='exec')
def coalesce_strings(args):
    """Merge runs of consecutive Str nodes in args into single Str nodes."""
    rv = []
    str_on = None  # the Str node currently accumulating text, if any
    for i in args:
        if isinstance(i, Str):
            if str_on:
                # extend the running Str in place and drop this node
                str_on.s += i.s
                continue

            str_on = i
        else:
            str_on = None

        rv.append(i)
    return rv
class PyComplexNode(ComplexNode, PythonNode):
    """A Python node with children; generates their statements in order."""

    def generate_child_ast(self, generator, parent_for_children):
        """Concatenate the generated statement lists of all child nodes."""
        rv = []
        for i in self.children:
            rv.extend(i.generate_ast(generator, parent_for_children))
        return rv
class PyIfNode(PyComplexNode):
    """Conditional section; statically folds constant conditions."""

    def __init__(self, expression):
        super(PyIfNode, self).__init__()
        self.expression = expression

    def generate_ast(self, generator, parent):
        test = get_fragment_ast(self.expression)
        # True/False when statically decidable, None otherwise;
        # use identity tests (PEP 8 E712) on the tri-state result
        boolean = static_expr_to_bool(test)

        if boolean is False:
            # statically false: drop the whole section
            return []

        if boolean is True:
            # statically true: inline children without an If node
            return self.generate_child_ast(generator, parent)

        node = If(
            test=test,
            body=self.generate_child_ast(generator, self),
            orelse=[]
        )
        return [node]
def PyUnlessNode(self, expression):
    """Factory returning a PyIfNode over the negated expression."""
    expression = get_fragment_ast(expression)
    expression = UnaryOp(op=Not(), operand=expression)
    return PyIfNode(expression)
class PyImportNode(PythonNode):
    """Template import: binds another template's defs to a local alias."""

    def __init__(self, href, alias):
        super(PyImportNode, self).__init__()
        self.href = str(href)
        self.alias = str(alias)

    def generate_ast(self, generator, parent):
        # emits: alias = __TK__runtime.import_defs(__TK__original_context, href)
        node = Assign(
            targets=[NameX(str(self.alias), store=True)],
            value=
            simple_call(
                func=
                Attribute(value=NameX('__TK__runtime', store=False),
                          attr='import_defs', ctx=Load()),
                args=[
                    NameX('__TK__original_context'),
                    Str(s=self.href)
                ]
            )
        )
        if parent.is_top_level:
            # hoisted to binder level; nothing to emit in place
            generator.add_top_level_import(str(self.alias), node)
            return []

        return [node]
class PyAttributeNode(PyComplexNode):
    """An element attribute whose value is assembled from child nodes."""

    def __init__(self, name, value):
        super(PyAttributeNode, self).__init__()
        self.name = name
        # NOTE(review): `value` is accepted but never stored; the attribute
        # value apparently arrives via child nodes instead — confirm.

    def get_expressions(self):
        """All child expressions composing the attribute value."""
        rv = []
        for i in self.children:
            rv.extend(i.get_expressions())
        return rv

    def generate_ast(self, generator, parent):
        if len(self.children) == 1 and \
                isinstance(self.children[0], PyExpressionNode):

            # special case, the attribute contains a single
            # expression, these are handled by
            # __TK__output.output_boolean_attr,
            # given the name, and unescaped expression!
            return [Expr(simple_call(
                func=Attribute(
                    value=NameX('__TK__output'),
                    attr='output_boolean_attr',
                    ctx=Load()
                ),
                args=[
                    Str(s=self.name),
                    self.children[0].get_unescaped_expression()
                ]
            ))]

        # otherwise just return the output for the attribute code
        # like before
        return self.generate_output_ast(
            [Str(s=' %s="' % self.name)] +
            self.get_expressions() +
            [Str(s='"')],
            generator, parent
        )
class PyAttrsNode(PythonNode):
    """py:attrs: renders a whole mapping/iterable of attributes at runtime."""

    def __init__(self, expression):
        super(PyAttrsNode, self).__init__()
        self.expression = expression  # source of the attrs expression

    def generate_ast(self, generator, parent):
        expression = get_fragment_ast(self.expression)

        # __TK__output_attrs(<expr>) does per-attribute escaping/skipping
        output = simple_call(
            NameX('__TK__output_attrs'),
            args=[expression]
        )

        return self.generate_output_ast(output, generator, parent)
class PyForNode(PyComplexNode):
    """py:for loop; 'target in iterable' text with children as the body."""

    def __init__(self, target_and_expression, parts):
        super(PyForNode, self).__init__()
        self.target_and_expression = target_and_expression
        # NOTE(review): `parts` is accepted but never stored or used.

    def generate_contents(self, generator, parent):
        lineno, col = getattr(self.target_and_expression, 'position', (1, 0))
        # parse a skeleton 'for <spec>: pass' and graft the real body on;
        # col - 4 compensates for the prepended 'for ' prefix
        body = get_fragment_ast(
            StringWithLocation('for %s: pass' % self.target_and_expression,
                               lineno, col - 4),
            'exec',
        )
        for_node = body[0]
        for_node.body = self.generate_child_ast(generator, self)
        return [for_node]

    def generate_ast(self, generator, parent):
        # TODO: this could be needed to be reinstantiated
        # return self.generate_varscope(self.generate_contents())
        return self.generate_contents(generator, parent)
class PyDefineNode(PyComplexNode):
    """py:def: defines a reusable template function."""

    def __init__(self, funcspec):
        super(PyDefineNode, self).__init__()
        self.position = getattr(funcspec, 'position', (1, 0))
        # allow a bare name; normalize it to a call signature
        if '(' not in funcspec:
            funcspec += '()'

        self.funcspec = funcspec

    def generate_ast(self, generator, parent):
        # parse a skeleton 'def <spec>: pass'; column - 4 compensates for
        # the prepended 'def ' prefix
        body = get_fragment_ast(
            StringWithLocation('def %s: pass' % self.funcspec,
                               self.position[0], self.position[1] - 4),
            "exec"
        )
        def_node = body[0]
        name = self.funcspec.partition('(')[0]
        # NOTE(review): `name` is computed but unused; def_node.name is
        # used below instead.
        def_node.body = self.make_buffer_frame(
            self.generate_child_ast(generator, self)
        )

        # move the function out of the closure
        if parent.is_top_level:
            generator.add_top_def(def_node.name, def_node)
            return []

        return [def_node]
class PyComplexExprNode(PyComplexNode):
    """A mixed run of literal and expression children, output together."""

    def get_expressions(self):
        # children either yield a single expression or a list of them
        rv = []
        for i in self.children:
            if hasattr(i, 'get_expression'):
                rv.append(i.get_expression())
            else:
                rv.extend(i.get_expressions())

        return rv

    def generate_ast(self, generator, parent=None):
        return self.generate_output_ast(self.get_expressions(),
                                        generator, parent)
class PyBlockNode(PyComplexNode):
    """py:block: an overridable named region (template inheritance)."""

    def __init__(self, name):
        super(PyBlockNode, self).__init__()
        self.name = name

    def generate_ast(self, generator, parent):
        # inside an extended template the block is only registered,
        # never called in place (the parent template calls it)
        is_extended = isinstance(parent, PyExtendsNode)

        name = self.name
        blockfunc_name = '__TK__block__%s' % name
        position = getattr(name, 'position', (1, 0))
        # skeleton 'def __TK__block__<name>(): pass'; col - 4 compensates
        # for the prepended 'def ' prefix
        body = get_fragment_ast(
            StringWithLocation(
                'def %s():pass' % blockfunc_name,
                position[0], position[1] - 4),
            'exec'
        )
        def_node = body[0]
        def_node.body = self.make_buffer_frame(
            self.generate_child_ast(generator, self)
        )

        if not isinstance(name, str):  # pragma: python2
            name = name.encode('UTF-8')

        generator.add_block(str(name), def_node, blockfunc_name)
        if not is_extended:
            # call the block in place
            return self.generate_output_ast(
                [simple_call(NameX(str(self.name)), [])],
                self,
                parent,
                position=position
            )
        else:
            return []
class PyWithNode(PyComplexNode):
    """py:with: evaluates variable bindings around the children."""

    def __init__(self, vars):
        super(PyWithNode, self).__init__()
        self.vars = vars  # source text of the assignments

    def generate_ast(self, generator, parent=None):
        var_defs = get_fragment_ast(self.vars, 'exec')
        body = var_defs + self.generate_child_ast(generator, self)
        # wrap in a function scope so the bindings do not leak out
        return self.generate_varscope(body)
class PyExtendsNode(PyComplexNode):
    """py:extends: marks the template as extending another template."""

    # children of an extends node are emitted at binder top level
    is_top_level = True

    def __init__(self, href):
        super(PyExtendsNode, self).__init__()
        self.href = href  # reference to the parent template

    def generate_ast(self, generator, parent=None):
        generator.make_extended_template(self.href)
        return self.generate_child_ast(generator, self)
def ast_equals(tree1, tree2):
    """Return True when the two AST trees serialize to identical dumps.

    Uses ast.dump for structural comparison; with the default arguments
    location attributes are not included, so differing line/column
    information is ignored.
    """
    return ast.dump(tree1) == ast.dump(tree2)
def coalesce_outputs(tree):
    """
    Coalesce the constant output expressions

        __output__('foo')
        __output__('bar')
        __output__(baz)
        __output__('xyzzy')

    into

        __output__('foobar', baz, 'xyzzy')
    """
    coalesce_all_outputs = True
    if coalesce_all_outputs:
        # merge every consecutive output call, constant or not
        should_coalesce = lambda n: True
    else:
        # alternative policy: only merge calls that start with a literal
        should_coalesce = lambda n: n.output_args[0].__class__ is Str

    class OutputCoalescer(NodeVisitor):
        def visit(self, node):
            # if-else *expressions* also have a 'body' attribute; that is
            # not the statement list we want, hence the Iterable check.
            if hasattr(node, 'body') and isinstance(node.body, Iterable):
                # coalesce continuous string output nodes
                new_body = []
                output_node = None

                def coalesce_strs():
                    # merge adjacent Str arguments of the accumulated call
                    if output_node:
                        output_node.value.args[:] = \
                            coalesce_strings(output_node.value.args)

                for i in node.body:
                    if hasattr(i, 'output_args') and should_coalesce(i):
                        if output_node:
                            # keep argument lists bounded -- presumably to
                            # stay well under CPython's call-argument
                            # limits; TODO confirm the 250 rationale
                            if len(output_node.value.args) + \
                                    len(i.output_args) > 250:
                                coalesce_strs()
                                output_node = i
                            else:
                                # absorb this call into the current one
                                # and drop the node from the new body
                                output_node.value.args.extend(i.output_args)
                                continue

                        output_node = i
                    else:
                        # non-output statement terminates the run
                        coalesce_strs()
                        output_node = None

                    new_body.append(i)

                coalesce_strs()
                node.body[:] = new_body

            NodeVisitor.visit(self, node)

        def check(self, node):
            """
            Coalesce __TK__output(__TK__escape(literal(x))) into
            __TK__output(x).
            """
            if not ast_equals(node.func, NameX('__TK__output')):
                return

            for i in range(len(node.args)):
                arg1 = node.args[i]
                if not arg1.__class__.__name__ == 'Call':
                    continue

                if not ast_equals(arg1.func, NameX('__TK__escape')):
                    continue

                if len(arg1.args) != 1:
                    continue

                arg2 = arg1.args[0]
                if not arg2.__class__.__name__ == 'Call':
                    continue

                if not ast_equals(arg2.func, NameX('literal')):
                    continue

                if len(arg2.args) != 1:
                    continue

                # escape(literal(x)) is the identity on x: unwrap it
                node.args[i] = arg2.args[0]

        def visit_Call(self, node):
            self.check(node)
            self.generic_visit(node)

    OutputCoalescer().visit(tree)
class PyRootNode(PyComplexNode):
    """Root of the IR tree; emits the complete module AST of a template."""

    def __init__(self):
        super(PyRootNode, self).__init__()

    is_top_level = True

    def generate_ast(self, generator, parent=None):
        main_body = self.generate_child_ast(generator, self)

        extended = generator.extended_href
        toplevel_funcs = generator.blocks + generator.top_defs

        # do not generate __main__ for extended templates
        if not extended:
            main_func = self.make_function('__main__', main_body,
                                           add_buffer=True)
            generator.add_bind_decorator(main_func)
            toplevel_funcs = [main_func] + toplevel_funcs

        # analyze the set of free variables
        free_variables = set()
        for i in toplevel_funcs:
            fv_info = FreeVarFinder.for_ast(i)
            free_variables.update(fv_info.get_free_variables())

        # discard __TK__ variables, always builtin names True, False, None
        # from free variables.
        for i in list(free_variables):
            if i.startswith('__TK__') or i in ALWAYS_BUILTINS:
                free_variables.discard(i)

        # discard the names of toplevel funcs from free variables
        free_variables.difference_update(generator.top_level_names)

        # build the module skeleton as source text, then parse it; the
        # 'raise' statement below is a placeholder marking where the
        # top-level functions get spliced in.
        code = '__TK__mkbuffer = __TK__runtime.Buffer\n'
        code += '__TK__escape = __TK__escape_g = __TK__runtime.escape\n'
        code += '__TK__output_attrs = __TK__runtime.output_attrs\n'

        if extended:
            code += '__TK__parent_template = __TK__runtime.load(%r)\n' % \
                    extended

        code += 'def __TK__binder(__TK__context):\n'
        code += '    __TK__original_context = __TK__context.copy()\n'
        code += '    __TK__bind = __TK__runtime.bind(__TK__context)\n'
        code += '    __TK__bindblock = __TK__runtime.bind(__TK__context, ' \
                'block=True)\n'

        # bind gettext early!
        for i in ['egettext']:
            if i in free_variables:
                free_variables.add('gettext')
                free_variables.discard(i)

        if 'gettext' in free_variables:
            code += '    def egettext(msg):\n'
            code += '        return __TK__escape(gettext(msg))\n'
            code += '    gettext = __TK__context["gettext"]\n'
            free_variables.discard('gettext')

        code += '    raise\n'  # a placeholder for the spliced functions

        if extended:
            # an extended template does not have a __main__ (it is inherited)
            code += '    __TK__parent_template.binder_func(__TK__context)\n'

        # pull remaining free variables from the render context, if present
        for i in free_variables:
            code += '    if "%s" in __TK__context:\n' % i
            code += '        %s = __TK__context["%s"]\n' % (i, i)

        code += '    return __TK__context\n'

        tree = ast.parse(code)
        remove_locations(tree)

        class LocatorAndTransformer(ast.NodeTransformer):
            # finds (the first) __TK__binder function definition
            binder = None

            def visit_FunctionDef(self, node):
                if node.name == '__TK__binder' and not self.binder:
                    self.binder = node

                self.generic_visit(node)
                return node

        locator = LocatorAndTransformer()
        locator.visit(tree)

        # inject the other top level funcs in the binder, replacing the
        # placeholder 'raise'; imports are inserted just before them
        binder = locator.binder
        for i, e in enumerate(binder.body):
            if isinstance(e, Raise):
                break

        binder.body[i:i + 1] = toplevel_funcs
        binder.body[i:i] = generator.imports

        coalesce_outputs(tree)
        return tree
# noinspection PyProtectedMember
class LocationMapper(object):
    """Renumbers AST line numbers into a compact increasing sequence.

    Whenever a visited node's original line differs from the previously
    seen one, a fresh mapped line is allocated; ``lineno_map`` records
    mapped line -> original line so tracebacks can be translated back.
    """

    def __init__(self):
        self.lineno_map = {1: 1}
        self.prev_original_line = 1
        self.prev_mapped_line = 1
        self.prev_column = 0

    def map_linenos(self, node):
        """Recursively rewrite *node*'s location info in place."""
        attrs = node._attributes

        if 'lineno' in attrs and hasattr(node, 'lineno'):
            if node.lineno != self.prev_original_line:
                # new original line: allocate the next mapped line
                self.prev_mapped_line += 1
                self.lineno_map[self.prev_mapped_line] = node.lineno
                self.prev_original_line = node.lineno

            node.lineno = self.prev_mapped_line

        if 'col_offset' in attrs and hasattr(node, 'col_offset'):
            # remember the column and write it back unchanged
            self.prev_column = node.col_offset
            node.col_offset = self.prev_column

        for child in iter_child_nodes(node):
            self.map_linenos(child)
class Generator(BaseGenerator):
    """Python code generator.

    Maps the generic IR node kinds to their Py* implementations and
    accumulates module-level artifacts (blocks, top-level defs, imports)
    that PyRootNode splices into the final module.
    """

    OutputNode = PyOutputNode
    TranslatableOutputNode = PyTranslatableOutputNode
    IfNode = PyIfNode
    ForNode = PyForNode
    DefineNode = PyDefineNode
    ComplexExprNode = PyComplexExprNode
    ExpressionNode = PyExpressionNode
    ImportNode = PyImportNode
    RootNode = PyRootNode
    AttributeNode = PyAttributeNode
    AttrsNode = PyAttrsNode
    UnlessNode = PyUnlessNode
    ExtendsNode = PyExtendsNode
    BlockNode = PyBlockNode
    CodeNode = PyCodeNode
    WithNode = PyWithNode

    def __init__(self, ir_tree):
        super(Generator, self).__init__(ir_tree)
        self.blocks = []           # block function defs (py:block)
        self.top_defs = []         # top-level def functions (py:def)
        self.top_level_names = set()
        self.extended_href = None  # parent template, if extending
        self.imports = []          # hoisted py:import assignments
        self.lnotab = None         # generated-line -> source-line map

    def add_bind_decorator(self, func, block=True):
        # decorate with __TK__bindblock (block=True) or __TK__bind
        binder_call = NameX('__TK__bind' + ('block' if block else ''))
        decors = [binder_call]
        func.decorator_list = decors

    def add_block(self, name, blockfunc, blockfunc_name):
        self.top_level_names.add(blockfunc_name)
        self.add_bind_decorator(blockfunc, block=True)
        self.blocks.append(blockfunc)

    def add_top_def(self, name, defblock):
        self.top_level_names.add(name)
        self.add_bind_decorator(defblock)
        self.top_defs.append(defblock)

    def add_top_level_import(self, name, node):
        self.top_level_names.add(name)
        self.imports.append(node)

    def make_extended_template(self, href):
        self.extended_href = href

    def lnotab_info(self):
        """Return the generated-line -> source-line mapping."""
        return self.lnotab

    def generate_ast(self):
        tree = super(Generator, self).generate_ast()
        # compact the line numbers and remember the reverse mapping
        loc_mapper = LocationMapper()
        loc_mapper.map_linenos(tree)
        self.lnotab = loc_mapper.lineno_map
        return tree
|
tetframework/Tonnikala
|
tonnikala/runtime/python.py
|
bind
|
python
|
def bind(context, block=False):
    """Given the context, returns a decorator wrapper; the binder
    replaces the wrapped func with the value from the context OR puts
    this function in the context with the name.
    """
    if block:
        def bind_block(func):
            # block functions are named '__TK__block__<name>'; strip the
            # prefix to obtain the context key
            key = func.__name__.replace('__TK__block__', '')
            return context.setdefault(key, func)

        return bind_block

    def bind_def(func):
        # an existing context entry wins over the wrapped function
        return context.setdefault(func.__name__, func)

    return bind_def
|
Given the context, returns a decorator wrapper;
the binder replaces the wrapped func with the
value from the context OR puts this function in
the context with the name.
|
train
|
https://github.com/tetframework/Tonnikala/blob/99d168657da1b2372ff898254f80808ea8d1b83f/tonnikala/runtime/python.py#L87-L110
| null |
from __future__ import absolute_import, division, print_function, \
unicode_literals
from collections import Mapping
from markupsafe import escape
from ..compat import text_type, PY3
NoneType = type(None)
class _TKPythonBufferImpl(object):
    """Pure-Python output buffer used when the C extension is absent.

    Collects text fragments in a list; joining happens once at the end.
    ``output``/``output_boolean_attr`` are built as closures in __init__
    so the hot path uses local-variable lookups.
    """

    def __init__(self):
        self._buffer = buffer = []
        # bind list methods to locals for speed inside the closures
        e = buffer.extend
        a = buffer.append

        def do_output(*objs):
            for obj in objs:
                if obj.__class__ is self.__class__:
                    # another buffer: splice its fragments in directly
                    e(obj._buffer)
                else:
                    a(text_type(obj))

        self.output = do_output

        def output_boolean_attr(name, value):
            t = type(value)
            if t in (bool, NoneType):
                # True renders as name="name"; False/None render nothing
                value and do_output(' ' + name + '="' + name + '"')
                # skip on false, None
                return

            do_output(' ' + name + '="')
            do_output(escape(value))
            do_output('"')

        self.output_boolean_attr = output_boolean_attr

    def __call__(self, *a):
        self.output(*a)

    def __html__(self):
        # markupsafe protocol: this buffer is already escaped markup
        return self

    def join(self):
        return ''.join(self._buffer)

    if PY3:  # pragma: no cover
        __str__ = join
    else:  # pragma: no cover
        __unicode__ = join

        def __str__(self):
            # Python 2: str() must return bytes
            return self.join().encode('UTF-8')
try: # pragma: no cover
from ._buffer import Buffer, _set_escape_method
_set_escape_method(escape)
except ImportError as e: # pragma: no cover
Buffer = _TKPythonBufferImpl
_set_escape_method = None
del _set_escape_method
def output_attrs(values):
    """Render *values* as escaped HTML attributes into a Buffer.

    *values* may be a Mapping or an iterable of (name, value) pairs;
    falsy input short-circuits to ''. Each pair is rendered via
    Buffer.output_boolean_attr, which skips False/None values and
    escapes the rest.
    """
    if not values:
        return ''

    if not isinstance(values, Mapping):
        values = iter(values)
    else:
        values = values.items()

    rv = Buffer()
    for k, v in values:
        rv.output_boolean_attr(k, v)

    return rv
class ImportedTemplate(object):
    """Namespace object carrying the defs exported by an imported template."""

    def __init__(self, name):
        self._name = name  # the href the template was imported from

    def __repr__(self):  # pragma: no cover
        return "<ImportedTemplate '%r'>" % (self._name,)
class TonnikalaRuntime(object):
    """Runtime services handed to compiled templates.

    Exposes the helpers the generated code expects (bind, Buffer,
    output_attrs, escape) and resolves template imports/inheritance
    through the attached loader.
    """

    bind = staticmethod(bind)
    Buffer = staticmethod(Buffer)
    output_attrs = staticmethod(output_attrs)
    escape = staticmethod(escape)

    def __init__(self):
        # assigned by the loader after construction
        self.loader = None

    def load(self, href):
        """Delegate template loading to the attached loader."""
        return self.loader.load(href)

    def import_defs(self, context, href):
        """Bind template *href* into a copy of *context* and return an
        ImportedTemplate exposing the names the import added/replaced."""
        modified_context = context.copy()
        self.loader.load(href).bind(modified_context)
        container = ImportedTemplate(href)
        for k, v in modified_context.items():
            # skip entries the imported template did not modify
            if k in context and context[k] is v:
                continue

            setattr(container, k, v)

        return container
|
tetframework/Tonnikala
|
tonnikala/languages/javascript/jslex.py
|
literals
|
python
|
def literals(choices, prefix="", suffix=""):
    """Create a regex from a space-separated list of literal `choices`.

    If provided, `prefix` and `suffix` will be attached to each choice
    individually.
    """
    alternatives = ("%s%s%s" % (prefix, re.escape(choice), suffix)
                    for choice in choices.split())
    return "|".join(alternatives)
|
Create a regex from a space-separated list of literal `choices`.
If provided, `prefix` and `suffix` will be attached to each choice
individually.
|
train
|
https://github.com/tetframework/Tonnikala/blob/99d168657da1b2372ff898254f80808ea8d1b83f/tonnikala/languages/javascript/jslex.py#L24-L31
| null |
# -*- coding: utf8 -*-
from __future__ import absolute_import, division, print_function, \
unicode_literals
"""JsLex: a lexer for Javascript"""
# From https://bitbucket.org/ned/jslex
import re
class Tok(object):
    """A specification for a token class."""

    # class-wide counter: every Tok instance gets a unique increasing id
    num = 0

    def __init__(self, name, regex, next=None):
        self.name = name    # token kind reported to the consumer
        self.regex = regex  # pattern matching this token
        self.next = next    # lexer state to switch to, or None
        self.id = Tok.num
        Tok.num += 1
class Lexer(object):
    """A generic multi-state regex-based lexer."""

    def __init__(self, states, first):
        """Compile the rule table.

        *states* maps a state name to a sequence of Tok rules; *first*
        names the initial state. Each state's rules are merged into one
        alternation regex whose named groups ('t<id>') identify the Tok.
        """
        self.regexes = {}
        self.toks = {}

        for state, rules in states.items():
            parts = []
            for tok in rules:
                groupid = "t%d" % tok.id
                self.toks[groupid] = tok
                parts.append("(?P<%s>%s)" % (groupid, tok.regex))

            self.regexes[state] = re.compile("|".join(parts),
                                             re.MULTILINE | re.VERBOSE)

        self.state = first

    def lex(self, text, start=0):
        """Lexically analyze `text`.

        Yields pairs (`name`, `tokentext`).
        """
        max = len(text)  # NOTE: shadows the builtin max()
        eaten = start
        s = self.state
        r = self.regexes
        toks = self.toks
        while eaten < max:
            # (re)start matching with the current state's regex
            for match in r[s].finditer(text, eaten):
                name = match.lastgroup
                tok = toks[name]
                toktext = match.group(name)
                eaten += len(toktext)
                yield (tok.name, toktext)

                if tok.next:
                    # state switch: break so the new state's regex takes
                    # over at the current position
                    s = tok.next
                    break

        # NOTE(review): if no rule matches at the current position the
        # for-loop yields nothing and the while-loop spins forever; rule
        # sets are expected to include a catch-all (see JsLexer "other").
        self.state = s
class JsLexer(Lexer):
    """A Javascript lexer

    >>> lexer = JsLexer()
    >>> list(lexer.lex("a = 1"))
    [("id", "a"), ("ws", " "), ("punct", "="), ("ws", " "), ("dnum", "1")]

    This doesn't properly handle non-Ascii characters in the Javascript
    source.
    """
    # Because these tokens are matched as alternatives in a regex, longer
    # possibilities
    # must appear in the list before shorter ones, for example, '>>' before
    # '>'.
    #
    # Note that we don't have to detect malformed Javascript, only properly
    # lex correct Javascript, so much of this is simplified.

    # Details of Javascript lexical structure are taken from
    # http://www.ecma-international.org/publications/files/ECMA-ST/ECMA-262.pdf

    # A useful explanation of automatic semicolon insertion is at
    # http://inimino.org/~inimino/blog/javascript_semicolons

    both_before = [
        Tok("comment", r"/\*(.|\n)*?\*/"),
        Tok("linecomment", r"//.*?$"),
        Tok("ws", r"\s+"),
        Tok("keyword", literals("""
                                break case catch class const continue debugger
                                default delete do else enum export extends
                                finally for function if import in instanceof new
                                return super switch this throw try typeof var
                                void while with
                                """, suffix=r"\b"), next='reg'),
        Tok("reserved", literals("null true false", suffix=r"\b"), next='div'),
        Tok("id", r"""
                  ([a-zA-Z_$ ]|\\u[0-9a-fA-Z]{4})   # first char
                  ([a-zA-Z_$0-9]|\\u[0-9a-fA-F]{4})*  # rest chars
                  """, next='div'),
        Tok("hnum", r"0[xX][0-9a-fA-F]+", next='div'),
        Tok("onum", r"0[0-7]+"),
        Tok("dnum", r"""
                    (   (0|[1-9][0-9]*)         # DecimalIntegerLiteral
                        \.                      # dot
                        [0-9]*                  # DecimalDigits-opt
                        ([eE][-+]?[0-9]+)?      # ExponentPart-opt
                    |
                        \.                      # dot
                        [0-9]+                  # DecimalDigits
                        ([eE][-+]?[0-9]+)?      # ExponentPart-opt
                    |
                        (0|[1-9][0-9]*)         # DecimalIntegerLiteral
                        ([eE][-+]?[0-9]+)?      # ExponentPart-opt
                    )
                    """, next='div'),
        Tok("punct", literals("""
                              >>>= === !== >>> <<= >>= <= >= == != << >> &&
                              || += -= *= %= &= |= ^=
                              """), next="reg"),
        Tok("punct", literals("++ -- ) ]"), next='div'),
        Tok("punct", literals("{ } ( [ . ; , < > + - * % & | ^ ! ~ ? : ="),
            next='reg'),
        Tok("string", r'"([^"\\]|(\\(.|\n)))*?"', next='div'),
        Tok("string", r"'([^'\\]|(\\(.|\n)))*?'", next='div'),
    ]

    both_after = [
        # catch-all rule: guarantees the lexer always makes progress
        Tok("other", r"."),
    ]

    states = {
        'div':  # slash will mean division
            both_before + [
                Tok("punct", literals("/= /"), next='reg'),
            ] + both_after,

        'reg':  # slash will mean regex
            both_before + [
                Tok("regex",
                    r"""
                    /                       # opening slash
                    # First character is..
                    (   [^*\\/[]            # anything but * \ / or [
                    |   \\.                 # or an escape sequence
                    |   \[                  # or a class, which has
                        (   [^\]\\]         # anything but \ or ]
                        |   \\.             # or an escape sequence
                        )*                  # many times
                        \]
                    )
                    # Following characters are same, except for excluding
                    # a star
                    (   [^\\/[]             # anything but \ / or [
                    |   \\.                 # or an escape sequence
                    |   \[                  # or a class, which has
                        (   [^\]\\]         # anything but \ or ]
                        |   \\.             # or an escape sequence
                        )*                  # many times
                        \]
                    )*                      # many times
                    /                       # closing slash
                    [a-zA-Z0-9]*            # trailing flags
                    """, next='div'),
            ] + both_after,
    }

    def __init__(self):
        super(JsLexer, self).__init__(self.states, 'reg')
|
tetframework/Tonnikala
|
tonnikala/languages/javascript/jslex.py
|
Lexer.lex
|
python
|
def lex(self, text, start=0):
max = len(text)
eaten = start
s = self.state
r = self.regexes
toks = self.toks
while eaten < max:
for match in r[s].finditer(text, eaten):
name = match.lastgroup
tok = toks[name]
toktext = match.group(name)
eaten += len(toktext)
yield (tok.name, toktext)
if tok.next:
s = tok.next
break
self.state = s
|
Lexically analyze `text`.
Yields pairs (`name`, `tokentext`).
|
train
|
https://github.com/tetframework/Tonnikala/blob/99d168657da1b2372ff898254f80808ea8d1b83f/tonnikala/languages/javascript/jslex.py#L52-L75
| null |
class Lexer(object):
"""A generic multi-state regex-based lexer."""
def __init__(self, states, first):
self.regexes = {}
self.toks = {}
for state, rules in states.items():
parts = []
for tok in rules:
groupid = "t%d" % tok.id
self.toks[groupid] = tok
parts.append("(?P<%s>%s)" % (groupid, tok.regex))
self.regexes[state] = re.compile("|".join(parts),
re.MULTILINE | re.VERBOSE)
self.state = first
|
tetframework/Tonnikala
|
tonnikala/loader.py
|
handle_exception
|
python
|
def handle_exception(exc_info=None, source_hint=None, tb_override=_NO):
    """Exception handling helper. This is used internally to either raise
    rewritten exceptions or return a rendered traceback for the template.

    exc_info:    (type, value, traceback) triple; defaults to the current
                 exception via sys.exc_info().
    source_hint: template source text used to render the failing line.
    tb_override: replacement traceback; the _NO sentinel means "unused"
                 (None is a valid override).
    """
    global _make_traceback
    if exc_info is None:  # pragma: no cover
        exc_info = sys.exc_info()

    # the debugging module is imported when it's used for the first time.
    # we're doing a lot of stuff there and for applications that do not
    # get any exceptions in template rendering there is no need to load
    # all of that.
    if _make_traceback is None:
        from .runtime.debug import make_traceback as _make_traceback

    exc_type, exc_value, tb = exc_info
    if tb_override is not _NO:  # pragma: no cover
        tb = tb_override

    traceback = _make_traceback((exc_type, exc_value, tb), source_hint)
    exc_type, exc_value, tb = traceback.standard_exc_info
    reraise(exc_type, exc_value, tb)
|
Exception handling helper. This is used internally to either raise
rewritten exceptions or return a rendered traceback for the template.
|
train
|
https://github.com/tetframework/Tonnikala/blob/99d168657da1b2372ff898254f80808ea8d1b83f/tonnikala/loader.py#L71-L93
|
[
"def reraise(tp, value, tb=None): # pragma: no cover\n if value is None:\n value = tp()\n if value.__traceback__ is not tb:\n raise value.with_traceback(tb)\n raise value\n"
] |
import errno
import sys
import time
import codecs
import os
from .compat import reraise
from .languages.javascript.generator import Generator as JavascriptGenerator
from .languages.python.generator import Generator as PythonGenerator
from .runtime import python, exceptions
from .syntaxes.chameleon import parse as parse_chameleon
from .syntaxes.tonnikala import parse as parse_tonnikala, \
parse_js as parse_js_tonnikala
_make_traceback = None
MIN_CHECK_INTERVAL = 0.25
try: # pragma: python3
import builtins as __builtin__
except ImportError: # pragma: python2
# noinspection PyUnresolvedReferences,PyCompatibility
exec('import __builtin__')
class Helpers():
    """Bare namespace object for template helper callables (the module
    assigns literal/gettext/egettext onto an instance below)."""
    pass
escape = python.escape
helpers = Helpers()
helpers.literal = lambda x: x
helpers.gettext = lambda x: x
helpers.egettext = lambda x: escape(x)
def get_builtins_with_chain(chain=[helpers]):
    """Build a name -> value dict from __builtin__ plus *chain*.

    Names are collected from __builtin__ first, then the chain in
    reverse, so earlier chain objects override later ones and all of
    them override the builtins. Names that start or end with '__' are
    skipped. (The mutable default is safe: *chain* is only read.)
    """
    builtins = {}

    for i in [__builtin__] + list(reversed(chain)):
        for j in dir(i):
            if not j.startswith('__') and not j.endswith('__'):
                builtins[j] = getattr(i, j)

    return builtins
_builtins = None
def get_builtins():
    """Return the default builtins dict, building and caching it lazily."""
    global _builtins
    if _builtins is None:
        _builtins = get_builtins_with_chain()

    return _builtins
NOT_FOUND = object()
def make_template_context(context):
    """Overlay the user *context* on a copy of the default builtins."""
    rv = get_builtins().copy()
    rv.update(context)
    return rv
_NO = object()
class Template(object):
    """A compiled template; wraps the generated __TK__binder function."""

    handle_exception = staticmethod(handle_exception)

    def __init__(self, binder):
        self.binder_func = binder

    def bind(self, context):
        """Populate *context* with the template's functions."""
        self.binder_func(context)

    def render_to_buffer(self, context, funcname='__main__'):
        """Render *funcname* into a Buffer.

        Exceptions are rewritten by handle_exception so tracebacks point
        at template source lines before being re-raised.
        """
        try:
            context = make_template_context(context)
            self.bind(context)
            return context[funcname]()
        except Exception as e:
            exc_info = sys.exc_info()
            try:
                self.handle_exception(exc_info)
            finally:
                # break the frame/traceback reference cycle
                del exc_info

    def render(self, context, funcname='__main__'):
        """Render and join the buffer into a single string."""
        return self.render_to_buffer(context, funcname).join()
parsers = {
'tonnikala': parse_tonnikala,
'js_tonnikala': parse_js_tonnikala,
'chameleon': parse_chameleon,
}
class TemplateInfo(object):
    """Debug metadata for a compiled template: the source filename plus
    the generated-line -> source-line table (lnotab)."""

    def __init__(self, filename, lnotab):
        self.filename = filename
        self.lnotab = lnotab

    def get_corresponding_lineno(self, line):
        """Map a generated-code line to its template source line;
        unknown lines map to themselves."""
        return self.lnotab.get(line, line)
def _new_globals(runtime):
    """Module globals handed to the compiled template code (the __TK__*
    names the generated module prologue expects)."""
    return {
        '__TK__runtime': runtime,
        '__TK__mkbuffer': runtime.Buffer,
        '__TK__escape': runtime.escape,
        '__TK__output_attrs': runtime.output_attrs,
        'literal': helpers.literal
    }
class Loader(object):
    """Compiles template source strings into Template objects."""

    handle_exception = staticmethod(handle_exception)
    runtime = python.TonnikalaRuntime

    def __init__(self, debug=False, syntax='tonnikala', translatable=False):
        self.debug = debug
        self.syntax = syntax
        self.translatable = translatable

    def load_string(self, string, filename="<string>"):
        """Parse, generate and compile *string*; return a Template.

        Raises ValueError for an unknown syntax name; template syntax
        errors are rewritten by handle_exception to point at template
        source lines.
        """
        parser_func = parsers.get(self.syntax)
        if not parser_func:
            # bugfix: the format string takes two values (the syntax name
            # and the list of valid ones). The old code applied '%' to a
            # bare list, which raised TypeError instead of the intended
            # ValueError message.
            raise ValueError("Invalid parser syntax %s: valid syntaxes: %r"
                             % (self.syntax, sorted(parsers.keys())))

        try:
            tree = parser_func(filename, string,
                               translatable=self.translatable)
            gen = PythonGenerator(tree)
            code = gen.generate_ast()
            exc_info = None
        except exceptions.TemplateSyntaxError as e:
            # fill in the source/filename so the traceback renderer can
            # show the offending template line
            if e.source is None:
                e.source = string
            if e.filename is None:
                e.filename = filename

            exc_info = sys.exc_info()

        if exc_info:
            self.handle_exception(exc_info, string, tb_override=None)

        if self.debug:
            import ast

            print(ast.dump(code, True, True))

            try:
                import astor
                print(astor.codegen.to_source(code))
            except ImportError:
                print("Not reversing AST to source as astor was not installed")

        runtime = self.runtime()
        runtime.loader = self
        glob = _new_globals(runtime)

        compiled = compile(code, filename, 'exec')
        glob['__TK_template_info__'] = TemplateInfo(filename,
                                                    gen.lnotab_info())

        # executing the module defines __TK__binder in glob
        exec(compiled, glob, glob)

        template_func = glob['__TK__binder']
        return Template(template_func)
class FileLoader(Loader):
    """Loads templates from the filesystem with caching and optional
    mtime-based auto-reload."""

    def __init__(self, paths=[], debug=False, syntax='tonnikala',
                 *args, **kwargs):
        # the mutable default is safe: `paths` is copied immediately
        super(FileLoader, self).__init__(*args, debug=debug, syntax=syntax,
                                         **kwargs)
        self.cache = {}
        self.paths = list(paths)
        self.reload = False
        self._last_reload_check = time.time()

    def add_path(self, *a):
        """Append one or more search paths."""
        self.paths.extend(a)

    def resolve(self, name):
        """Return the absolute path for template *name*, or None.

        Absolute names are accepted as-is (if the file exists); relative
        names are tried against each search path in order.
        """
        if os.path.isabs(name):
            if os.path.exists(name):
                return name

        for i in self.paths:
            path = os.path.abspath(os.path.join(i, name))
            if os.path.exists(path):
                return path

        return None

    def set_reload(self, flag):
        """Enable or disable the auto-reload check performed by load()."""
        self.reload = flag

    def _maybe_purge_cache(self):
        """
        If enough time since last check has elapsed, check if any
        of the cached templates has changed. If any of the template
        files were deleted, remove that file only. If any were
        changed, then purge the entire cache.
        """
        if self._last_reload_check + MIN_CHECK_INTERVAL > time.time():
            return

        for name, tmpl in list(self.cache.items()):
            try:
                mtime = os.stat(tmpl.path).st_mtime
            except OSError:
                # the file was deleted: evict only this entry.
                # (bugfix: the old `if not os.stat(...)` test could never
                # fire -- os.stat raises instead of returning a falsy
                # value, so a deleted template crashed the purge.)
                self.cache.pop(name, None)
                continue

            if mtime > tmpl.mtime:
                # any modified template purges the whole cache
                self.cache.clear()
                break

        self._last_reload_check = time.time()

    def load(self, name):
        """
        If not yet in the cache, load the named template and compiles it,
        placing it into the cache.

        If in cache, return the cached template.
        """
        if self.reload:
            self._maybe_purge_cache()

        template = self.cache.get(name)
        if template:
            return template

        path = self.resolve(name)
        if not path:
            raise OSError(errno.ENOENT, "File not found: %s" % name)

        with codecs.open(path, 'r', encoding='UTF-8') as f:
            contents = f.read()
            mtime = os.fstat(f.fileno()).st_mtime

        template = self.load_string(contents, filename=path)
        template.mtime = mtime
        template.path = path
        self.cache[name] = template
        return template
class JSLoader(object):
    """Compiles templates into JavaScript source code."""

    def __init__(self, debug=False, syntax='js_tonnikala', minify=False):
        self.debug = debug
        self.syntax = syntax
        self.minify = minify

    def load_string(self, string, filename="<string>"):
        """Parse *string* and return the generated JavaScript source.

        Raises ValueError for an unknown syntax name.
        """
        parser_func = parsers.get(self.syntax)
        if not parser_func:
            # bugfix: supply both format arguments; the old code applied
            # '%' to the bare sorted() list, raising TypeError instead of
            # the intended ValueError.
            raise ValueError("Invalid parser syntax %s: valid syntaxes: %r"
                             % (self.syntax, sorted(parsers.keys())))

        tree = parser_func(filename, string)
        code = JavascriptGenerator(tree).generate_ast()

        if self.debug:
            print("JS template output code for %s" % filename)
            print(code)

        if self.minify:
            # optional third-party dependency, imported lazily
            from slimit import minify
            code = minify(code, mangle=True)

        return code
|
tetframework/Tonnikala
|
tonnikala/loader.py
|
FileLoader._maybe_purge_cache
|
python
|
def _maybe_purge_cache(self):
if self._last_reload_check + MIN_CHECK_INTERVAL > time.time():
return
for name, tmpl in list(self.cache.items()):
if not os.stat(tmpl.path):
self.cache.pop(name)
continue
if os.stat(tmpl.path).st_mtime > tmpl.mtime:
self.cache.clear()
break
self._last_reload_check = time.time()
|
If enough time since last check has elapsed, check if any
of the cached templates has changed. If any of the template
files were deleted, remove that file only. If any were
changed, then purge the entire cache.
|
train
|
https://github.com/tetframework/Tonnikala/blob/99d168657da1b2372ff898254f80808ea8d1b83f/tonnikala/loader.py#L231-L251
| null |
class FileLoader(Loader):
def __init__(self, paths=[], debug=False, syntax='tonnikala', *args, **kwargs):
super(FileLoader, self).__init__(*args, debug=debug, syntax=syntax, **kwargs)
self.cache = {}
self.paths = list(paths)
self.reload = False
self._last_reload_check = time.time()
def add_path(self, *a):
self.paths.extend(a)
def resolve(self, name):
if os.path.isabs(name):
if os.path.exists(name):
return name
for i in self.paths:
path = os.path.abspath(os.path.join(i, name))
if os.path.exists(path):
return path
return None
def set_reload(self, flag):
self.reload = flag
def load(self, name):
"""
If not yet in the cache, load the named template and compiles it,
placing it into the cache.
If in cache, return the cached template.
"""
if self.reload:
self._maybe_purge_cache()
template = self.cache.get(name)
if template:
return template
path = self.resolve(name)
if not path:
raise OSError(errno.ENOENT, "File not found: %s" % name)
with codecs.open(path, 'r', encoding='UTF-8') as f:
contents = f.read()
mtime = os.fstat(f.fileno()).st_mtime
template = self.load_string(contents, filename=path)
template.mtime = mtime
template.path = path
self.cache[name] = template
return template
|
tetframework/Tonnikala
|
tonnikala/loader.py
|
FileLoader.load
|
python
|
def load(self, name):
if self.reload:
self._maybe_purge_cache()
template = self.cache.get(name)
if template:
return template
path = self.resolve(name)
if not path:
raise OSError(errno.ENOENT, "File not found: %s" % name)
with codecs.open(path, 'r', encoding='UTF-8') as f:
contents = f.read()
mtime = os.fstat(f.fileno()).st_mtime
template = self.load_string(contents, filename=path)
template.mtime = mtime
template.path = path
self.cache[name] = template
return template
|
If not yet in the cache, load the named template and compiles it,
placing it into the cache.
If in cache, return the cached template.
|
train
|
https://github.com/tetframework/Tonnikala/blob/99d168657da1b2372ff898254f80808ea8d1b83f/tonnikala/loader.py#L253-L281
|
[
"def load_string(self, string, filename=\"<string>\"):\n parser_func = parsers.get(self.syntax)\n if not parser_func:\n raise ValueError(\"Invalid parser syntax %s: valid syntaxes: %r\"\n % sorted(parsers.keys()))\n\n try:\n tree = parser_func(filename, string, translatable=self.translatable)\n gen = PythonGenerator(tree)\n code = gen.generate_ast()\n exc_info = None\n except exceptions.TemplateSyntaxError as e:\n if e.source is None:\n e.source = string\n if e.filename is None:\n e.filename = filename\n\n exc_info = sys.exc_info()\n\n if exc_info:\n self.handle_exception(exc_info, string, tb_override=None)\n\n if self.debug:\n import ast\n\n print(ast.dump(code, True, True))\n\n try:\n import astor\n print(astor.codegen.to_source(code))\n except ImportError:\n print(\"Not reversing AST to source as astor was not installed\")\n\n runtime = self.runtime()\n runtime.loader = self\n glob = _new_globals(runtime)\n\n compiled = compile(code, filename, 'exec')\n glob['__TK_template_info__'] = TemplateInfo(filename, gen.lnotab_info())\n\n exec(compiled, glob, glob)\n\n template_func = glob['__TK__binder']\n return Template(template_func)\n",
"def resolve(self, name):\n if os.path.isabs(name):\n if os.path.exists(name):\n return name\n\n for i in self.paths:\n path = os.path.abspath(os.path.join(i, name))\n if os.path.exists(path):\n return path\n\n return None\n",
"def _maybe_purge_cache(self):\n \"\"\"\n If enough time since last check has elapsed, check if any\n of the cached templates has changed. If any of the template\n files were deleted, remove that file only. If any were\n changed, then purge the entire cache.\n \"\"\"\n\n if self._last_reload_check + MIN_CHECK_INTERVAL > time.time():\n return\n\n for name, tmpl in list(self.cache.items()):\n if not os.stat(tmpl.path):\n self.cache.pop(name)\n continue\n\n if os.stat(tmpl.path).st_mtime > tmpl.mtime:\n self.cache.clear()\n break\n\n self._last_reload_check = time.time()\n"
] |
class FileLoader(Loader):
def __init__(self, paths=[], debug=False, syntax='tonnikala', *args, **kwargs):
super(FileLoader, self).__init__(*args, debug=debug, syntax=syntax, **kwargs)
self.cache = {}
self.paths = list(paths)
self.reload = False
self._last_reload_check = time.time()
def add_path(self, *a):
self.paths.extend(a)
def resolve(self, name):
if os.path.isabs(name):
if os.path.exists(name):
return name
for i in self.paths:
path = os.path.abspath(os.path.join(i, name))
if os.path.exists(path):
return path
return None
def set_reload(self, flag):
self.reload = flag
def _maybe_purge_cache(self):
"""
If enough time since last check has elapsed, check if any
of the cached templates has changed. If any of the template
files were deleted, remove that file only. If any were
changed, then purge the entire cache.
"""
if self._last_reload_check + MIN_CHECK_INTERVAL > time.time():
return
for name, tmpl in list(self.cache.items()):
if not os.stat(tmpl.path):
self.cache.pop(name)
continue
if os.stat(tmpl.path).st_mtime > tmpl.mtime:
self.cache.clear()
break
self._last_reload_check = time.time()
|
tetframework/Tonnikala
|
tonnikala/runtime/debug.py
|
translate_exception
|
python
|
def translate_exception(exc_info, initial_skip=0):
tb = exc_info[2]
frames = []
# skip some internal frames if wanted
for x in range(initial_skip):
if tb is not None:
tb = tb.tb_next
initial_tb = tb
while tb is not None:
# skip frames decorated with @internalcode. These are internal
# calls we can't avoid and that are useless in template debugging
# output.
if tb.tb_frame.f_code in internal_code:
tb = tb.tb_next
continue
# save a reference to the next frame if we override the current
# one with a faked one.
next = tb.tb_next
# fake template exceptions
template = tb.tb_frame.f_globals.get('__TK_template_info__')
if template is not None:
lineno = template.get_corresponding_lineno(tb.tb_lineno)
tb = fake_exc_info(exc_info[:2] + (tb,), template.filename,
lineno)[2]
frames.append(make_frame_proxy(tb))
tb = next
# if we don't have any exceptions in the frames left, we have to reraise it unchanged. XXX: can we backup here? when could this happen?
if not frames:
reraise(exc_info[0], exc_info[1], exc_info[2])
return ProcessedTraceback(exc_info[0], exc_info[1], frames)
|
If passed an exc_info it will automatically rewrite the exceptions
all the way down to the correct line numbers and frames.
|
train
|
https://github.com/tetframework/Tonnikala/blob/99d168657da1b2372ff898254f80808ea8d1b83f/tonnikala/runtime/debug.py#L162-L201
|
[
"def reraise(tp, value, tb=None): # pragma: no cover\n if value is None:\n value = tp()\n if value.__traceback__ is not tb:\n raise value.with_traceback(tb)\n raise value\n",
"def make_frame_proxy(frame):\n proxy = TracebackFrameProxy(frame)\n if tproxy is None:\n return proxy\n\n def operation_handler(operation, *args, **kwargs):\n if operation in ('__getattribute__', '__getattr__'):\n return getattr(proxy, args[0])\n elif operation == '__setattr__':\n proxy.__setattr__(*args, **kwargs)\n else:\n return getattr(proxy, operation)(*args, **kwargs)\n\n return tproxy(TracebackType, operation_handler)\n",
"def fake_exc_info(exc_info, filename, lineno):\n \"\"\"Helper for `translate_exception`.\"\"\"\n exc_type, exc_value, tb = exc_info\n\n # figure the real context out\n if tb is not None:\n # if there is a local called __tonnikala_exception__, we get\n # rid of it to not break the debug functionality.\n locals = tb.tb_frame.f_locals.copy()\n locals.pop('__tonnikala_exception__', None)\n else:\n locals = {}\n\n # assemble fake globals we need\n globals = {\n '__name__': filename,\n '__file__': filename,\n '__tonnikala_exception__': exc_info[:2],\n\n # we don't want to keep the reference to the template around\n # to not cause circular dependencies, but we mark it as Tonnikala\n # frame for the ProcessedTraceback\n '__TK_template_info__': None\n }\n\n # and fake the exception\n lineno = lineno or 0\n code = compile('\\n' * (lineno - 1) + raise_helper, filename, 'exec')\n\n # if it's possible, change the name of the code. This won't work\n # on some python environments such as google appengine\n try:\n if tb is None:\n location = 'template'\n else:\n function = tb.tb_frame.f_code.co_name\n if function == '__main__':\n location = 'top-level template code'\n elif function.startswith('__TK__block__'):\n location = 'block \"%s\"' % function[13:]\n elif function.startswith('__TK__typed__'):\n functype = function[13:].split('__')[0].replace('_', ' ')\n location = functype\n elif function.startswith('__TK_'):\n location = 'template'\n else:\n location = 'def \"%s\"' % function\n\n if not PY2: # pragma: python3\n code = CodeType(0, code.co_kwonlyargcount, code.co_nlocals,\n code.co_stacksize,\n code.co_flags, code.co_code, code.co_consts,\n code.co_names, code.co_varnames, filename,\n location, code.co_firstlineno,\n code.co_lnotab, (), ())\n\n else: # pragma: python2\n code = CodeType(0, code.co_nlocals, code.co_stacksize,\n code.co_flags, code.co_code, code.co_consts,\n code.co_names, code.co_varnames, filename,\n location, code.co_firstlineno,\n code.co_lnotab, (), ())\n\n 
except Exception as e:\n pass\n\n # execute the code and catch the new traceback\n try:\n exec(code, globals, locals)\n except:\n exc_info = sys.exc_info()\n new_tb = exc_info[2].tb_next\n\n # return without this frame\n return exc_info[:2] + (new_tb,)\n"
] |
# -*- coding: utf-8 -*-
"""
tonnikala.runtime.debug
~~~~~~~~~~~~~~~~~~~~~~~
Implements the debug interface for Tonnikala. This module does some pretty
ugly stuff with the Python traceback system in order to achieve tracebacks
with correct line numbers, locals and contents.
Based on Jinja2 module `jinja2.debug`,
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import sys
import traceback
from types import TracebackType, CodeType
from .exceptions import TemplateSyntaxError
from ..compat import reraise, PY2
from ..helpers import internal_code
# on pypy we can take advantage of transparent proxies
try:
from __pypy__ import tproxy
except ImportError:
tproxy = None
''
# how does the raise helper look like?
try:
exec("raise TypeError, 'foo'")
except SyntaxError: # pragma: python3
raise_helper = '__tonnikala_exception__[1].__traceback__ = None; raise ' \
'__tonnikala_exception__[1]'
except TypeError: # pragma: python2
raise_helper = 'raise __tonnikala_exception__[0], ' \
'__tonnikala_exception__[1]'
class TracebackFrameProxy(object):
"""Proxies a traceback frame."""
def __init__(self, tb):
self.tb = tb
self._tb_next = None
@property
def tb_next(self):
return self._tb_next
def set_next(self, next):
if tb_set_next is not None:
try:
tb_set_next(self.tb, next and next.tb or None)
except Exception:
# this function can fail due to all the hackery it does
# on various python implementations. We just catch errors
# down and ignore them if necessary.
pass
self._tb_next = next
@property
def is_tonnikala_frame(self):
return '__tonnikala_template__' in self.tb.tb_frame.f_globals
def __getattr__(self, name):
return getattr(self.tb, name)
def make_frame_proxy(frame):
proxy = TracebackFrameProxy(frame)
if tproxy is None:
return proxy
def operation_handler(operation, *args, **kwargs):
if operation in ('__getattribute__', '__getattr__'):
return getattr(proxy, args[0])
elif operation == '__setattr__':
proxy.__setattr__(*args, **kwargs)
else:
return getattr(proxy, operation)(*args, **kwargs)
return tproxy(TracebackType, operation_handler)
class ProcessedTraceback(object):
"""Holds a Tonnikala preprocessed traceback for printing or reraising."""
def __init__(self, exc_type, exc_value, frames):
assert frames, 'no frames for this traceback?'
self.exc_type = exc_type
self.exc_value = exc_value
self.frames = frames
# newly concatenate the frames (which are proxies)
prev_tb = None
for tb in self.frames:
if prev_tb is not None:
prev_tb.set_next(tb)
prev_tb = tb
prev_tb.set_next(None)
def render_as_text(self, limit=None):
"""Return a string with the traceback."""
lines = traceback.format_exception(self.exc_type, self.exc_value,
self.frames[0], limit=limit)
return ''.join(lines).rstrip()
def render_as_html(self, full=False):
"""Return a unicode string with the traceback as rendered HTML."""
from jinja2.debugrenderer import render_traceback
return u'%s\n\n<!--\n%s\n-->' % (
render_traceback(self, full=full),
self.render_as_text().decode('utf-8', 'replace')
)
@property
def is_template_syntax_error(self):
"""`True` if this is a template syntax error."""
return isinstance(self.exc_value, TemplateSyntaxError)
@property
def exc_info(self):
"""Exception info tuple with a proxy around the frame objects."""
return self.exc_type, self.exc_value, self.frames[0]
@property
def standard_exc_info(self):
"""Standard python exc_info for re-raising"""
tb = self.frames[0]
# the frame will be an actual traceback (or transparent proxy) if
# we are on pypy or a python implementation with support for tproxy
if type(tb) is not TracebackType:
tb = tb.tb
return self.exc_type, self.exc_value, tb
def make_traceback(exc_info, source_hint=None):
"""Creates a processed traceback object from the exc_info."""
exc_type, exc_value, tb = exc_info
if isinstance(exc_value, TemplateSyntaxError):
exc_info = translate_syntax_error(exc_value, source_hint)
initial_skip = 0
else:
initial_skip = 1
return translate_exception(exc_info, initial_skip)
def translate_syntax_error(error, source=None):
"""Rewrites a syntax error to please traceback systems."""
error.source = source
error.translated = True
exc_info = (error.__class__, error, None)
filename = error.filename
if filename is None:
filename = '<unknown>'
return fake_exc_info(exc_info, filename, error.lineno)
def fake_exc_info(exc_info, filename, lineno):
"""Helper for `translate_exception`."""
exc_type, exc_value, tb = exc_info
# figure the real context out
if tb is not None:
# if there is a local called __tonnikala_exception__, we get
# rid of it to not break the debug functionality.
locals = tb.tb_frame.f_locals.copy()
locals.pop('__tonnikala_exception__', None)
else:
locals = {}
# assemble fake globals we need
globals = {
'__name__': filename,
'__file__': filename,
'__tonnikala_exception__': exc_info[:2],
# we don't want to keep the reference to the template around
# to not cause circular dependencies, but we mark it as Tonnikala
# frame for the ProcessedTraceback
'__TK_template_info__': None
}
# and fake the exception
lineno = lineno or 0
code = compile('\n' * (lineno - 1) + raise_helper, filename, 'exec')
# if it's possible, change the name of the code. This won't work
# on some python environments such as google appengine
try:
if tb is None:
location = 'template'
else:
function = tb.tb_frame.f_code.co_name
if function == '__main__':
location = 'top-level template code'
elif function.startswith('__TK__block__'):
location = 'block "%s"' % function[13:]
elif function.startswith('__TK__typed__'):
functype = function[13:].split('__')[0].replace('_', ' ')
location = functype
elif function.startswith('__TK_'):
location = 'template'
else:
location = 'def "%s"' % function
if not PY2: # pragma: python3
code = CodeType(0, code.co_kwonlyargcount, code.co_nlocals,
code.co_stacksize,
code.co_flags, code.co_code, code.co_consts,
code.co_names, code.co_varnames, filename,
location, code.co_firstlineno,
code.co_lnotab, (), ())
else: # pragma: python2
code = CodeType(0, code.co_nlocals, code.co_stacksize,
code.co_flags, code.co_code, code.co_consts,
code.co_names, code.co_varnames, filename,
location, code.co_firstlineno,
code.co_lnotab, (), ())
except Exception as e:
pass
# execute the code and catch the new traceback
try:
exec(code, globals, locals)
except:
exc_info = sys.exc_info()
new_tb = exc_info[2].tb_next
# return without this frame
return exc_info[:2] + (new_tb,)
def _init_ugly_crap():
"""This function implements a few ugly things so that we can patch the
traceback objects. The function returned allows resetting `tb_next` on
any python traceback object. Do not attempt to use this on non cpython
interpreters
"""
import ctypes
from types import TracebackType
# figure out size of _Py_ssize_t
_Py_ssize_t = ctypes.c_int
if PY2: # pragma: python2
if hasattr(ctypes.pythonapi, 'Py_InitModule4_64'): # pragma: no cover
_Py_ssize_t = ctypes.c_int64
else: # pragma: no cover
_Py_ssize_t = ctypes.c_int
else:
_Py_ssize_t = ctypes.c_ssize_t
if hasattr(sys, 'getobjects'): # pragma: no cover
# cannot support this, as don't have access to it
raise Exception('traceback hacking not supported on tracing Python builds')
# this isn't the full structure definition but we don't need the rest anyway,
# these are enough here. All struct pointers being compatible we use a wrong
# struct pointer for ob_type.
class _Traceback(ctypes.Structure):
pass
_Traceback._fields_ = [
('ob_refcnt', _Py_ssize_t),
('ob_type', ctypes.POINTER(_Traceback)),
('tb_next', ctypes.POINTER(_Traceback)),
]
def tb_set_next(tb, next):
"""Set the tb_next attribute of a traceback object."""
if not (isinstance(tb, TracebackType) and
(next is None or isinstance(next, TracebackType))):
raise TypeError('tb_set_next arguments must be traceback objects')
obj = _Traceback.from_address(id(tb))
if tb.tb_next is not None:
old = _Traceback.from_address(id(tb.tb_next))
old.ob_refcnt -= 1
if next is None:
obj.tb_next = ctypes.POINTER(_Traceback)()
else:
next = _Traceback.from_address(id(next))
next.ob_refcnt += 1
obj.tb_next = ctypes.pointer(next)
return tb_set_next
# try to get a tb_set_next implementation if we don't have transparent
# proxies.
tb_set_next = None
if tproxy is None: # pragma: no cover
try:
tb_set_next = _init_ugly_crap()
except Exception:
pass
del _init_ugly_crap
|
tetframework/Tonnikala
|
tonnikala/runtime/debug.py
|
fake_exc_info
|
python
|
def fake_exc_info(exc_info, filename, lineno):
exc_type, exc_value, tb = exc_info
# figure the real context out
if tb is not None:
# if there is a local called __tonnikala_exception__, we get
# rid of it to not break the debug functionality.
locals = tb.tb_frame.f_locals.copy()
locals.pop('__tonnikala_exception__', None)
else:
locals = {}
# assemble fake globals we need
globals = {
'__name__': filename,
'__file__': filename,
'__tonnikala_exception__': exc_info[:2],
# we don't want to keep the reference to the template around
# to not cause circular dependencies, but we mark it as Tonnikala
# frame for the ProcessedTraceback
'__TK_template_info__': None
}
# and fake the exception
lineno = lineno or 0
code = compile('\n' * (lineno - 1) + raise_helper, filename, 'exec')
# if it's possible, change the name of the code. This won't work
# on some python environments such as google appengine
try:
if tb is None:
location = 'template'
else:
function = tb.tb_frame.f_code.co_name
if function == '__main__':
location = 'top-level template code'
elif function.startswith('__TK__block__'):
location = 'block "%s"' % function[13:]
elif function.startswith('__TK__typed__'):
functype = function[13:].split('__')[0].replace('_', ' ')
location = functype
elif function.startswith('__TK_'):
location = 'template'
else:
location = 'def "%s"' % function
if not PY2: # pragma: python3
code = CodeType(0, code.co_kwonlyargcount, code.co_nlocals,
code.co_stacksize,
code.co_flags, code.co_code, code.co_consts,
code.co_names, code.co_varnames, filename,
location, code.co_firstlineno,
code.co_lnotab, (), ())
else: # pragma: python2
code = CodeType(0, code.co_nlocals, code.co_stacksize,
code.co_flags, code.co_code, code.co_consts,
code.co_names, code.co_varnames, filename,
location, code.co_firstlineno,
code.co_lnotab, (), ())
except Exception as e:
pass
# execute the code and catch the new traceback
try:
exec(code, globals, locals)
except:
exc_info = sys.exc_info()
new_tb = exc_info[2].tb_next
# return without this frame
return exc_info[:2] + (new_tb,)
|
Helper for `translate_exception`.
|
train
|
https://github.com/tetframework/Tonnikala/blob/99d168657da1b2372ff898254f80808ea8d1b83f/tonnikala/runtime/debug.py#L204-L278
| null |
# -*- coding: utf-8 -*-
"""
tonnikala.runtime.debug
~~~~~~~~~~~~~~~~~~~~~~~
Implements the debug interface for Tonnikala. This module does some pretty
ugly stuff with the Python traceback system in order to achieve tracebacks
with correct line numbers, locals and contents.
Based on Jinja2 module `jinja2.debug`,
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import sys
import traceback
from types import TracebackType, CodeType
from .exceptions import TemplateSyntaxError
from ..compat import reraise, PY2
from ..helpers import internal_code
# on pypy we can take advantage of transparent proxies
try:
from __pypy__ import tproxy
except ImportError:
tproxy = None
''
# how does the raise helper look like?
try:
exec("raise TypeError, 'foo'")
except SyntaxError: # pragma: python3
raise_helper = '__tonnikala_exception__[1].__traceback__ = None; raise ' \
'__tonnikala_exception__[1]'
except TypeError: # pragma: python2
raise_helper = 'raise __tonnikala_exception__[0], ' \
'__tonnikala_exception__[1]'
class TracebackFrameProxy(object):
"""Proxies a traceback frame."""
def __init__(self, tb):
self.tb = tb
self._tb_next = None
@property
def tb_next(self):
return self._tb_next
def set_next(self, next):
if tb_set_next is not None:
try:
tb_set_next(self.tb, next and next.tb or None)
except Exception:
# this function can fail due to all the hackery it does
# on various python implementations. We just catch errors
# down and ignore them if necessary.
pass
self._tb_next = next
@property
def is_tonnikala_frame(self):
return '__tonnikala_template__' in self.tb.tb_frame.f_globals
def __getattr__(self, name):
return getattr(self.tb, name)
def make_frame_proxy(frame):
proxy = TracebackFrameProxy(frame)
if tproxy is None:
return proxy
def operation_handler(operation, *args, **kwargs):
if operation in ('__getattribute__', '__getattr__'):
return getattr(proxy, args[0])
elif operation == '__setattr__':
proxy.__setattr__(*args, **kwargs)
else:
return getattr(proxy, operation)(*args, **kwargs)
return tproxy(TracebackType, operation_handler)
class ProcessedTraceback(object):
"""Holds a Tonnikala preprocessed traceback for printing or reraising."""
def __init__(self, exc_type, exc_value, frames):
assert frames, 'no frames for this traceback?'
self.exc_type = exc_type
self.exc_value = exc_value
self.frames = frames
# newly concatenate the frames (which are proxies)
prev_tb = None
for tb in self.frames:
if prev_tb is not None:
prev_tb.set_next(tb)
prev_tb = tb
prev_tb.set_next(None)
def render_as_text(self, limit=None):
"""Return a string with the traceback."""
lines = traceback.format_exception(self.exc_type, self.exc_value,
self.frames[0], limit=limit)
return ''.join(lines).rstrip()
def render_as_html(self, full=False):
"""Return a unicode string with the traceback as rendered HTML."""
from jinja2.debugrenderer import render_traceback
return u'%s\n\n<!--\n%s\n-->' % (
render_traceback(self, full=full),
self.render_as_text().decode('utf-8', 'replace')
)
@property
def is_template_syntax_error(self):
"""`True` if this is a template syntax error."""
return isinstance(self.exc_value, TemplateSyntaxError)
@property
def exc_info(self):
"""Exception info tuple with a proxy around the frame objects."""
return self.exc_type, self.exc_value, self.frames[0]
@property
def standard_exc_info(self):
"""Standard python exc_info for re-raising"""
tb = self.frames[0]
# the frame will be an actual traceback (or transparent proxy) if
# we are on pypy or a python implementation with support for tproxy
if type(tb) is not TracebackType:
tb = tb.tb
return self.exc_type, self.exc_value, tb
def make_traceback(exc_info, source_hint=None):
"""Creates a processed traceback object from the exc_info."""
exc_type, exc_value, tb = exc_info
if isinstance(exc_value, TemplateSyntaxError):
exc_info = translate_syntax_error(exc_value, source_hint)
initial_skip = 0
else:
initial_skip = 1
return translate_exception(exc_info, initial_skip)
def translate_syntax_error(error, source=None):
"""Rewrites a syntax error to please traceback systems."""
error.source = source
error.translated = True
exc_info = (error.__class__, error, None)
filename = error.filename
if filename is None:
filename = '<unknown>'
return fake_exc_info(exc_info, filename, error.lineno)
def translate_exception(exc_info, initial_skip=0):
"""If passed an exc_info it will automatically rewrite the exceptions
all the way down to the correct line numbers and frames.
"""
tb = exc_info[2]
frames = []
# skip some internal frames if wanted
for x in range(initial_skip):
if tb is not None:
tb = tb.tb_next
initial_tb = tb
while tb is not None:
# skip frames decorated with @internalcode. These are internal
# calls we can't avoid and that are useless in template debugging
# output.
if tb.tb_frame.f_code in internal_code:
tb = tb.tb_next
continue
# save a reference to the next frame if we override the current
# one with a faked one.
next = tb.tb_next
# fake template exceptions
template = tb.tb_frame.f_globals.get('__TK_template_info__')
if template is not None:
lineno = template.get_corresponding_lineno(tb.tb_lineno)
tb = fake_exc_info(exc_info[:2] + (tb,), template.filename,
lineno)[2]
frames.append(make_frame_proxy(tb))
tb = next
# if we don't have any exceptions in the frames left, we have to reraise it unchanged. XXX: can we backup here? when could this happen?
if not frames:
reraise(exc_info[0], exc_info[1], exc_info[2])
return ProcessedTraceback(exc_info[0], exc_info[1], frames)
def _init_ugly_crap():
"""This function implements a few ugly things so that we can patch the
traceback objects. The function returned allows resetting `tb_next` on
any python traceback object. Do not attempt to use this on non cpython
interpreters
"""
import ctypes
from types import TracebackType
# figure out size of _Py_ssize_t
_Py_ssize_t = ctypes.c_int
if PY2: # pragma: python2
if hasattr(ctypes.pythonapi, 'Py_InitModule4_64'): # pragma: no cover
_Py_ssize_t = ctypes.c_int64
else: # pragma: no cover
_Py_ssize_t = ctypes.c_int
else:
_Py_ssize_t = ctypes.c_ssize_t
if hasattr(sys, 'getobjects'): # pragma: no cover
# cannot support this, as don't have access to it
raise Exception('traceback hacking not supported on tracing Python builds')
# this isn't the full structure definition but we don't need the rest anyway,
# these are enough here. All struct pointers being compatible we use a wrong
# struct pointer for ob_type.
class _Traceback(ctypes.Structure):
pass
_Traceback._fields_ = [
('ob_refcnt', _Py_ssize_t),
('ob_type', ctypes.POINTER(_Traceback)),
('tb_next', ctypes.POINTER(_Traceback)),
]
def tb_set_next(tb, next):
"""Set the tb_next attribute of a traceback object."""
if not (isinstance(tb, TracebackType) and
(next is None or isinstance(next, TracebackType))):
raise TypeError('tb_set_next arguments must be traceback objects')
obj = _Traceback.from_address(id(tb))
if tb.tb_next is not None:
old = _Traceback.from_address(id(tb.tb_next))
old.ob_refcnt -= 1
if next is None:
obj.tb_next = ctypes.POINTER(_Traceback)()
else:
next = _Traceback.from_address(id(next))
next.ob_refcnt += 1
obj.tb_next = ctypes.pointer(next)
return tb_set_next
# try to get a tb_set_next implementation if we don't have transparent
# proxies.
tb_set_next = None
if tproxy is None: # pragma: no cover
try:
tb_set_next = _init_ugly_crap()
except Exception:
pass
del _init_ugly_crap
|
collectiveacuity/jsonModel
|
jsonmodel/extensions.py
|
tabulate
|
python
|
def tabulate(json_model):
'''
a function to add the tabulate method to a jsonModel object
:param json_model: jsonModel object
:return: jsonModel object
'''
import types
from jsonmodel._extensions import tabulate as _tabulate
try:
from tabulate import tabulate
except:
import sys
print('jsonmodel.extensions.tabulate requires the tabulate module. try: pip install tabulate')
sys.exit(1)
setattr(json_model, 'tabulate', _tabulate.__get__(json_model, types.MethodType))
return json_model
|
a function to add the tabulate method to a jsonModel object
:param json_model: jsonModel object
:return: jsonModel object
|
train
|
https://github.com/collectiveacuity/jsonModel/blob/1ea64c36d78add3faa7b85ff82c5ec685458c940/jsonmodel/extensions.py#L6-L26
| null |
''' a package of extensions to a jsonModel class object '''
__author__ = 'rcj1492'
__created__ = '2018.03'
__license__ = 'MIT'
def tabulate(json_model):
'''
a function to add the tabulate method to a jsonModel object
:param json_model: jsonModel object
:return: jsonModel object
'''
import types
from jsonmodel._extensions import tabulate as _tabulate
try:
from tabulate import tabulate
except:
import sys
print('jsonmodel.extensions.tabulate requires the tabulate module. try: pip install tabulate')
sys.exit(1)
setattr(json_model, 'tabulate', _tabulate.__get__(json_model, types.MethodType))
return json_model
if __name__ == '__main__':
from jsonmodel import __module__
from jsonmodel.loader import jsonLoader
from jsonmodel.validators import jsonModel
model_rules = jsonLoader(__module__, '../samples/sample-model.json')
model_rules['components']['.']['extra_fields'] = True
model_rules['components']['.datetime']['field_description'] = 'https://collectiveacuity.com'
rules_model = jsonModel(model_rules)
rules_model = tabulate(rules_model)
documentation = rules_model.tabulate(syntax='javascript')
with open('../docs/test.md', 'wt') as f:
f.write(documentation)
f.close()
|
collectiveacuity/jsonModel
|
jsonmodel/validators.py
|
jsonModel._evaluate_field
|
python
|
def _evaluate_field(self, record_dict, field_name, field_criteria):
''' a helper method for evaluating record values based upon query criteria
:param record_dict: dictionary with model valid data to evaluate
:param field_name: string with path to root of query field
:param field_criteria: dictionary with query operators and qualifiers
:return: boolean (True if no field_criteria evaluate to false)
'''
# determine value existence criteria
value_exists = True
if 'value_exists' in field_criteria.keys():
if not field_criteria['value_exists']:
value_exists = False
# validate existence of field
field_exists = True
try:
record_values = self._walk(field_name, record_dict)
except:
field_exists = False
# evaluate existence query criteria
if value_exists != field_exists:
return False
elif not value_exists:
return True
# convert javascript dot_path to class dot_path
field_key = field_name
if not field_name:
field_key = '.'
else:
if field_name[0] != '.':
field_key = '.%s' % field_name
# evaluate other query criteria
for key, value in field_criteria.items():
if key in ('min_size', 'min_length'):
found = False
if self.keyMap[field_key]['value_datatype'] == 'map':
for record_value in record_values:
record_size = sys.getsizeof(json.dumps(str(record_value)).replace(' ','')) - 51
if record_size >= value:
found = True
break
else:
for record_value in record_values:
if len(record_value) >= value:
found = True
break
if not found:
return False
elif key in ('max_size', 'max_length'):
found = False
if self.keyMap[field_key]['value_datatype'] == 'map':
for record_value in record_values:
record_size = sys.getsizeof(json.dumps(str(record_value)).replace(' ','')) - 51
if record_size <= value:
found = True
break
else:
for record_value in record_values:
if len(record_value) <= value:
found = True
break
if not found:
return False
elif key == 'min_value':
found = False
for record_value in record_values:
if record_value >= value:
found = True
break
if not found:
return False
elif key == 'max_value':
found = False
for record_value in record_values:
if record_value <= value:
found = True
break
if not found:
return False
elif key == 'equal_to':
found = False
for record_value in record_values:
if record_value == value:
found = True
break
if not found:
return False
elif key == 'greater_than':
found = False
for record_value in record_values:
if record_value > value:
found = True
break
if not found:
return False
elif key == 'less_than':
found = False
for record_value in record_values:
if record_value < value:
found = True
break
if not found:
return False
elif key == 'excluded_values':
for record_value in record_values:
if record_value in value:
return False
elif key == 'discrete_values':
found = False
for record_value in record_values:
if record_value in value:
found = True
break
if not found:
return False
elif key == 'integer_data':
found = False
dummy_int = 1
for record_value in record_values:
if record_value.__class__ == dummy_int.__class__:
found = True
break
if value != found:
return False
elif key == 'byte_data':
found = False
for record_value in record_values:
try:
decoded_bytes = b64decode(record_value)
except:
decoded_bytes = ''
if isinstance(decoded_bytes, bytes):
found = True
break
if value != found:
return False
elif key == 'must_contain':
for regex in value:
regex_pattern = re.compile(regex)
found = False
for record_value in record_values:
if regex_pattern.findall(record_value):
found = True
break
if not found:
return False
elif key == 'must_not_contain':
for regex in value:
regex_pattern = re.compile(regex)
found = False
for record_value in record_values:
if regex_pattern.findall(record_value):
found = True
break
if found:
return False
elif key == 'contains_either':
found = False
for regex in value:
regex_pattern = re.compile(regex)
for record_value in record_values:
if regex_pattern.findall(record_value):
found = True
break
if found:
break
if not found:
return False
elif key == 'unique_values':
for record_value in record_values:
unique_values = True
if len(record_value) != len(set(record_value)):
unique_values = False
if value != unique_values:
return False
return True
|
a helper method for evaluating record values based upon query criteria
:param record_dict: dictionary with model valid data to evaluate
:param field_name: string with path to root of query field
:param field_criteria: dictionary with query operators and qualifiers
:return: boolean (True if no field_criteria evaluate to false)
|
train
|
https://github.com/collectiveacuity/jsonModel/blob/1ea64c36d78add3faa7b85ff82c5ec685458c940/jsonmodel/validators.py#L548-L730
| null |
class jsonModel(object):
__rules__ = jsonLoader('jsonmodel', 'models/model-rules.json')
def __init__(self, data_model, query_rules=None):

    '''
        a method for testing data model declaration & initializing the class

    :param data_model: dictionary with json model architecture
    :param query_rules: [optional] dictionary with valid field type qualifiers
    :return: object with jsonModel methods
    '''

    # validate schema input
    if not isinstance(data_model, dict):
        raise ModelValidationError('Data model must be a dictionary.')
    elif 'schema' not in data_model.keys():
        raise ModelValidationError('Data model must have a schema key.')
    elif not isinstance(data_model['schema'], dict):
        raise ModelValidationError('Value for the data model "schema" field must be a dictionary.')
    elif not data_model['schema']:
        raise ModelValidationError('Data model "schema" field must not be empty.')

    # construct base methods (deepcopy prevents mutation of the caller's model)
    from copy import deepcopy
    data_model = deepcopy(data_model)
    self.schema = data_model['schema']
    model_map = mapModel(self.schema)
    self.keyName = model_map.keyName
    self.keyCriteria = model_map.keyCriteria

    # construct protected type classes
    self._datatype_names = mapModel._datatype_names
    self._datatype_classes = mapModel._datatype_classes

    # validate absence of item designators in keys
    # (raw string avoids invalid-escape warnings on modern Python; same pattern)
    item_pattern = re.compile(r'\[\d+\]')
    for i in range(len(self.keyName)):
        patterns_found = item_pattern.findall(self.keyName[i])
        if patterns_found:
            for designator in patterns_found:
                if designator != '[0]':
                    message = 'Key name for schema field %s must not contain the item designator pattern %s' % (self.keyName[i], designator)
                    raise ModelValidationError(message)

    # validate existence of first item in list declarations
    key_set = set(self.keyName)
    for i in range(len(self.keyName)):
        if self.keyCriteria[i]['value_datatype'] == 'list':
            item_key = self.keyName[i] + '[0]'
            if not item_key in key_set:
                message = 'Schema field %s must declare an initial item for the list.' % self.keyName[i]
                raise ModelValidationError(message)
            # alter list requirement if first item is empty
            else:
                item_index = self.keyName.index(item_key)
                if not self.keyCriteria[item_index]['required_field']:
                    self.keyCriteria[i]['required_field'] = False

    # validate title input & construct title property
    self.title = ''
    if 'title' in data_model.keys():
        if not isinstance(data_model['title'], str):
            raise ModelValidationError('Value for model title must be a string.')
        self.title = data_model['title']

    # validate description input & construct description property
    self.description = ''
    if 'description' in data_model.keys():
        if not isinstance(data_model['description'], str):
            raise ModelValidationError('Value for model description must be a string.')
        self.description = data_model['description']

    # validate url input & construct url property
    self.url = ''
    if 'url' in data_model.keys():
        if not isinstance(data_model['url'], str):
            raise ModelValidationError('Value for model url must be a string.')
        # BUGFIX: previously assigned data_model['url'] to self.title (copy-paste
        # error), which clobbered the declared title and left self.url always ''
        self.url = data_model['url']

    # validate metadata input & construct metadata property
    self.metadata = {}
    if 'metadata' in data_model.keys():
        if not isinstance(data_model['metadata'], dict):
            raise ModelValidationError('Value for model metadata must be a dictionary.')
        self.metadata = data_model['metadata']

    # NOTE: a model-level 'max_size' qualifier was drafted here but never implemented

    # validate components input & construct components property
    self.components = {}
    if 'components' in data_model.keys():
        if not isinstance(data_model['components'], dict):
            raise ModelValidationError('Value for model components must be a dictionary.')
        self.components = self._validate_fields(data_model['components'], self.__rules__['components'])

    # construct keyMap fields from key names and key criteria
    self.keyMap = {}
    for i in range(len(self.keyName)):
        self.keyMap[self.keyName[i]] = self.keyCriteria[i]
    for key, value in self.components.items():
        # convert javascript dot_path to class dot_path
        dot_key = ''
        if not key:
            dot_key = '.'
        else:
            if key[0] != '.':
                dot_key = '.%s' % key
        # add component declarations to keyMap
        if key in self.keyMap.keys():
            for k, v in self.components[key].items():
                self.keyMap[key][k] = v
        elif dot_key and dot_key in self.keyMap.keys():
            for k, v in self.components[key].items():
                self.keyMap[dot_key][k] = v

    # validate default values in lists
    self._validate_defaults(self.keyMap)

    # construct queryRules property from class model rules
    # (strips qualifiers that only make sense for input validation)
    self.queryRules = {}
    for key, value in self.__rules__['components'].items():
        remove_from_query = [ 'required_field', 'default_value', 'example_values', 'field_title', 'field_position', 'field_description', 'field_metadata', 'extra_fields' ]
        field_qualifiers = {
            'value_exists': False
        }
        for k, v in value.items():
            if k not in remove_from_query:
                field_qualifiers[k] = v
        self.queryRules[key] = field_qualifiers

    # validate query rules input and replace queryRules property
    if query_rules:
        if not isinstance(query_rules, dict):
            message = 'Value for query rules input must be a dictionary.'
            raise ModelValidationError(message)
        input_set = set(query_rules.keys())
        req_set = set(self.queryRules.keys())
        if input_set - req_set:
            message = 'Query rules input may only have %s field key names.' % req_set
            raise ModelValidationError(message)
        elif req_set - input_set:
            message = 'Query rules input must have all %s field key names.' % req_set
            raise ModelValidationError(message)
        for key in req_set:
            if not isinstance(query_rules[key], dict):
                message = 'Value for query rules %s field must be a dictionary.' % key
                raise ModelValidationError(message)
            input_qualifier_set = set(query_rules[key].keys())
            req_qualifier_set = set(self.queryRules[key].keys())
            if input_qualifier_set - req_qualifier_set:
                message = 'Query rules field %s may only have qualifiers %s' % (key, req_qualifier_set)
                raise ModelValidationError(message)
            for k, v in query_rules[key].items():
                if v.__class__ != self.queryRules[key][k].__class__:
                    qualifier_index = self._datatype_classes.index(self.queryRules[key][k].__class__)
                    qualifier_type = self._datatype_names[qualifier_index]
                    message = 'Value for query rules field %s qualifier %s must be a "%s" datatype.' % (key, k, qualifier_type)
                    raise ModelValidationError(message)
        self.queryRules = query_rules
def _validate_fields(self, fields_dict, fields_rules, declared_value=True):

    '''
        a helper method for validating component criteria declared for model fields

    :param fields_dict: dictionary with qualifier criteria keyed by field dot-path
    :param fields_rules: dictionary with the valid qualifiers for each field datatype
    :param declared_value: [optional] boolean to also test values declared in the schema
    :return: fields_dict
    '''

    # validate key names in fields
    for key, value in fields_dict.items():
        # convert javascript dot_path to class dot_path
        if not key:
            key = '.'
        else:
            if key[0] != '.':
                key = '.%s' % key
        if key not in self.keyName:
            raise ModelValidationError('Field %s is not a field declared in model schema.' % key)
        elif not isinstance(value, dict):
            raise ModelValidationError('Value for field %s must be a dictionary.' % key)
        # validate field criteria are appropriate to field datatype
        value_type = self.keyCriteria[self.keyName.index(key)]['value_datatype']
        type_dict = {}
        if value_type == 'string':
            type_dict = fields_rules['.string_fields']
        elif value_type == 'number':
            type_dict = fields_rules['.number_fields']
        elif value_type == 'boolean':
            type_dict = fields_rules['.boolean_fields']
        elif value_type == 'list':
            type_dict = fields_rules['.list_fields']
        elif value_type == 'map':
            type_dict = fields_rules['.map_fields']
        elif value_type == 'null':
            type_dict = fields_rules['.null_fields']
        if set(value.keys()) - set(type_dict.keys()):
            raise ModelValidationError('Field %s may only have datatype %s qualifiers %s.' % (key, value_type, set(type_dict.keys())))
        # validate criteria qualifier values are appropriate datatype
        # (datatype of each qualifier value must match the rules' exemplar value)
        for k, v in value.items():
            v_index = self._datatype_classes.index(v.__class__)
            v_type = self._datatype_names[v_index]
            qualifier_index = self._datatype_classes.index(type_dict[k].__class__)
            qualifier_type = self._datatype_names[qualifier_index]
            if v_type != qualifier_type:
                message = 'Value for field %s qualifier %s must be a %s datatype.' % (key, k, qualifier_type)
                raise ModelValidationError(message)
            if qualifier_type == 'number':
                # an integer exemplar in the rules requires an integer qualifier value
                if isinstance(type_dict[k], int):
                    if not isinstance(v, int):
                        message = 'Value for field %s qualifier %s must be an integer.' % (key, k)
                        raise ModelValidationError(message)
            # validate internal logic of each qualifier value declaration
            if k in ('must_not_contain', 'must_contain', 'contains_either'):
                for item in v:
                    if not isinstance(item, str):
                        message = 'Each item in list field %s qualifier %s must be a string.' % (key, k)
                        raise ModelValidationError(message)
            if k in ('min_length', 'max_length', 'min_size', 'max_size'):
                if v < 0:
                    message = 'Value for field %s qualifier %s cannot be negative.' % (key, k)
                    raise ModelValidationError(message)
            if k in ('discrete_values', 'excluded_values', 'example_values'):
                for item in v:
                    if value_type == 'number':
                        if not isinstance(item, int) and not isinstance(item, float):
                            message = 'Each item in field %s qualifier %s list must be a number.' % (key, k)
                            raise ModelValidationError(message)
                    elif not isinstance(item, str):
                        message = 'Each item in list for field %s qualifier %s must be a string.' % (key, k)
                        raise ModelValidationError(message)
            if k == 'identical_to':
                # the referenced field must exist somewhere in the schema
                if not v in self.keyName:
                    message = 'Value "%s" for field %s qualifier %s not found in components keys.' % (v, key, k)
                    raise ModelValidationError(message)
            if k == 'unique_values':
                if v:
                    # uniqueness requires hashable, comparable items
                    item_name = key + '[0]'
                    item_type = self.keyCriteria[self.keyName.index(item_name)]['value_datatype']
                    if not item_type in ('number', 'string'):
                        message = 'Field %s[0] must be either a string or number if qualifier "unique_values": true' % key
                        raise ModelValidationError(message)
        # validate lack of other qualifiers if value exist is false
        if 'value_exists' in value.keys():
            if not value['value_exists']:
                if set(value.keys()) - {'value_exists'}:
                    message = 'If field %s qualifier value_exists: false, field may not have other qualifiers.' % key
                    raise ModelValidationError(message)
        # validate size qualifiers against each other
        size_qualifiers = ['min_size', 'max_size']
        for qualifier in size_qualifiers:
            if qualifier in value.keys():
                test_value = value[qualifier]
                value_path = 'field %s qualifier %s' % (key, qualifier)
                header = 'Value %s for %s' % (test_value, value_path)
                if 'min_size' in value.keys():
                    if test_value < value['min_size']:
                        message = '%s must not be less than "min_size": %s' % (header, value['min_size'])
                        raise ModelValidationError(message)
                if 'max_size' in value.keys():
                    if test_value > value['max_size']:
                        message = '%s must not be greater than "max_size": %s' % (header, value['max_size'])
                        raise ModelValidationError(message)
        # validate length qualifiers against each other
        length_qualifiers = ['min_length', 'max_length']
        for qualifier in length_qualifiers:
            if qualifier in value.keys():
                test_value = value[qualifier]
                value_path = 'field %s qualifier %s' % (key, qualifier)
                header = 'Value %s for %s' % (test_value, value_path)
                if 'min_length' in value.keys():
                    if test_value < value['min_length']:
                        message = '%s must be at least "min_length": %s' % (header, value['min_length'])
                        raise ModelValidationError(message)
                if 'max_length' in value.keys():
                    if test_value > value['max_length']:
                        message = '%s cannot be more than "max_length": %s' % (header, value['max_length'])
                        raise ModelValidationError(message)
        # validate range qualifiers against each other & length qualifiers
        range_qualifiers = ['min_value', 'max_value', 'greater_than', 'less_than', 'equal_to']
        for qualifier in range_qualifiers:
            if qualifier in value.keys():
                test_value = value[qualifier]
                value_path = 'field %s qualifier %s' % (key, qualifier)
                quote_text = ''
                if isinstance(test_value, str):
                    quote_text = '"'
                header = 'Value %s%s%s for %s' % (quote_text, test_value, quote_text, value_path)
                if 'min_value' in value.keys():
                    if test_value < value['min_value']:
                        message = '%s must not be less than "min_value": %s' % (header, value['min_value'])
                        raise ModelValidationError(message)
                if 'max_value' in value.keys():
                    if test_value > value['max_value']:
                        message = '%s must not be greater than "max_value": %s' % (header, value['max_value'])
                        raise ModelValidationError(message)
                if 'greater_than' in value.keys():
                    # a qualifier is not tested against itself
                    if test_value <= value['greater_than'] and not qualifier == 'greater_than':
                        message = '%s must be "greater_than": %s' % (header, value['greater_than'])
                        raise ModelValidationError(message)
                if 'less_than' in value.keys():
                    if test_value >= value['less_than'] and not qualifier == 'less_than':
                        message = '%s must be "less_than": %s' % (header, value['less_than'])
                        raise ModelValidationError(message)
                if 'min_length' in value.keys():
                    if len(test_value) < value['min_length']:
                        message = '%s must be at least "min_length": %s' % (header, value['min_length'])
                        raise ModelValidationError(message)
                if 'max_length' in value.keys():
                    if len(test_value) > value['max_length']:
                        message = '%s cannot be more than "max_length": %s' % (header, value['max_length'])
                        raise ModelValidationError(message)
                if 'integer_data' in value.keys():
                    if value['integer_data']:
                        if not isinstance(test_value, int):
                            message = '%s must be an "integer_data".' % header
                            raise ModelValidationError(message)
                if 'must_not_contain' in value.keys():
                    for regex in value['must_not_contain']:
                        regex_pattern = re.compile(regex)
                        if regex_pattern.findall(test_value):
                            message = '%s matches regex pattern in "must_not_contain": ["%s"]' % (header, regex)
                            raise ModelValidationError(message)
                if 'must_contain' in value.keys():
                    for regex in value['must_contain']:
                        regex_pattern = re.compile(regex)
                        if not regex_pattern.findall(test_value):
                            message = '%s does not match regex pattern in "must_contain": ["%s"].' % (header, regex)
                            raise ModelValidationError(message)
                if 'contains_either' in value.keys():
                    # at least one of the alternative patterns must match
                    regex_match = False
                    regex_patterns = []
                    for regex in value['contains_either']:
                        regex_patterns.append(regex)
                        regex_pattern = re.compile(regex)
                        if regex_pattern.findall(test_value):
                            regex_match = True
                    if not regex_match:
                        message = '%s does not match any regex patterns in "contains_either": %s' % (header, regex_patterns)
                        raise ModelValidationError(message)
                if 'byte_data' in value.keys():
                    if value['byte_data']:
                        # only exact equality is meaningful for base64 blobs
                        if qualifier != 'equal_to':
                            message = '%s cannot be used with base64 encoded "byte_data".' % header
                            raise ModelValidationError(message)
        # validate discrete value qualifiers against other criteria
        schema_field = self.keyCriteria[self.keyName.index(key)]
        discrete_qualifiers = ['declared_value', 'default_value', 'excluded_values', 'discrete_values', 'example_values']
        for qualifier in discrete_qualifiers:
            test_qualifier = False
            if qualifier in schema_field:
                test_qualifier = True
                # an empty declared value (e.g. '' or 0 placeholder) is not tested
                if qualifier == 'declared_value' and not schema_field[qualifier]:
                    test_qualifier = False
            if qualifier in value.keys() or (test_qualifier and declared_value):
                multiple_values = False
                if qualifier in value.keys():
                    if isinstance(value[qualifier], list):
                        test_list = value[qualifier]
                        multiple_values = True
                    else:
                        test_list = [value[qualifier]]
                else:
                    test_list = [schema_field[qualifier]]
                value_path = 'field %s qualifier %s' % (key, qualifier)
                for i in range(len(test_list)):
                    test_value = test_list[i]
                    quote_text = ''
                    if isinstance(test_value, str):
                        quote_text = '"'
                    item_text = ''
                    if multiple_values:
                        item_text = '[%s]' % i
                    header = 'Value %s%s%s for %s%s' % (quote_text, test_value, quote_text, value_path, item_text)
                    if 'min_value' in value.keys():
                        if test_value < value['min_value']:
                            message = '%s must not be less than "min_value": %s' % (header, value['min_value'])
                            raise ModelValidationError(message)
                    if 'max_value' in value.keys():
                        if test_value > value['max_value']:
                            message = '%s must not be greater than "max_value": %s' % (header, value['max_value'])
                            raise ModelValidationError(message)
                    if 'equal_to' in value.keys():
                        if test_value != value['equal_to']:
                            # NOTE(review): error raised only for boolean mismatches on
                            # non-declared values — looks deliberate but confirm intent
                            if qualifier != 'declared_value' and isinstance(test_value, bool):
                                message = '%s must be "equal_to": %s' % (header, value['equal_to'])
                                raise ModelValidationError(message)
                    if 'greater_than' in value.keys():
                        if test_value <= value['greater_than']:
                            message = '%s must be "greater_than": %s' % (header, value['greater_than'])
                            raise ModelValidationError(message)
                    if 'less_than' in value.keys():
                        if test_value >= value['less_than']:
                            message = '%s must be "less_than": %s' % (header, value['less_than'])
                            raise ModelValidationError(message)
                    if 'integer_data' in value.keys():
                        if value['integer_data']:
                            if not isinstance(test_value, int):
                                message = '%s must be an "integer_data".' % header
                                raise ModelValidationError(message)
                    if 'min_length' in value.keys():
                        if len(test_value) < value['min_length']:
                            message = '%s must be at least "min_length": %s' % (header, value['min_length'])
                            raise ModelValidationError(message)
                    if 'max_length' in value.keys():
                        if len(test_value) > value['max_length']:
                            message = '%s cannot be more than "max_length": %s' % (header, value['max_length'])
                            raise ModelValidationError(message)
                    if 'must_not_contain' in value.keys():
                        for regex in value['must_not_contain']:
                            regex_pattern = re.compile(regex)
                            if regex_pattern.findall(test_value):
                                message = '%s matches regex pattern in "must_not_contain": ["%s"]' % (header, regex)
                                raise ModelValidationError(message)
                    if 'must_contain' in value.keys():
                        for regex in value['must_contain']:
                            regex_pattern = re.compile(regex)
                            if not regex_pattern.findall(test_value):
                                message = '%s does not match regex pattern in "must_contain": ["%s"]' % (header, regex)
                                raise ModelValidationError(message)
                    if 'contains_either' in value.keys():
                        regex_match = False
                        regex_patterns = []
                        for regex in value['contains_either']:
                            regex_patterns.append(regex)
                            regex_pattern = re.compile(regex)
                            if regex_pattern.findall(test_value):
                                regex_match = True
                        if not regex_match:
                            message = '%s does not match any regex patterns in "contains_either": %s' % (header, regex_patterns)
                            raise ModelValidationError(message)
                    if 'byte_data' in value.keys():
                        # discrete values for byte fields must be valid base64
                        message = '%s cannot be base64 decoded to "byte_data".' % header
                        try:
                            decoded_bytes = b64decode(test_value)
                        except:
                            raise ModelValidationError(message)
                        if not isinstance(decoded_bytes, bytes):
                            raise ModelValidationError(message)
        # validate discrete value qualifiers against each other
        for qualifier in discrete_qualifiers:
            test_qualifier = False
            if qualifier in schema_field:
                test_qualifier = True
                if qualifier == 'declared_value' and not schema_field[qualifier]:
                    test_qualifier = False
            if qualifier in value.keys() or (test_qualifier and declared_value):
                multiple_values = False
                if qualifier in value.keys():
                    if isinstance(value[qualifier], list):
                        test_list = value[qualifier]
                        multiple_values = True
                    else:
                        test_list = [value[qualifier]]
                else:
                    test_list = [schema_field[qualifier]]
                value_path = 'field %s qualifier %s' % (key, qualifier)
                for i in range(len(test_list)):
                    test_value = test_list[i]
                    quote_text = ''
                    if isinstance(test_value, str):
                        quote_text = '"'
                    item_text = ''
                    if multiple_values:
                        item_text = '[%s]' % i
                    header = 'Value %s%s%s for %s%s' % (quote_text, test_value, quote_text, value_path, item_text)
                    if 'excluded_values' in value.keys():
                        # excluded values themselves are exempt from the exclusion test
                        if not qualifier == 'excluded_values':
                            if test_value in value['excluded_values']:
                                message = '%s cannot be one of "excluded_values": %s.' % (header, value['excluded_values'])
                                raise ModelValidationError(message)
                    if 'discrete_values' in value.keys():
                        if not qualifier == 'excluded_values':
                            if test_value not in value['discrete_values']:
                                message = '%s must be one of "discrete_values": %s' % (header, value['discrete_values'])
                                raise ModelValidationError(message)
    return fields_dict
def _validate_defaults(self, fields_dict):

    '''
        a helper method for validating default values declared for model fields

    :param fields_dict: dictionary with qualifier criteria keyed by field dot-path
    :return: fields_dict
    '''

    for field_key, field_criteria in fields_dict.items():
        # resolve the datatype of the field in the schema
        datatype = self.keyCriteria[self.keyName.index(field_key)]['value_datatype']
        # only fields with a declared default need checking
        if 'default_value' not in field_criteria:
            continue
        default = field_criteria['default_value']
        # a list default is checked item by item; anything else as a single value
        is_list = isinstance(default, list)
        defaults = default if is_list else [default]
        path_prefix = 'field %s qualifier default_value' % field_key
        for index, item in enumerate(defaults):
            quote = '"' if isinstance(item, str) else ''
            suffix = '[%s]' % index if is_list else ''
            qualifier_text = path_prefix + suffix
            header = 'Value %s%s%s for %s' % (quote, item, quote, qualifier_text)
            # items of a list default are validated against the declared
            # first-item schema; errors are re-worded to point at the default
            if datatype == 'list':
                item_key = '%s[0]' % field_key
                try:
                    self.validate(item, item_key, object_title=header)
                except Exception as err:
                    raise ModelValidationError(str(err).strip().replace('field %s' % item_key, qualifier_text))
    return fields_dict
def _validate_dict(self, input_dict, schema_dict, path_to_root, object_title=''):

    '''
        a helper method for recursively validating keys in dictionaries

    :param input_dict: dictionary input to validate
    :param schema_dict: dictionary with schema declaration for this path
    :param path_to_root: string with dot-path of dictionary in model
    :param object_title: [optional] string with name of input for error reports
    :return: input_dict (with defaults injected for missing optional fields)
    '''

    # reconstruct key path to current dictionary in model
    # (item designators like [2] are normalized to [0] for keyMap lookup)
    rules_top_level_key = re.sub('\[\d+\]', '[0]', path_to_root)
    map_rules = self.keyMap[rules_top_level_key]

    # construct map error report template
    map_error = {
        'object_title': object_title,
        'model_schema': self.schema,
        'input_criteria': map_rules,
        'failed_test': 'value_datatype',
        'input_path': path_to_root,
        'error_value': 0,
        'error_code': 4001
    }

    # validate map size
    # NOTE(review): the -51 offset presumably compensates for the fixed overhead
    # of getsizeof on the wrapped json string — confirm against the size rules
    if 'min_size' in map_rules.keys():
        input_size = sys.getsizeof(json.dumps(str(input_dict)).replace(' ','')) - 51
        if input_size < map_rules['min_size']:
            map_error['failed_test'] = 'min_size'
            map_error['error_value'] = input_size
            map_error['error_code'] = 4031
            raise InputValidationError(map_error)
    if 'max_size' in map_rules.keys():
        input_size = sys.getsizeof(json.dumps(str(input_dict)).replace(' ','')) - 51
        if input_size > map_rules['max_size']:
            map_error['failed_test'] = 'max_size'
            map_error['error_value'] = input_size
            map_error['error_code'] = 4032
            raise InputValidationError(map_error)

    # construct lists of keys in input dictionary
    input_keys = []
    input_key_list = []
    for key in input_dict.keys():
        error_dict = {
            'object_title': object_title,
            'model_schema': self.schema,
            'input_criteria': self.keyMap[rules_top_level_key],
            'failed_test': 'key_datatype',
            'input_path': path_to_root,
            'error_value': key,
            'error_code': 4004
        }
        # NOTE(review): 'input_criteria' aliases the shared keyMap entry, so this
        # assignment mutates self.keyMap — verify that is intended
        error_dict['input_criteria']['key_datatype'] = 'string'
        if path_to_root == '.':
            if not isinstance(key, str):
                input_key_name = path_to_root + str(key)
                error_dict['input_path'] = input_key_name
                raise InputValidationError(error_dict)
            input_key_name = path_to_root + key
        else:
            if not isinstance(key, str):
                input_key_name = path_to_root + '.' + str(key)
                error_dict['input_path'] = input_key_name
                raise InputValidationError(error_dict)
            input_key_name = path_to_root + '.' + key
        input_keys.append(input_key_name)
        input_key_list.append(key)

    # TODO: validate top-level key and values against identical to reference
    # TODO: run lambda function and call validation

    # construct lists of keys in schema dictionary
    max_keys = []
    max_key_list = []
    req_keys = []
    req_key_list = []
    for key in schema_dict.keys():
        if path_to_root == '.':
            schema_key_name = path_to_root + key
        else:
            schema_key_name = path_to_root + '.' + key
        max_keys.append(schema_key_name)
        max_key_list.append(key)
        rules_schema_key_name = re.sub('\[\d+\]', '[0]', schema_key_name)
        if self.keyMap[rules_schema_key_name]['required_field']:
            req_keys.append(schema_key_name)
            req_key_list.append(key)

    # validate existence of required fields
    missing_keys = set(req_keys) - set(input_keys)
    if missing_keys:
        error_dict = {
            'object_title': object_title,
            'model_schema': self.schema,
            'input_criteria': self.keyMap[rules_top_level_key],
            'failed_test': 'required_field',
            'input_path': path_to_root,
            'error_value': list(missing_keys),
            'error_code': 4002
        }
        # NOTE(review): this also mutates the shared keyMap entry (see above)
        error_dict['input_criteria']['required_keys'] = req_keys
        raise InputValidationError(error_dict)

    # validate existence of extra fields
    extra_keys = set(input_keys) - set(max_keys)
    if extra_keys and not self.keyMap[rules_top_level_key]['extra_fields']:
        extra_key_list = []
        for key in extra_keys:
            pathless_key = re.sub(rules_top_level_key, '', key, count=1)
            extra_key_list.append(pathless_key)
        error_dict = {
            'object_title': object_title,
            'model_schema': self.schema,
            'input_criteria': self.keyMap[rules_top_level_key],
            'failed_test': 'extra_fields',
            'input_path': path_to_root,
            'error_value': extra_key_list,
            'error_code': 4003
        }
        error_dict['input_criteria']['maximum_scope'] = max_key_list
        raise InputValidationError(error_dict)

    # validate datatype of value
    for key, value in input_dict.items():
        if path_to_root == '.':
            input_key_name = path_to_root + key
        else:
            input_key_name = path_to_root + '.' + key
        rules_input_key_name = re.sub('\[\d+\]', '[0]', input_key_name)
        if input_key_name in max_keys:
            input_criteria = self.keyMap[rules_input_key_name]
            error_dict = {
                'object_title': object_title,
                'model_schema': self.schema,
                'input_criteria': input_criteria,
                'failed_test': 'value_datatype',
                'input_path': input_key_name,
                'error_value': value,
                'error_code': 4001
            }
            # a value whose class is not a recognized json datatype fails here
            try:
                value_index = self._datatype_classes.index(value.__class__)
            except:
                error_dict['error_value'] = value.__class__.__name__
                raise InputValidationError(error_dict)
            value_type = self._datatype_names[value_index]
            # a 'null' declaration accepts any datatype
            if input_criteria['value_datatype'] == 'null':
                pass
            else:
                if value_type != input_criteria['value_datatype']:
                    raise InputValidationError(error_dict)
            # call appropriate validation sub-routine for datatype of value
            if value_type == 'boolean':
                input_dict[key] = self._validate_boolean(value, input_key_name, object_title)
            elif value_type == 'number':
                input_dict[key] = self._validate_number(value, input_key_name, object_title)
            elif value_type == 'string':
                input_dict[key] = self._validate_string(value, input_key_name, object_title)
            elif value_type == 'map':
                input_dict[key] = self._validate_dict(value, schema_dict[key], input_key_name, object_title)
            elif value_type == 'list':
                input_dict[key] = self._validate_list(value, schema_dict[key], input_key_name, object_title)

    # set default values for empty optional fields
    for key in max_key_list:
        if key not in input_key_list:
            indexed_key = max_keys[max_key_list.index(key)]
            if indexed_key in self.components.keys():
                if 'default_value' in self.components[indexed_key]:
                    input_dict[key] = self.components[indexed_key]['default_value']

    return input_dict
def _validate_list(self, input_list, schema_list, path_to_root, object_title=''):

    '''
        a helper method for recursively validating items in a list

    :param input_list: list input to validate
    :param schema_list: list with schema declaration of the initial item
    :param path_to_root: string with dot-path of list in model
    :param object_title: [optional] string with name of input for error reports
    :return: input_list
    '''

    # construct rules for list and items
    # (item designators like [2] are normalized to [0] for keyMap lookup)
    rules_path_to_root = re.sub(r'\[\d+\]', '[0]', path_to_root)
    list_rules = self.keyMap[rules_path_to_root]
    initial_key = rules_path_to_root + '[0]'
    item_rules = self.keyMap[initial_key]

    # construct list error report template
    list_error = {
        'object_title': object_title,
        'model_schema': self.schema,
        'input_criteria': list_rules,
        'failed_test': 'value_datatype',
        'input_path': path_to_root,
        'error_value': 0,
        'error_code': 4001
    }

    # validate list size rules
    if 'min_size' in list_rules.keys():
        if len(input_list) < list_rules['min_size']:
            list_error['failed_test'] = 'min_size'
            list_error['error_value'] = len(input_list)
            list_error['error_code'] = 4031
            raise InputValidationError(list_error)
    if 'max_size' in list_rules.keys():
        if len(input_list) > list_rules['max_size']:
            list_error['failed_test'] = 'max_size'
            list_error['error_value'] = len(input_list)
            list_error['error_code'] = 4032
            raise InputValidationError(list_error)

    # construct item error report template
    item_error = {
        'object_title': object_title,
        'model_schema': self.schema,
        'input_criteria': item_rules,
        'failed_test': 'value_datatype',
        'input_path': initial_key,
        'error_value': None,
        'error_code': 4001
    }

    # validate datatype of items
    for i in range(len(input_list)):
        input_path = path_to_root + '[%s]' % i
        item = input_list[i]
        item_error['input_path'] = input_path
        # an item whose class is not a recognized json datatype fails here
        try:
            item_index = self._datatype_classes.index(item.__class__)
        except:
            item_error['error_value'] = item.__class__.__name__
            raise InputValidationError(item_error)
        item_type = self._datatype_names[item_index]
        item_error['error_value'] = item
        # a 'null' declaration accepts any datatype
        if item_rules['value_datatype'] == 'null':
            pass
        else:
            if item_type != item_rules['value_datatype']:
                raise InputValidationError(item_error)
        # call appropriate validation sub-routine for datatype of item
        if item_type == 'boolean':
            input_list[i] = self._validate_boolean(item, input_path, object_title)
        elif item_type == 'number':
            input_list[i] = self._validate_number(item, input_path, object_title)
        elif item_type == 'string':
            input_list[i] = self._validate_string(item, input_path, object_title)
        elif item_type == 'map':
            input_list[i] = self._validate_dict(item, schema_list[0], input_path, object_title)
        elif item_type == 'list':
            input_list[i] = self._validate_list(item, schema_list[0], input_path, object_title)

    # validate unique values in list
    # BUGFIX: previously the mere presence of the 'unique_values' key triggered
    # the uniqueness test, so a declared "unique_values": false still rejected
    # duplicates; now the declared boolean value is honored (consistent with
    # how query evaluation treats the unique_values qualifier)
    if list_rules.get('unique_values'):
        if len(set(input_list)) < len(input_list):
            list_error['failed_test'] = 'unique_values'
            list_error['error_value'] = input_list
            list_error['error_code'] = 4033
            raise InputValidationError(list_error)

    # TODO: validate top-level item values against identical to reference
    # TODO: run lambda function and call validation url

    return input_list
def _validate_number(self, input_number, path_to_root, object_title=''):

    '''
        a helper method for validating properties of a number

    :param input_number: integer or float to check against field criteria
    :param path_to_root: string with dot-path of field in model
    :param object_title: [optional] string with name of input for error reports
    :return: input_number
    '''

    # look up field criteria, normalizing item designators to [0]
    criteria = self.keyMap[re.sub(r'\[\d+\]', '[0]', path_to_root)]

    # template for any validation failure report
    report = {
        'object_title': object_title,
        'model_schema': self.schema,
        'input_criteria': criteria,
        'failed_test': 'value_datatype',
        'input_path': path_to_root,
        'error_value': input_number,
        'error_code': 4001
    }

    def _fail(test_name, code):
        # record which criterion failed, then raise
        report['failed_test'] = test_name
        report['error_code'] = code
        raise InputValidationError(report)

    # evaluate each declared criterion in model order
    if 'integer_data' in criteria:
        if criteria['integer_data'] and not isinstance(input_number, int):
            _fail('integer_data', 4021)
    if 'min_value' in criteria and input_number < criteria['min_value']:
        _fail('min_value', 4022)
    if 'max_value' in criteria and input_number > criteria['max_value']:
        _fail('max_value', 4023)
    if 'greater_than' in criteria and input_number <= criteria['greater_than']:
        _fail('greater_than', 4024)
    if 'less_than' in criteria and input_number >= criteria['less_than']:
        _fail('less_than', 4025)
    if 'equal_to' in criteria and input_number != criteria['equal_to']:
        _fail('equal_to', 4026)
    if 'discrete_values' in criteria and input_number not in criteria['discrete_values']:
        _fail('discrete_values', 4041)
    if 'excluded_values' in criteria and input_number in criteria['excluded_values']:
        _fail('excluded_values', 4042)

    # TODO: validate number against identical to reference
    # TODO: run lambda function and call validation url

    return input_number
def _validate_string(self, input_string, path_to_root, object_title=''):
    '''
    a helper method for validating properties of a string

    :param input_string: string to validate against model criteria
    :param path_to_root: string with dot-path of field in model schema
    :param object_title: [optional] string with name of input for error reports
    :return: input_string (raises InputValidationError on first failed test)
    '''
    # item designators map onto the rules declared for the first list item [0]
    rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
    input_criteria = self.keyMap[rules_path_to_root]
    # template error report; failed_test & error_code updated per failing check
    error_dict = {
        'object_title': object_title,
        'model_schema': self.schema,
        'input_criteria': input_criteria,
        'failed_test': 'value_datatype',
        'input_path': path_to_root,
        'error_value': input_string,
        'error_code': 4001
    }
    # byte data strings must be base64 decodable
    if 'byte_data' in input_criteria.keys():
        if input_criteria['byte_data']:
            error_dict['failed_test'] = 'byte_data'
            error_dict['error_code'] = 4011
            try:
                decoded_bytes = b64decode(input_string)
            except:
                raise InputValidationError(error_dict)
            if not isinstance(decoded_bytes, bytes):
                raise InputValidationError(error_dict)
    # value comparisons below use python lexicographic string ordering
    if 'min_value' in input_criteria.keys():
        if input_string < input_criteria['min_value']:
            error_dict['failed_test'] = 'min_value'
            error_dict['error_code'] = 4022
            raise InputValidationError(error_dict)
    if 'max_value' in input_criteria.keys():
        if input_string > input_criteria['max_value']:
            error_dict['failed_test'] = 'max_value'
            error_dict['error_code'] = 4023
            raise InputValidationError(error_dict)
    if 'greater_than' in input_criteria.keys():
        if input_string <= input_criteria['greater_than']:
            error_dict['failed_test'] = 'greater_than'
            error_dict['error_code'] = 4024
            raise InputValidationError(error_dict)
    if 'less_than' in input_criteria.keys():
        if input_string >= input_criteria['less_than']:
            error_dict['failed_test'] = 'less_than'
            error_dict['error_code'] = 4025
            raise InputValidationError(error_dict)
    if 'equal_to' in input_criteria.keys():
        if input_string != input_criteria['equal_to']:
            error_dict['failed_test'] = 'equal_to'
            error_dict['error_code'] = 4026
            raise InputValidationError(error_dict)
    # character length tests
    if 'min_length' in input_criteria.keys():
        if len(input_string) < input_criteria['min_length']:
            error_dict['failed_test'] = 'min_length'
            error_dict['error_code'] = 4012
            raise InputValidationError(error_dict)
    if 'max_length' in input_criteria.keys():
        if len(input_string) > input_criteria['max_length']:
            error_dict['failed_test'] = 'max_length'
            error_dict['error_code'] = 4013
            raise InputValidationError(error_dict)
    # regex tests; each declared entry is compiled as a python regex pattern
    if 'must_not_contain' in input_criteria.keys():
        for regex in input_criteria['must_not_contain']:
            regex_pattern = re.compile(regex)
            if regex_pattern.findall(input_string):
                error_dict['failed_test'] = 'must_not_contain'
                error_dict['error_code'] = 4014
                raise InputValidationError(error_dict)
    if 'must_contain' in input_criteria.keys():
        for regex in input_criteria['must_contain']:
            regex_pattern = re.compile(regex)
            if not regex_pattern.findall(input_string):
                error_dict['failed_test'] = 'must_contain'
                error_dict['error_code'] = 4015
                raise InputValidationError(error_dict)
    if 'contains_either' in input_criteria.keys():
        # at least one of the declared patterns must match
        regex_match = False
        for regex in input_criteria['contains_either']:
            regex_pattern = re.compile(regex)
            if regex_pattern.findall(input_string):
                regex_match = True
        if not regex_match:
            error_dict['failed_test'] = 'contains_either'
            error_dict['error_code'] = 4016
            raise InputValidationError(error_dict)
    # white-list / black-list tests
    if 'discrete_values' in input_criteria.keys():
        if input_string not in input_criteria['discrete_values']:
            error_dict['failed_test'] = 'discrete_values'
            error_dict['error_code'] = 4041
            raise InputValidationError(error_dict)
    if 'excluded_values' in input_criteria.keys():
        if input_string in input_criteria['excluded_values']:
            error_dict['failed_test'] = 'excluded_values'
            error_dict['error_code'] = 4042
            raise InputValidationError(error_dict)
    # TODO: validate string against identical to reference
    # TODO: run lambda function and call validation url
    return input_string
def _validate_boolean(self, input_boolean, path_to_root, object_title=''):
'''
a helper method for validating properties of a boolean
:return: input_boolean
'''
rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
input_criteria = self.keyMap[rules_path_to_root]
error_dict = {
'object_title': object_title,
'model_schema': self.schema,
'input_criteria': input_criteria,
'failed_test': 'value_datatype',
'input_path': path_to_root,
'error_value': input_boolean,
'error_code': 4001
}
if 'equal_to' in input_criteria.keys():
if input_boolean != input_criteria['equal_to']:
error_dict['failed_test'] = 'equal_to'
error_dict['error_code'] = 4026
raise InputValidationError(error_dict)
# TODO: validate boolean against identical to reference
# TODO: run lambda function and call validation url
return input_boolean
def _ingest_dict(self, input_dict, schema_dict, path_to_root):
    '''
    a helper method for ingesting keys, value pairs in a dictionary

    for each key declared in the schema, the output receives (in priority
    order) the ingested input value, the declared default value, or an
    empty value appropriate to the key's datatype

    :param input_dict: dictionary with input data to ingest
    :param schema_dict: dictionary with the corresponding schema component
    :param path_to_root: string with dot-path of dictionary in model schema
    :return: valid_dict
    '''
    valid_dict = {}
    # construct path to root for rules
    rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
    # iterate over keys in schema dict
    for key, value in schema_dict.items():
        key_path = path_to_root
        if not key_path == '.':
            key_path += '.'
        key_path += key
        rules_key_path = re.sub('\[\d+\]', '[0]', key_path)
        # input value only counts when its python type matches the schema's
        value_match = False
        if key in input_dict.keys():
            value_index = self._datatype_classes.index(value.__class__)
            value_type = self._datatype_names[value_index]
            try:
                v_index = self._datatype_classes.index(input_dict[key].__class__)
                v_type = self._datatype_names[v_index]
                if v_type == value_type:
                    value_match = True
            except:
                value_match = False
        if value_match:
            # ingest input through the helper for the matched datatype
            if value_type == 'null':
                valid_dict[key] = input_dict[key]
            elif value_type == 'boolean':
                valid_dict[key] = self._ingest_boolean(input_dict[key], key_path)
            elif value_type == 'number':
                valid_dict[key] = self._ingest_number(input_dict[key], key_path)
            elif value_type == 'string':
                valid_dict[key] = self._ingest_string(input_dict[key], key_path)
            elif value_type == 'map':
                valid_dict[key] = self._ingest_dict(input_dict[key], schema_dict[key], key_path)
            elif value_type == 'list':
                valid_dict[key] = self._ingest_list(input_dict[key], schema_dict[key], key_path)
        else:
            # no usable input; fall back to declared default or empty value
            value_type = self.keyMap[rules_key_path]['value_datatype']
            if 'default_value' in self.keyMap[rules_key_path]:
                valid_dict[key] = self.keyMap[rules_key_path]['default_value']
            elif value_type == 'null':
                valid_dict[key] = None
            elif value_type == 'boolean':
                valid_dict[key] = False
            elif value_type == 'number':
                valid_dict[key] = 0.0
                # integer_data fields default to int 0 rather than float 0.0
                if 'integer_data' in self.keyMap[rules_key_path].keys():
                    if self.keyMap[rules_key_path]['integer_data']:
                        valid_dict[key] = 0
            elif value_type == 'string':
                valid_dict[key] = ''
            elif value_type == 'list':
                valid_dict[key] = []
            elif value_type == 'map':
                # recurse with an empty input so nested defaults are filled in
                valid_dict[key] = self._ingest_dict({}, schema_dict[key], key_path)
    # add extra fields if set to True
    if self.keyMap[rules_path_to_root]['extra_fields']:
        for key, value in input_dict.items():
            if key not in valid_dict.keys():
                valid_dict[key] = value
    return valid_dict
def _ingest_list(self, input_list, schema_list, path_to_root):
    '''
    a helper method for ingesting items in a list

    items which fail validation are silently dropped; ingestion stops
    once the list reaches any declared max_size

    :param input_list: list with input data to ingest
    :param schema_list: list with the corresponding schema component
    :param path_to_root: string with dot-path of list in model schema
    :return: valid_list
    '''
    valid_list = []
    # construct max list size
    max_size = None
    rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
    if 'max_size' in self.keyMap[rules_path_to_root].keys():
        # a falsy max_size (e.g. 0) means the list is always ingested empty
        if not self.keyMap[rules_path_to_root]['max_size']:
            return valid_list
        else:
            max_size = self.keyMap[rules_path_to_root]['max_size']
    # iterate over items in input list
    if input_list:
        # list declarations only describe their first item; all items must match it
        rules_index = self._datatype_classes.index(schema_list[0].__class__)
        rules_type = self._datatype_names[rules_index]
        for i in range(len(input_list)):
            item_path = '%s[%s]' % (path_to_root, i)
            value_match = False
            try:
                item_index = self._datatype_classes.index(input_list[i].__class__)
                item_type = self._datatype_names[item_index]
                if item_type == rules_type:
                    value_match = True
            except:
                value_match = False
            if value_match:
                # invalid items are skipped rather than raising
                try:
                    if item_type == 'boolean':
                        valid_list.append(self._validate_boolean(input_list[i], item_path))
                    elif item_type == 'number':
                        valid_list.append(self._validate_number(input_list[i], item_path))
                    elif item_type == 'string':
                        valid_list.append(self._validate_string(input_list[i], item_path))
                    elif item_type == 'map':
                        valid_list.append(self._ingest_dict(input_list[i], schema_list[0], item_path))
                    elif item_type == 'list':
                        valid_list.append(self._ingest_list(input_list[i], schema_list[0], item_path))
                except:
                    pass
            # stop early once the declared max size is reached
            if isinstance(max_size, int):
                if len(valid_list) == max_size:
                    return valid_list
    return valid_list
def _ingest_number(self, input_number, path_to_root):
'''
a helper method for ingesting a number
:return: valid_number
'''
valid_number = 0.0
try:
valid_number = self._validate_number(input_number, path_to_root)
except:
rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
if 'default_value' in self.keyMap[rules_path_to_root]:
valid_number = self.keyMap[rules_path_to_root]['default_value']
elif 'integer_data' in self.keyMap[rules_path_to_root].keys():
if self.keyMap[rules_path_to_root]['integer_data']:
valid_number = 0
return valid_number
def _ingest_string(self, input_string, path_to_root):
'''
a helper method for ingesting a string
:return: valid_string
'''
valid_string = ''
try:
valid_string = self._validate_string(input_string, path_to_root)
except:
rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
if 'default_value' in self.keyMap[rules_path_to_root]:
valid_string = self.keyMap[rules_path_to_root]['default_value']
return valid_string
def _ingest_boolean(self, input_boolean, path_to_root):
'''
a helper method for ingesting a boolean
:return: valid_boolean
'''
valid_boolean = False
try:
valid_boolean = self._validate_boolean(input_boolean, path_to_root)
except:
rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
if 'default_value' in self.keyMap[rules_path_to_root]:
valid_boolean = self.keyMap[rules_path_to_root]['default_value']
return valid_boolean
def _reconstruct(self, path_to_root):
'''
a helper method for finding the schema endpoint from a path to root
:param path_to_root: string with dot path to root from
:return: list, dict, string, number, or boolean at path to root
'''
# split path to root into segments
item_pattern = re.compile('\d+\\]')
dot_pattern = re.compile('\\.|\\[')
path_segments = dot_pattern.split(path_to_root)
# construct base schema endpoint
schema_endpoint = self.schema
# reconstruct schema endpoint from segments
if path_segments[1]:
for i in range(1,len(path_segments)):
if item_pattern.match(path_segments[i]):
schema_endpoint = schema_endpoint[0]
else:
schema_endpoint = schema_endpoint[path_segments[i]]
return schema_endpoint
def _walk(self, path_to_root, record_dict):
    '''
    a helper method for finding the record endpoint from a path to root

    since a path may cross a list, more than one endpoint can match;
    all matches are collected and returned as a list

    :param path_to_root: string with dot path to root
    :param record_dict: dictionary with record to walk
    :return: list of values found at path to root
    '''
    # split path to root into segments ('[N]' items become 'N]' segments)
    item_pattern = re.compile('\d+\\]')
    dot_pattern = re.compile('\\.|\\[')
    path_segments = dot_pattern.split(path_to_root)
    # accumulator for every endpoint matched by the path
    record_endpoints = []
    # determine starting position (leading dot produces an empty first segment)
    if not path_segments[0]:
        path_segments.pop(0)
    # define internal recursive function
    def _walk_int(path_segments, record_dict):
        record_endpoint = record_dict
        for i in range(0, len(path_segments)):
            if item_pattern.match(path_segments[i]):
                # item designator: fan out across every element of the list
                for j in range(0, len(record_endpoint)):
                    if len(path_segments) == 2:
                        record_endpoints.append(record_endpoint[j])
                    else:
                        # only recurse from the first item designator in the chain
                        stop_chain = False
                        for x in range(0, i):
                            if item_pattern.match(path_segments[x]):
                                stop_chain = True
                        if not stop_chain:
                            shortened_segments = []
                            for z in range(i + 1, len(path_segments)):
                                shortened_segments.append(path_segments[z])
                            _walk_int(shortened_segments, record_endpoint[j])
            else:
                # plain key segment: descend (or record the final endpoint)
                stop_chain = False
                for y in range(0, i):
                    if item_pattern.match(path_segments[y]):
                        stop_chain = True
                if not stop_chain:
                    if len(path_segments) == i + 1:
                        record_endpoints.append(record_endpoint[path_segments[i]])
                    else:
                        record_endpoint = record_endpoint[path_segments[i]]
    # conduct recursive walk
    _walk_int(path_segments, record_dict)
    return record_endpoints
def validate(self, input_data, path_to_root='', object_title=''):
    '''
    a core method for validating input against the model

    input_data is only returned if all data is valid

    :param input_data: list, dict, string, number, or boolean to validate
    :param path_to_root: [optional] string with dot-path of model component
    :param object_title: [optional] string with name of input to validate
    :return: input_data (or InputValidationError)
    '''
    __name__ = '%s.validate' % self.__class__.__name__
    _path_arg = '%s(path_to_root="...")' % __name__
    _title_arg = '%s(object_title="...")' % __name__
    # validate input; normalize path_to_root to the internal leading-dot form
    copy_path = path_to_root
    if path_to_root:
        if not isinstance(path_to_root, str):
            raise ModelValidationError('%s must be a string.' % _path_arg)
        else:
            if path_to_root[0] != '.':
                copy_path = '.%s' % path_to_root
            if not copy_path in self.keyMap.keys():
                raise ModelValidationError('%s does not exist in components %s.' % (_path_arg.replace('...', path_to_root), self.keyMap.keys()))
    else:
        copy_path = '.'
    if object_title:
        if not isinstance(object_title, str):
            raise ModelValidationError('%s must be a string' % _title_arg)
    # construct generic error dictionary
    error_dict = {
        'object_title': object_title,
        'model_schema': self.schema,
        'input_criteria': self.keyMap[copy_path],
        'failed_test': 'value_datatype',
        'input_path': copy_path,
        'error_value': input_data,
        'error_code': 4001
    }
    # determine value type of input data
    try:
        input_index = self._datatype_classes.index(input_data.__class__)
    except:
        # unrecognized python type; report the class name instead of the value
        error_dict['error_value'] = input_data.__class__.__name__
        raise InputValidationError(error_dict)
    input_type = self._datatype_names[input_index]
    # validate input data type
    if input_type != self.keyMap[copy_path]['value_datatype']:
        raise InputValidationError(error_dict)
    # run helper method appropriate to data type
    if input_type == 'boolean':
        input_data = self._validate_boolean(input_data, copy_path, object_title)
    elif input_type == 'number':
        input_data = self._validate_number(input_data, copy_path, object_title)
    elif input_type == 'string':
        input_data = self._validate_string(input_data, copy_path, object_title)
    elif input_type == 'list':
        # rebuild the schema component at this path for recursive validation
        schema_list = self._reconstruct(copy_path)
        input_data = self._validate_list(input_data, schema_list, copy_path, object_title)
    elif input_type == 'map':
        schema_dict = self._reconstruct(copy_path)
        input_data = self._validate_dict(input_data, schema_dict, copy_path, object_title)
    return input_data
def ingest(self, **kwargs):
    '''
    a core method to ingest and validate arbitrary keyword data

    **NOTE: data is always returned with this method**

    for each key in the model, a value is returned according
    to the following priority:

        1. value in kwargs if field passes validation test
        2. default value declared for the key in the model
        3. empty value appropriate to datatype of key in the model

    **NOTE: as long as a default value is provided for each key-
    value, returned data will be model valid

    **NOTE: if 'extra_fields' is True for a dictionary, the key-
    value pair of all fields in kwargs which are not declared in
    the model will also be added to the corresponding dictionary
    data

    **NOTE: if 'max_size' is declared for a list, method will
    stop adding input to the list once it reaches max size

    :param kwargs: key, value pairs
    :return: dictionary with ingested keys and values
    '''
    # FIX: removed a dead local ('__name__') that was computed but never used.
    # delegate to the recursive dict ingestion starting at the schema root
    return self._ingest_dict(kwargs, self.schema, '.')
def query(self, query_criteria, valid_record=None):
    '''
    a core method for querying model valid data with criteria

    **NOTE: input is only returned if all fields & qualifiers are valid for model

    :param query_criteria: dictionary with model field names and query qualifiers
    :param valid_record: dictionary with model valid record
    :return: boolean (or QueryValidationError)

    an example of how to construct the query_criteria argument:

    query_criteria = {
        '.path.to.number': {
            'min_value': 4.5
        },
        '.path.to.string': {
            'must_contain': [ '\\regex' ]
        }
    }

    **NOTE: for a full list of operators for query_criteria based upon field
    datatype, see either the query-rules.json file or REFERENCE file
    '''
    __name__ = '%s.query' % self.__class__.__name__
    _query_arg = '%s(query_criteria={...})' % __name__
    _record_arg = '%s(valid_record={...})' % __name__
    # validate input
    if not isinstance(query_criteria, dict):
        raise ModelValidationError('%s must be a dictionary.' % _query_arg)
    # convert javascript dot_path to class dot_path
    criteria_copy = {}
    equal_fields = []
    dot_fields = []
    for key, value in query_criteria.items():
        copy_key = key
        if not key:
            copy_key = '.'
        else:
            if key[0] != '.':
                copy_key = '.%s' % key
                # remember fields declared without a leading dot so error
                # messages can be rewritten in the caller's own notation
                dot_fields.append(copy_key)
        criteria_copy[copy_key] = value
        # a bare scalar value is shorthand for an equal_to qualifier
        if value.__class__ in self._datatype_classes[0:4]:
            criteria_copy[copy_key] = {
                'equal_to': value
            }
            equal_fields.append(copy_key)
    # validate query criteria against query rules
    query_kwargs = {
        'fields_dict': criteria_copy,
        'fields_rules': self.queryRules,
        'declared_value': False
    }
    try:
        self._validate_fields(**query_kwargs)
    except ModelValidationError as err:
        # rewrite the error message to match the caller's input notation
        message = err.error['message']
        for field in equal_fields:
            equal_error = 'field %s qualifier equal_to' % field
            if message.find(equal_error) > -1:
                message = message.replace(equal_error, 'field %s' % field)
                break
        field_pattern = re.compile('ield\s(\..*?)\s')
        field_name = field_pattern.findall(message)
        if field_name:
            if field_name[0] in dot_fields:
                def _replace_field(x):
                    # strip the leading dot that was added during normalization
                    return 'ield %s ' % x.group(1)[1:]
                message = field_pattern.sub(_replace_field, message)
        raise QueryValidationError(message)
    # query test record
    if valid_record:
        if not isinstance(valid_record, dict):
            raise ModelValidationError('%s must be a dictionary.' % _record_arg)
        # all criteria must evaluate true for the record to match
        for key, value in criteria_copy.items():
            eval_outcome = self._evaluate_field(valid_record, key, value)
            if not eval_outcome:
                return False
    return True
|
collectiveacuity/jsonModel
|
jsonmodel/validators.py
|
jsonModel._validate_dict
|
python
|
def _validate_dict(self, input_dict, schema_dict, path_to_root, object_title=''):
    '''
    a helper method for recursively validating keys in dictionaries

    :param input_dict: dictionary to validate
    :param schema_dict: dictionary with the corresponding schema component
    :param path_to_root: string with dot-path of dictionary in model schema
    :param object_title: [optional] string with name of input for error reports
    :return: input_dict (raises InputValidationError on first failed test)
    '''
    # reconstruct key path to current dictionary in model
    rules_top_level_key = re.sub('\[\d+\]', '[0]', path_to_root)
    map_rules = self.keyMap[rules_top_level_key]
    # construct list error report template
    map_error = {
        'object_title': object_title,
        'model_schema': self.schema,
        'input_criteria': map_rules,
        'failed_test': 'value_datatype',
        'input_path': path_to_root,
        'error_value': 0,
        'error_code': 4001
    }
    # validate map size
    # NOTE(review): size is estimated from a json-ish string dump minus a
    # fixed 51-byte overhead -- an approximation, not an exact byte count
    if 'min_size' in map_rules.keys():
        input_size = sys.getsizeof(json.dumps(str(input_dict)).replace(' ','')) - 51
        if input_size < map_rules['min_size']:
            map_error['failed_test'] = 'min_size'
            map_error['error_value'] = input_size
            map_error['error_code'] = 4031
            raise InputValidationError(map_error)
    if 'max_size' in map_rules.keys():
        input_size = sys.getsizeof(json.dumps(str(input_dict)).replace(' ','')) - 51
        if input_size > map_rules['max_size']:
            map_error['failed_test'] = 'max_size'
            map_error['error_value'] = input_size
            map_error['error_code'] = 4032
            raise InputValidationError(map_error)
    # construct lists of keys in input dictionary
    input_keys = []
    input_key_list = []
    for key in input_dict.keys():
        error_dict = {
            'object_title': object_title,
            'model_schema': self.schema,
            'input_criteria': self.keyMap[rules_top_level_key],
            'failed_test': 'key_datatype',
            'input_path': path_to_root,
            'error_value': key,
            'error_code': 4004
        }
        # NOTE(review): this writes through to self.keyMap, since
        # input_criteria references the shared rules dict -- confirm
        # that mutating the model's keyMap here is intended
        error_dict['input_criteria']['key_datatype'] = 'string'
        if path_to_root == '.':
            if not isinstance(key, str):
                input_key_name = path_to_root + str(key)
                error_dict['input_path'] = input_key_name
                raise InputValidationError(error_dict)
            input_key_name = path_to_root + key
        else:
            if not isinstance(key, str):
                input_key_name = path_to_root + '.' + str(key)
                error_dict['input_path'] = input_key_name
                raise InputValidationError(error_dict)
            input_key_name = path_to_root + '.' + key
        input_keys.append(input_key_name)
        input_key_list.append(key)
    # TODO: validate top-level key and values against identical to reference
    # TODO: run lambda function and call validation
    # construct lists of keys in schema dictionary
    max_keys = []
    max_key_list = []
    req_keys = []
    req_key_list = []
    for key in schema_dict.keys():
        if path_to_root == '.':
            schema_key_name = path_to_root + key
        else:
            schema_key_name = path_to_root + '.' + key
        max_keys.append(schema_key_name)
        max_key_list.append(key)
        rules_schema_key_name = re.sub('\[\d+\]', '[0]', schema_key_name)
        if self.keyMap[rules_schema_key_name]['required_field']:
            req_keys.append(schema_key_name)
            req_key_list.append(key)
    # validate existence of required fields
    missing_keys = set(req_keys) - set(input_keys)
    if missing_keys:
        error_dict = {
            'object_title': object_title,
            'model_schema': self.schema,
            'input_criteria': self.keyMap[rules_top_level_key],
            'failed_test': 'required_field',
            'input_path': path_to_root,
            'error_value': list(missing_keys),
            'error_code': 4002
        }
        # NOTE(review): also writes through to self.keyMap (shared reference)
        error_dict['input_criteria']['required_keys'] = req_keys
        raise InputValidationError(error_dict)
    # validate existence of extra fields
    extra_keys = set(input_keys) - set(max_keys)
    if extra_keys and not self.keyMap[rules_top_level_key]['extra_fields']:
        extra_key_list = []
        for key in extra_keys:
            # strip the current path prefix so the report shows bare key names
            pathless_key = re.sub(rules_top_level_key, '', key, count=1)
            extra_key_list.append(pathless_key)
        error_dict = {
            'object_title': object_title,
            'model_schema': self.schema,
            'input_criteria': self.keyMap[rules_top_level_key],
            'failed_test': 'extra_fields',
            'input_path': path_to_root,
            'error_value': extra_key_list,
            'error_code': 4003
        }
        # NOTE(review): also writes through to self.keyMap (shared reference)
        error_dict['input_criteria']['maximum_scope'] = max_key_list
        raise InputValidationError(error_dict)
    # validate datatype of value
    for key, value in input_dict.items():
        if path_to_root == '.':
            input_key_name = path_to_root + key
        else:
            input_key_name = path_to_root + '.' + key
        rules_input_key_name = re.sub('\[\d+\]', '[0]', input_key_name)
        if input_key_name in max_keys:
            input_criteria = self.keyMap[rules_input_key_name]
            error_dict = {
                'object_title': object_title,
                'model_schema': self.schema,
                'input_criteria': input_criteria,
                'failed_test': 'value_datatype',
                'input_path': input_key_name,
                'error_value': value,
                'error_code': 4001
            }
            try:
                value_index = self._datatype_classes.index(value.__class__)
            except:
                # unrecognized python type; report the class name instead
                error_dict['error_value'] = value.__class__.__name__
                raise InputValidationError(error_dict)
            value_type = self._datatype_names[value_index]
            # a 'null' declaration accepts a value of any datatype
            if input_criteria['value_datatype'] == 'null':
                pass
            else:
                if value_type != input_criteria['value_datatype']:
                    raise InputValidationError(error_dict)
            # call appropriate validation sub-routine for datatype of value
            if value_type == 'boolean':
                input_dict[key] = self._validate_boolean(value, input_key_name, object_title)
            elif value_type == 'number':
                input_dict[key] = self._validate_number(value, input_key_name, object_title)
            elif value_type == 'string':
                input_dict[key] = self._validate_string(value, input_key_name, object_title)
            elif value_type == 'map':
                input_dict[key] = self._validate_dict(value, schema_dict[key], input_key_name, object_title)
            elif value_type == 'list':
                input_dict[key] = self._validate_list(value, schema_dict[key], input_key_name, object_title)
    # set default values for empty optional fields
    for key in max_key_list:
        if key not in input_key_list:
            indexed_key = max_keys[max_key_list.index(key)]
            if indexed_key in self.components.keys():
                if 'default_value' in self.components[indexed_key]:
                    input_dict[key] = self.components[indexed_key]['default_value']
    return input_dict
|
a helper method for recursively validating keys in dictionaries
:return input_dict
|
train
|
https://github.com/collectiveacuity/jsonModel/blob/1ea64c36d78add3faa7b85ff82c5ec685458c940/jsonmodel/validators.py#L732-L904
| null |
class jsonModel(object):
__rules__ = jsonLoader('jsonmodel', 'models/model-rules.json')
def __init__(self, data_model, query_rules=None):
    '''
    a method for testing data model declaration & initializing the class

    :param data_model: dictionary with json model architecture
    :param query_rules: [optional] dictionary with valid field type qualifiers
    :return: object with jsonModel methods
    '''
    # validate schema input
    if not isinstance(data_model, dict):
        raise ModelValidationError('Data model must be a dictionary.')
    elif 'schema' not in data_model.keys():
        raise ModelValidationError('Data model must have a schema key.')
    elif not isinstance(data_model['schema'], dict):
        raise ModelValidationError('Value for the data model "schema" field must be a dictionary.')
    elif not data_model['schema']:
        raise ModelValidationError('Data model "schema" field must not be empty.')
    # construct base methods
    # deepcopy prevents later mutation of the caller's model declaration
    from copy import deepcopy
    data_model = deepcopy(data_model)
    self.schema = data_model['schema']
    model_map = mapModel(self.schema)
    self.keyName = model_map.keyName
    self.keyCriteria = model_map.keyCriteria
    # construct protected type classes
    self._datatype_names = mapModel._datatype_names
    self._datatype_classes = mapModel._datatype_classes
    # validate absence of item designators in keys (only '[0]' is legal)
    item_pattern = re.compile('\[\d+\]')
    for i in range(len(self.keyName)):
        patterns_found = item_pattern.findall(self.keyName[i])
        if patterns_found:
            for designator in patterns_found:
                if designator != '[0]':
                    message = 'Key name for schema field %s must not contain the item designator pattern %s' % (self.keyName[i], designator)
                    raise ModelValidationError(message)
    # validate existence of first item in list declarations
    key_set = set(self.keyName)
    for i in range(len(self.keyName)):
        if self.keyCriteria[i]['value_datatype'] == 'list':
            item_key = self.keyName[i] + '[0]'
            if not item_key in key_set:
                message = 'Schema field %s must declare an initial item for the list.' % self.keyName[i]
                raise ModelValidationError(message)
            # alter list requirement if first item is empty
            else:
                item_index = self.keyName.index(item_key)
                if not self.keyCriteria[item_index]['required_field']:
                    self.keyCriteria[i]['required_field'] = False
    # validate title input & construct title property
    self.title = ''
    if 'title' in data_model.keys():
        if not isinstance(data_model['title'], str):
            raise ModelValidationError('Value for model title must be a string.')
        self.title = data_model['title']
    # validate description input & construct description property
    self.description = ''
    if 'description' in data_model.keys():
        if not isinstance(data_model['description'], str):
            raise ModelValidationError('Value for model description must be a string.')
        self.description = data_model['description']
    # validate url input & construct url property
    self.url = ''
    if 'url' in data_model.keys():
        if not isinstance(data_model['url'], str):
            raise ModelValidationError('Value for model url must be a string.')
        # BUGFIX: previously assigned to self.title, which clobbered the
        # declared title and left self.url permanently empty
        self.url = data_model['url']
    # validate metadata input & construct metadata property
    self.metadata = {}
    if 'metadata' in data_model.keys():
        if not isinstance(data_model['metadata'], dict):
            raise ModelValidationError('Value for model metadata must be a dictionary.')
        self.metadata = data_model['metadata']
    # validate max size input & construct maxSize property
    # (disabled feature retained for reference)
    # self.maxSize = None
    # if 'max_size' in data_model.keys():
    #     if not isinstance(data_model['max_size'], int):
    #         raise ModelValidationError('Value for model max_size must be a positive integer.')
    #     elif data_model['max_size'] < 0:
    #         raise ModelValidationError('Value for model max_size must be a positive integer.')
    #     elif data_model['max_size']:
    #         self.maxSize = data_model['max_size']
    # validate components input & construct component property
    self.components = {}
    if 'components' in data_model.keys():
        if not isinstance(data_model['components'], dict):
            raise ModelValidationError('Value for model components must be a dictionary.')
        self.components = self._validate_fields(data_model['components'], self.__rules__['components'])
    # construct keyMap fields from key names and key criteria
    self.keyMap = {}
    for i in range(len(self.keyName)):
        self.keyMap[self.keyName[i]] = self.keyCriteria[i]
    for key, value in self.components.items():
        # convert javascript dot_path to class dot_path
        dot_key = ''
        if not key:
            dot_key = '.'
        else:
            if key[0] != '.':
                dot_key = '.%s' % key
        # add component declarations to keyMap
        if key in self.keyMap.keys():
            for k, v in self.components[key].items():
                self.keyMap[key][k] = v
        elif dot_key and dot_key in self.keyMap.keys():
            for k, v in self.components[key].items():
                self.keyMap[dot_key][k] = v
    # validate default values in lists
    self._validate_defaults(self.keyMap)
    # construct queryRules property from class model rules
    # (loop-invariant exclusion list hoisted out of the loop)
    remove_from_query = [ 'required_field', 'default_value', 'example_values', 'field_title', 'field_position', 'field_description', 'field_metadata', 'extra_fields' ]
    self.queryRules = {}
    for key, value in self.__rules__['components'].items():
        field_qualifiers = {
            'value_exists': False
        }
        for k, v in value.items():
            if k not in remove_from_query:
                field_qualifiers[k] = v
        self.queryRules[key] = field_qualifiers
    # validate query rules input and replace queryRules property
    if query_rules:
        if not isinstance(query_rules, dict):
            message = 'Value for query rules input must be a dictionary.'
            raise ModelValidationError(message)
        input_set = set(query_rules.keys())
        req_set = set(self.queryRules.keys())
        if input_set - req_set:
            message = 'Query rules input may only have %s field key names.' % req_set
            raise ModelValidationError(message)
        elif req_set - input_set:
            message = 'Query rules input must have all %s field key names.' % req_set
            raise ModelValidationError(message)
        for key in req_set:
            if not isinstance(query_rules[key], dict):
                message = 'Value for query rules %s field must be a dictionary.' % key
                raise ModelValidationError(message)
            input_qualifier_set = set(query_rules[key].keys())
            req_qualifier_set = set(self.queryRules[key].keys())
            if input_qualifier_set - req_qualifier_set:
                message = 'Query rules field %s may only have qualifiers %s' % (key, req_qualifier_set)
                raise ModelValidationError(message)
            # each declared qualifier must match the datatype of the default rule
            for k, v in query_rules[key].items():
                if v.__class__ != self.queryRules[key][k].__class__:
                    qualifier_index = self._datatype_classes.index(self.queryRules[key][k].__class__)
                    qualifier_type = self._datatype_names[qualifier_index]
                    message = 'Value for query rules field %s qualifier %s must be a "%s" datatype.' % (key, k, qualifier_type)
                    raise ModelValidationError(message)
        self.queryRules = query_rules
    def _validate_fields(self, fields_dict, fields_rules, declared_value=True):
        '''
        a helper method for validating the internal consistency of field criteria

        :param fields_dict: dictionary of field dot-paths mapped to criteria dictionaries
        :param fields_rules: dictionary with the qualifiers permitted for each field datatype
        :param declared_value: [optional] boolean to also test values declared in the schema
        :return: fields_dict (unchanged) if every criterion is internally consistent

        NOTE(review): raises ModelValidationError on the first inconsistency found
        '''
        # validate key names in fields
        for key, value in fields_dict.items():
            # convert javascript dot_path to class dot_path
            if not key:
                key = '.'
            else:
                if key[0] != '.':
                    key = '.%s' % key
            if key not in self.keyName:
                raise ModelValidationError('Field %s is not a field declared in model schema.' % key)
            elif not isinstance(value, dict):
                raise ModelValidationError('Value for field %s must be a dictionary.' % key)
            # validate field criteria are appropriate to field datatype
            value_type = self.keyCriteria[self.keyName.index(key)]['value_datatype']
            type_dict = {}
            if value_type == 'string':
                type_dict = fields_rules['.string_fields']
            elif value_type == 'number':
                type_dict = fields_rules['.number_fields']
            elif value_type == 'boolean':
                type_dict = fields_rules['.boolean_fields']
            elif value_type == 'list':
                type_dict = fields_rules['.list_fields']
            elif value_type == 'map':
                type_dict = fields_rules['.map_fields']
            elif value_type == 'null':
                type_dict = fields_rules['.null_fields']
            if set(value.keys()) - set(type_dict.keys()):
                raise ModelValidationError('Field %s may only have datatype %s qualifiers %s.' % (key, value_type, set(type_dict.keys())))
            # validate criteria qualifier values are appropriate datatype
            for k, v in value.items():
                v_index = self._datatype_classes.index(v.__class__)
                v_type = self._datatype_names[v_index]
                qualifier_index = self._datatype_classes.index(type_dict[k].__class__)
                qualifier_type = self._datatype_names[qualifier_index]
                if v_type != qualifier_type:
                    message = 'Value for field %s qualifier %s must be a %s datatype.' % (key, k, qualifier_type)
                    raise ModelValidationError(message)
                # number qualifiers whose exemplar in the rules is an int must be ints
                if qualifier_type == 'number':
                    if isinstance(type_dict[k], int):
                        if not isinstance(v, int):
                            message = 'Value for field %s qualifier %s must be an integer.' % (key, k)
                            raise ModelValidationError(message)
                # validate internal logic of each qualifier value declaration
                if k in ('must_not_contain', 'must_contain', 'contains_either'):
                    for item in v:
                        if not isinstance(item, str):
                            message = 'Each item in list field %s qualifier %s must be a string.' % (key, k)
                            raise ModelValidationError(message)
                if k in ('min_length', 'max_length', 'min_size', 'max_size'):
                    if v < 0:
                        message = 'Value for field %s qualifier %s cannot be negative.' % (key, k)
                        raise ModelValidationError(message)
                if k in ('discrete_values', 'excluded_values', 'example_values'):
                    for item in v:
                        if value_type == 'number':
                            if not isinstance(item, int) and not isinstance(item, float):
                                message = 'Each item in field %s qualifier %s list must be a number.' % (key, k)
                                raise ModelValidationError(message)
                        elif not isinstance(item, str):
                            message = 'Each item in list for field %s qualifier %s must be a string.' % (key, k)
                            raise ModelValidationError(message)
                if k == 'identical_to':
                    # referenced field must exist among the declared component keys
                    if not v in self.keyName:
                        message = 'Value "%s" for field %s qualifier %s not found in components keys.' % (v, key, k)
                        raise ModelValidationError(message)
                if k == 'unique_values':
                    # uniqueness is only testable on hashable scalar list items
                    if v:
                        item_name = key + '[0]'
                        item_type = self.keyCriteria[self.keyName.index(item_name)]['value_datatype']
                        if not item_type in ('number', 'string'):
                            message = 'Field %s[0] must be either a string or number if qualifier "unique_values": true' % key
                            raise ModelValidationError(message)
            # validate lack of other qualifiers if value exist is false
            if 'value_exists' in value.keys():
                if not value['value_exists']:
                    if set(value.keys()) - {'value_exists'}:
                        message = 'If field %s qualifier value_exists: false, field may not have other qualifiers.' % key
                        raise ModelValidationError(message)
            # validate size qualifiers against each other
            size_qualifiers = ['min_size', 'max_size']
            for qualifier in size_qualifiers:
                if qualifier in value.keys():
                    test_value = value[qualifier]
                    value_path = 'field %s qualifier %s' % (key, qualifier)
                    header = 'Value %s for %s' % (test_value, value_path)
                    if 'min_size' in value.keys():
                        if test_value < value['min_size']:
                            message = '%s must not be less than "min_size": %s' % (header, value['min_size'])
                            raise ModelValidationError(message)
                    if 'max_size' in value.keys():
                        if test_value > value['max_size']:
                            message = '%s must not be greater than "max_size": %s' % (header, value['max_size'])
                            raise ModelValidationError(message)
            # validate length qualifiers against each other
            length_qualifiers = ['min_length', 'max_length']
            for qualifier in length_qualifiers:
                if qualifier in value.keys():
                    test_value = value[qualifier]
                    value_path = 'field %s qualifier %s' % (key, qualifier)
                    header = 'Value %s for %s' % (test_value, value_path)
                    if 'min_length' in value.keys():
                        if test_value < value['min_length']:
                            message = '%s must be at least "min_length": %s' % (header, value['min_length'])
                            raise ModelValidationError(message)
                    if 'max_length' in value.keys():
                        if test_value > value['max_length']:
                            message = '%s cannot be more than "max_length": %s' % (header, value['max_length'])
                            raise ModelValidationError(message)
            # validate range qualifiers against each other & length qualifiers
            range_qualifiers = ['min_value', 'max_value', 'greater_than', 'less_than', 'equal_to']
            for qualifier in range_qualifiers:
                if qualifier in value.keys():
                    test_value = value[qualifier]
                    value_path = 'field %s qualifier %s' % (key, qualifier)
                    quote_text = ''
                    if isinstance(test_value, str):
                        quote_text = '"'
                    header = 'Value %s%s%s for %s' % (quote_text, test_value, quote_text, value_path)
                    if 'min_value' in value.keys():
                        if test_value < value['min_value']:
                            message = '%s must not be less than "min_value": %s' % (header, value['min_value'])
                            raise ModelValidationError(message)
                    if 'max_value' in value.keys():
                        if test_value > value['max_value']:
                            message = '%s must not be greater than "max_value": %s' % (header, value['max_value'])
                            raise ModelValidationError(message)
                    if 'greater_than' in value.keys():
                        # a qualifier is exempt from comparison against itself
                        if test_value <= value['greater_than'] and not qualifier == 'greater_than':
                            message = '%s must be "greater_than": %s' % (header, value['greater_than'])
                            raise ModelValidationError(message)
                    if 'less_than' in value.keys():
                        if test_value >= value['less_than'] and not qualifier == 'less_than':
                            message = '%s must be "less_than": %s' % (header, value['less_than'])
                            raise ModelValidationError(message)
                    if 'min_length' in value.keys():
                        if len(test_value) < value['min_length']:
                            message = '%s must be at least "min_length": %s' % (header, value['min_length'])
                            raise ModelValidationError(message)
                    if 'max_length' in value.keys():
                        if len(test_value) > value['max_length']:
                            message = '%s cannot be more than "max_length": %s' % (header, value['max_length'])
                            raise ModelValidationError(message)
                    if 'integer_data' in value.keys():
                        if value['integer_data']:
                            if not isinstance(test_value, int):
                                message = '%s must be an "integer_data".' % header
                                raise ModelValidationError(message)
                    if 'must_not_contain' in value.keys():
                        for regex in value['must_not_contain']:
                            regex_pattern = re.compile(regex)
                            if regex_pattern.findall(test_value):
                                message = '%s matches regex pattern in "must_not_contain": ["%s"]' % (header, regex)
                                raise ModelValidationError(message)
                    if 'must_contain' in value.keys():
                        for regex in value['must_contain']:
                            regex_pattern = re.compile(regex)
                            if not regex_pattern.findall(test_value):
                                message = '%s does not match regex pattern in "must_contain": ["%s"].' % (header, regex)
                                raise ModelValidationError(message)
                    if 'contains_either' in value.keys():
                        # at least one of the listed regex patterns must match
                        regex_match = False
                        regex_patterns = []
                        for regex in value['contains_either']:
                            regex_patterns.append(regex)
                            regex_pattern = re.compile(regex)
                            if regex_pattern.findall(test_value):
                                regex_match = True
                        if not regex_match:
                            message = '%s does not match any regex patterns in "contains_either": %s' % (header, regex_patterns)
                            raise ModelValidationError(message)
                    if 'byte_data' in value.keys():
                        # range comparisons other than equal_to are meaningless for base64 data
                        if value['byte_data']:
                            if qualifier != 'equal_to':
                                message = '%s cannot be used with base64 encoded "byte_data".' % header
                                raise ModelValidationError(message)
            # validate discrete value qualifiers against other criteria
            schema_field = self.keyCriteria[self.keyName.index(key)]
            discrete_qualifiers = ['declared_value', 'default_value', 'excluded_values', 'discrete_values', 'example_values']
            for qualifier in discrete_qualifiers:
                test_qualifier = False
                if qualifier in schema_field:
                    test_qualifier = True
                    if qualifier == 'declared_value' and not schema_field[qualifier]:
                        test_qualifier = False
                if qualifier in value.keys() or (test_qualifier and declared_value):
                    # normalize the qualifier value(s) into a list of test values
                    multiple_values = False
                    if qualifier in value.keys():
                        if isinstance(value[qualifier], list):
                            test_list = value[qualifier]
                            multiple_values = True
                        else:
                            test_list = [value[qualifier]]
                    else:
                        test_list = [schema_field[qualifier]]
                    value_path = 'field %s qualifier %s' % (key, qualifier)
                    for i in range(len(test_list)):
                        test_value = test_list[i]
                        quote_text = ''
                        if isinstance(test_value, str):
                            quote_text = '"'
                        item_text = ''
                        if multiple_values:
                            item_text = '[%s]' % i
                        header = 'Value %s%s%s for %s%s' % (quote_text, test_value, quote_text, value_path, item_text)
                        if 'min_value' in value.keys():
                            if test_value < value['min_value']:
                                message = '%s must not be less than "min_value": %s' % (header, value['min_value'])
                                raise ModelValidationError(message)
                        if 'max_value' in value.keys():
                            if test_value > value['max_value']:
                                message = '%s must not be greater than "max_value": %s' % (header, value['max_value'])
                                raise ModelValidationError(message)
                        if 'equal_to' in value.keys():
                            if test_value != value['equal_to']:
                                # NOTE(review): only boolean test values (excluding declared_value)
                                # trigger this equal_to error — presumably deliberate, confirm
                                if qualifier != 'declared_value' and isinstance(test_value, bool):
                                    message = '%s must be "equal_to": %s' % (header, value['equal_to'])
                                    raise ModelValidationError(message)
                        if 'greater_than' in value.keys():
                            if test_value <= value['greater_than']:
                                message = '%s must be "greater_than": %s' % (header, value['greater_than'])
                                raise ModelValidationError(message)
                        if 'less_than' in value.keys():
                            if test_value >= value['less_than']:
                                message = '%s must be "less_than": %s' % (header, value['less_than'])
                                raise ModelValidationError(message)
                        if 'integer_data' in value.keys():
                            if value['integer_data']:
                                if not isinstance(test_value, int):
                                    message = '%s must be an "integer_data".' % header
                                    raise ModelValidationError(message)
                        if 'min_length' in value.keys():
                            if len(test_value) < value['min_length']:
                                message = '%s must be at least "min_length": %s' % (header, value['min_length'])
                                raise ModelValidationError(message)
                        if 'max_length' in value.keys():
                            if len(test_value) > value['max_length']:
                                message = '%s cannot be more than "max_length": %s' % (header, value['max_length'])
                                raise ModelValidationError(message)
                        if 'must_not_contain' in value.keys():
                            for regex in value['must_not_contain']:
                                regex_pattern = re.compile(regex)
                                if regex_pattern.findall(test_value):
                                    message = '%s matches regex pattern in "must_not_contain": ["%s"]' % (header, regex)
                                    raise ModelValidationError(message)
                        if 'must_contain' in value.keys():
                            for regex in value['must_contain']:
                                regex_pattern = re.compile(regex)
                                if not regex_pattern.findall(test_value):
                                    message = '%s does not match regex pattern in "must_contain": ["%s"]' % (header, regex)
                                    raise ModelValidationError(message)
                        if 'contains_either' in value.keys():
                            regex_match = False
                            regex_patterns = []
                            for regex in value['contains_either']:
                                regex_patterns.append(regex)
                                regex_pattern = re.compile(regex)
                                if regex_pattern.findall(test_value):
                                    regex_match = True
                            if not regex_match:
                                message = '%s does not match any regex patterns in "contains_either": %s' % (header, regex_patterns)
                                raise ModelValidationError(message)
                        if 'byte_data' in value.keys():
                            # declared/default/example values must be valid base64
                            message = '%s cannot be base64 decoded to "byte_data".' % header
                            try:
                                decoded_bytes = b64decode(test_value)
                            except:
                                raise ModelValidationError(message)
                            if not isinstance(decoded_bytes, bytes):
                                raise ModelValidationError(message)
            # validate discrete value qualifiers against each other
            for qualifier in discrete_qualifiers:
                test_qualifier = False
                if qualifier in schema_field:
                    test_qualifier = True
                    if qualifier == 'declared_value' and not schema_field[qualifier]:
                        test_qualifier = False
                if qualifier in value.keys() or (test_qualifier and declared_value):
                    multiple_values = False
                    if qualifier in value.keys():
                        if isinstance(value[qualifier], list):
                            test_list = value[qualifier]
                            multiple_values = True
                        else:
                            test_list = [value[qualifier]]
                    else:
                        test_list = [schema_field[qualifier]]
                    value_path = 'field %s qualifier %s' % (key, qualifier)
                    for i in range(len(test_list)):
                        test_value = test_list[i]
                        quote_text = ''
                        if isinstance(test_value, str):
                            quote_text = '"'
                        item_text = ''
                        if multiple_values:
                            item_text = '[%s]' % i
                        header = 'Value %s%s%s for %s%s' % (quote_text, test_value, quote_text, value_path, item_text)
                        if 'excluded_values' in value.keys():
                            # excluded_values items are exempt from testing against themselves
                            if not qualifier == 'excluded_values':
                                if test_value in value['excluded_values']:
                                    message = '%s cannot be one of "excluded_values": %s.' % (header, value['excluded_values'])
                                    raise ModelValidationError(message)
                        if 'discrete_values' in value.keys():
                            # excluded_values are exempt here too (they should NOT be discrete values)
                            if not qualifier == 'excluded_values':
                                if test_value not in value['discrete_values']:
                                    message = '%s must be one of "discrete_values": %s' % (header, value['discrete_values'])
                                    raise ModelValidationError(message)
        return fields_dict
    def _validate_defaults(self, fields_dict):
        '''
        a helper method for validating default values declared for list fields

        :param fields_dict: dictionary of field dot-paths mapped to criteria dictionaries
        :return: fields_dict (unchanged)

        NOTE(review): only fields whose datatype is 'list' are validated here; each
        default item is checked against the criteria of the list's item field ([0])
        '''
        # validate key names in fields
        for key, value in fields_dict.items():
            # retrieve value type and type dict
            value_type = self.keyCriteria[self.keyName.index(key)]['value_datatype']
            # validate discrete value qualifiers against other criteria
            qualifier = 'default_value'
            if qualifier in value.keys():
                # normalize the default value(s) into a list of test values
                multiple_values = False
                if isinstance(value[qualifier], list):
                    test_list = value[qualifier]
                    multiple_values = True
                else:
                    test_list = [value[qualifier]]
                value_path = 'field %s qualifier %s' % (key, qualifier)
                for i in range(len(test_list)):
                    test_value = test_list[i]
                    quote_text = ''
                    if isinstance(test_value, str):
                        quote_text = '"'
                    item_text = ''
                    if multiple_values:
                        item_text = '[%s]' % i
                    qualifier_text = value_path + item_text
                    header = 'Value %s%s%s for %s' % (quote_text, test_value, quote_text, qualifier_text)
                    if value_type == 'list':
                        default_item_key = '%s[0]' % key
                        try:
                            self.validate(test_value, default_item_key, object_title=header)
                        except Exception as err:
                            # rewrite the item-path in the error to point at the qualifier instead
                            raise ModelValidationError(str(err).strip().replace('field %s' % default_item_key, qualifier_text))
        return fields_dict
    def _evaluate_field(self, record_dict, field_name, field_criteria):
        ''' a helper method for evaluating record values based upon query criteria

        most operators pass if ANY value found at the field path satisfies them;
        must_not_contain and excluded_values fail if ANY value violates them

        :param record_dict: dictionary with model valid data to evaluate
        :param field_name: string with path to root of query field
        :param field_criteria: dictionary with query operators and qualifiers
        :return: boolean (True if no field_criteria evaluate to false)
        '''
        # determine value existence criteria
        value_exists = True
        if 'value_exists' in field_criteria.keys():
            if not field_criteria['value_exists']:
                value_exists = False
        # validate existence of field
        field_exists = True
        try:
            record_values = self._walk(field_name, record_dict)
        except:
            field_exists = False
        # evaluate existence query criteria
        if value_exists != field_exists:
            return False
        elif not value_exists:
            # field is expected absent and is absent; no other criteria allowed
            return True
        # convert javascript dot_path to class dot_path
        field_key = field_name
        if not field_name:
            field_key = '.'
        else:
            if field_name[0] != '.':
                field_key = '.%s' % field_name
        # evaluate other query criteria
        for key, value in field_criteria.items():
            if key in ('min_size', 'min_length'):
                found = False
                if self.keyMap[field_key]['value_datatype'] == 'map':
                    # maps are measured by serialized byte size
                    # NOTE(review): the 51-byte offset presumably removes json/str overhead — confirm
                    for record_value in record_values:
                        record_size = sys.getsizeof(json.dumps(str(record_value)).replace(' ','')) - 51
                        if record_size >= value:
                            found = True
                            break
                else:
                    for record_value in record_values:
                        if len(record_value) >= value:
                            found = True
                            break
                if not found:
                    return False
            elif key in ('max_size', 'max_length'):
                found = False
                if self.keyMap[field_key]['value_datatype'] == 'map':
                    for record_value in record_values:
                        record_size = sys.getsizeof(json.dumps(str(record_value)).replace(' ','')) - 51
                        if record_size <= value:
                            found = True
                            break
                else:
                    for record_value in record_values:
                        if len(record_value) <= value:
                            found = True
                            break
                if not found:
                    return False
            elif key == 'min_value':
                found = False
                for record_value in record_values:
                    if record_value >= value:
                        found = True
                        break
                if not found:
                    return False
            elif key == 'max_value':
                found = False
                for record_value in record_values:
                    if record_value <= value:
                        found = True
                        break
                if not found:
                    return False
            elif key == 'equal_to':
                found = False
                for record_value in record_values:
                    if record_value == value:
                        found = True
                        break
                if not found:
                    return False
            elif key == 'greater_than':
                found = False
                for record_value in record_values:
                    if record_value > value:
                        found = True
                        break
                if not found:
                    return False
            elif key == 'less_than':
                found = False
                for record_value in record_values:
                    if record_value < value:
                        found = True
                        break
                if not found:
                    return False
            elif key == 'excluded_values':
                # fails if ANY record value is excluded
                for record_value in record_values:
                    if record_value in value:
                        return False
            elif key == 'discrete_values':
                found = False
                for record_value in record_values:
                    if record_value in value:
                        found = True
                        break
                if not found:
                    return False
            elif key == 'integer_data':
                # exact-class test (bool is compared by class, not isinstance)
                found = False
                dummy_int = 1
                for record_value in record_values:
                    if record_value.__class__ == dummy_int.__class__:
                        found = True
                        break
                if value != found:
                    return False
            elif key == 'byte_data':
                # value True requires at least one base64-decodable string
                found = False
                for record_value in record_values:
                    try:
                        decoded_bytes = b64decode(record_value)
                    except:
                        decoded_bytes = ''
                    if isinstance(decoded_bytes, bytes):
                        found = True
                        break
                if value != found:
                    return False
            elif key == 'must_contain':
                # each regex must match at least one record value
                for regex in value:
                    regex_pattern = re.compile(regex)
                    found = False
                    for record_value in record_values:
                        if regex_pattern.findall(record_value):
                            found = True
                            break
                    if not found:
                        return False
            elif key == 'must_not_contain':
                # fails if any regex matches any record value
                for regex in value:
                    regex_pattern = re.compile(regex)
                    found = False
                    for record_value in record_values:
                        if regex_pattern.findall(record_value):
                            found = True
                            break
                    if found:
                        return False
            elif key == 'contains_either':
                # at least one regex must match at least one record value
                found = False
                for regex in value:
                    regex_pattern = re.compile(regex)
                    for record_value in record_values:
                        if regex_pattern.findall(record_value):
                            found = True
                            break
                    if found:
                        break
                if not found:
                    return False
            elif key == 'unique_values':
                for record_value in record_values:
                    unique_values = True
                    if len(record_value) != len(set(record_value)):
                        unique_values = False
                    if value != unique_values:
                        return False
        return True
    def _validate_list(self, input_list, schema_list, path_to_root, object_title=''):
        '''
        a helper method for recursively validating items in a list

        :param input_list: list with items to validate (validated in place)
        :param schema_list: list from the schema declaring the item archetype at [0]
        :param path_to_root: string with dot path from root to this list
        :param object_title: [optional] string included in error reports
        :return: input_list

        NOTE(review): raises InputValidationError on the first failed criterion
        '''
        # construct rules for list and items
        rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
        list_rules = self.keyMap[rules_path_to_root]
        initial_key = rules_path_to_root + '[0]'
        item_rules = self.keyMap[initial_key]
        # construct list error report template
        list_error = {
            'object_title': object_title,
            'model_schema': self.schema,
            'input_criteria': list_rules,
            'failed_test': 'value_datatype',
            'input_path': path_to_root,
            'error_value': 0,
            'error_code': 4001
        }
        # validate list rules
        if 'min_size' in list_rules.keys():
            if len(input_list) < list_rules['min_size']:
                list_error['failed_test'] = 'min_size'
                list_error['error_value'] = len(input_list)
                list_error['error_code'] = 4031
                raise InputValidationError(list_error)
        if 'max_size' in list_rules.keys():
            if len(input_list) > list_rules['max_size']:
                list_error['failed_test'] = 'max_size'
                list_error['error_value'] = len(input_list)
                list_error['error_code'] = 4032
                raise InputValidationError(list_error)
        # construct item error report template
        item_error = {
            'object_title': object_title,
            'model_schema': self.schema,
            'input_criteria': item_rules,
            'failed_test': 'value_datatype',
            'input_path': initial_key,
            'error_value': None,
            'error_code': 4001
        }
        # validate datatype of items
        for i in range(len(input_list)):
            input_path = path_to_root + '[%s]' % i
            item = input_list[i]
            item_error['input_path'] = input_path
            try:
                item_index = self._datatype_classes.index(item.__class__)
            except:
                # item class is not a recognized model datatype
                item_error['error_value'] = item.__class__.__name__
                raise InputValidationError(item_error)
            item_type = self._datatype_names[item_index]
            item_error['error_value'] = item
            if item_rules['value_datatype'] == 'null':
                # a null item archetype accepts any datatype
                pass
            else:
                if item_type != item_rules['value_datatype']:
                    raise InputValidationError(item_error)
            # call appropriate validation sub-routine for datatype of item
            if item_type == 'boolean':
                input_list[i] = self._validate_boolean(item, input_path, object_title)
            elif item_type == 'number':
                input_list[i] = self._validate_number(item, input_path, object_title)
            elif item_type == 'string':
                input_list[i] = self._validate_string(item, input_path, object_title)
            elif item_type == 'map':
                input_list[i] = self._validate_dict(item, schema_list[0], input_path, object_title)
            elif item_type == 'list':
                input_list[i] = self._validate_list(item, schema_list[0], input_path, object_title)
        # validate unique values in list
        if 'unique_values' in list_rules.keys():
            if len(set(input_list)) < len(input_list):
                list_error['failed_test'] = 'unique_values'
                list_error['error_value'] = input_list
                list_error['error_code'] = 4033
                raise InputValidationError(list_error)
        # TODO: validate top-level item values against identical to reference
        # TODO: run lambda function and call validation url
        return input_list
    def _validate_number(self, input_number, path_to_root, object_title=''):
        '''
        a helper method for validating properties of a number

        :param input_number: number to validate against the field criteria
        :param path_to_root: string with dot path from root to this number
        :param object_title: [optional] string included in error reports
        :return: input_number

        NOTE(review): raises InputValidationError on the first failed criterion
        '''
        # normalize indexed paths (eg. field[3]) to the canonical rules path (field[0])
        rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
        input_criteria = self.keyMap[rules_path_to_root]
        # template for the error report raised on a failed criterion
        error_dict = {
            'object_title': object_title,
            'model_schema': self.schema,
            'input_criteria': input_criteria,
            'failed_test': 'value_datatype',
            'input_path': path_to_root,
            'error_value': input_number,
            'error_code': 4001
        }
        if 'integer_data' in input_criteria.keys():
            if input_criteria['integer_data'] and not isinstance(input_number, int):
                error_dict['failed_test'] = 'integer_data'
                error_dict['error_code'] = 4021
                raise InputValidationError(error_dict)
        if 'min_value' in input_criteria.keys():
            if input_number < input_criteria['min_value']:
                error_dict['failed_test'] = 'min_value'
                error_dict['error_code'] = 4022
                raise InputValidationError(error_dict)
        if 'max_value' in input_criteria.keys():
            if input_number > input_criteria['max_value']:
                error_dict['failed_test'] = 'max_value'
                error_dict['error_code'] = 4023
                raise InputValidationError(error_dict)
        if 'greater_than' in input_criteria.keys():
            if input_number <= input_criteria['greater_than']:
                error_dict['failed_test'] = 'greater_than'
                error_dict['error_code'] = 4024
                raise InputValidationError(error_dict)
        if 'less_than' in input_criteria.keys():
            if input_number >= input_criteria['less_than']:
                error_dict['failed_test'] = 'less_than'
                error_dict['error_code'] = 4025
                raise InputValidationError(error_dict)
        if 'equal_to' in input_criteria.keys():
            if input_number != input_criteria['equal_to']:
                error_dict['failed_test'] = 'equal_to'
                error_dict['error_code'] = 4026
                raise InputValidationError(error_dict)
        if 'discrete_values' in input_criteria.keys():
            if input_number not in input_criteria['discrete_values']:
                error_dict['failed_test'] = 'discrete_values'
                error_dict['error_code'] = 4041
                raise InputValidationError(error_dict)
        if 'excluded_values' in input_criteria.keys():
            if input_number in input_criteria['excluded_values']:
                error_dict['failed_test'] = 'excluded_values'
                error_dict['error_code'] = 4042
                raise InputValidationError(error_dict)
        # TODO: validate number against identical to reference
        # TODO: run lambda function and call validation url
        return input_number
    def _validate_string(self, input_string, path_to_root, object_title=''):
        '''
        a helper method for validating properties of a string

        :param input_string: string to validate against the field criteria
        :param path_to_root: string with dot path from root to this string
        :param object_title: [optional] string included in error reports
        :return: input_string

        NOTE(review): raises InputValidationError on the first failed criterion
        '''
        # normalize indexed paths (eg. field[3]) to the canonical rules path (field[0])
        rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
        input_criteria = self.keyMap[rules_path_to_root]
        # template for the error report raised on a failed criterion
        error_dict = {
            'object_title': object_title,
            'model_schema': self.schema,
            'input_criteria': input_criteria,
            'failed_test': 'value_datatype',
            'input_path': path_to_root,
            'error_value': input_string,
            'error_code': 4001
        }
        if 'byte_data' in input_criteria.keys():
            # byte_data strings must be base64 decodable
            if input_criteria['byte_data']:
                error_dict['failed_test'] = 'byte_data'
                error_dict['error_code'] = 4011
                try:
                    decoded_bytes = b64decode(input_string)
                except:
                    raise InputValidationError(error_dict)
                if not isinstance(decoded_bytes, bytes):
                    raise InputValidationError(error_dict)
        # range criteria compare strings lexicographically
        if 'min_value' in input_criteria.keys():
            if input_string < input_criteria['min_value']:
                error_dict['failed_test'] = 'min_value'
                error_dict['error_code'] = 4022
                raise InputValidationError(error_dict)
        if 'max_value' in input_criteria.keys():
            if input_string > input_criteria['max_value']:
                error_dict['failed_test'] = 'max_value'
                error_dict['error_code'] = 4023
                raise InputValidationError(error_dict)
        if 'greater_than' in input_criteria.keys():
            if input_string <= input_criteria['greater_than']:
                error_dict['failed_test'] = 'greater_than'
                error_dict['error_code'] = 4024
                raise InputValidationError(error_dict)
        if 'less_than' in input_criteria.keys():
            if input_string >= input_criteria['less_than']:
                error_dict['failed_test'] = 'less_than'
                error_dict['error_code'] = 4025
                raise InputValidationError(error_dict)
        if 'equal_to' in input_criteria.keys():
            if input_string != input_criteria['equal_to']:
                error_dict['failed_test'] = 'equal_to'
                error_dict['error_code'] = 4026
                raise InputValidationError(error_dict)
        if 'min_length' in input_criteria.keys():
            if len(input_string) < input_criteria['min_length']:
                error_dict['failed_test'] = 'min_length'
                error_dict['error_code'] = 4012
                raise InputValidationError(error_dict)
        if 'max_length' in input_criteria.keys():
            if len(input_string) > input_criteria['max_length']:
                error_dict['failed_test'] = 'max_length'
                error_dict['error_code'] = 4013
                raise InputValidationError(error_dict)
        if 'must_not_contain' in input_criteria.keys():
            for regex in input_criteria['must_not_contain']:
                regex_pattern = re.compile(regex)
                if regex_pattern.findall(input_string):
                    error_dict['failed_test'] = 'must_not_contain'
                    error_dict['error_code'] = 4014
                    raise InputValidationError(error_dict)
        if 'must_contain' in input_criteria.keys():
            for regex in input_criteria['must_contain']:
                regex_pattern = re.compile(regex)
                if not regex_pattern.findall(input_string):
                    error_dict['failed_test'] = 'must_contain'
                    error_dict['error_code'] = 4015
                    raise InputValidationError(error_dict)
        if 'contains_either' in input_criteria.keys():
            # at least one of the listed regex patterns must match
            regex_match = False
            for regex in input_criteria['contains_either']:
                regex_pattern = re.compile(regex)
                if regex_pattern.findall(input_string):
                    regex_match = True
            if not regex_match:
                error_dict['failed_test'] = 'contains_either'
                error_dict['error_code'] = 4016
                raise InputValidationError(error_dict)
        if 'discrete_values' in input_criteria.keys():
            if input_string not in input_criteria['discrete_values']:
                error_dict['failed_test'] = 'discrete_values'
                error_dict['error_code'] = 4041
                raise InputValidationError(error_dict)
        if 'excluded_values' in input_criteria.keys():
            if input_string in input_criteria['excluded_values']:
                error_dict['failed_test'] = 'excluded_values'
                error_dict['error_code'] = 4042
                raise InputValidationError(error_dict)
        # TODO: validate string against identical to reference
        # TODO: run lambda function and call validation url
        return input_string
def _validate_boolean(self, input_boolean, path_to_root, object_title=''):
'''
a helper method for validating properties of a boolean
:return: input_boolean
'''
rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
input_criteria = self.keyMap[rules_path_to_root]
error_dict = {
'object_title': object_title,
'model_schema': self.schema,
'input_criteria': input_criteria,
'failed_test': 'value_datatype',
'input_path': path_to_root,
'error_value': input_boolean,
'error_code': 4001
}
if 'equal_to' in input_criteria.keys():
if input_boolean != input_criteria['equal_to']:
error_dict['failed_test'] = 'equal_to'
error_dict['error_code'] = 4026
raise InputValidationError(error_dict)
# TODO: validate boolean against identical to reference
# TODO: run lambda function and call validation url
return input_boolean
    def _ingest_dict(self, input_dict, schema_dict, path_to_root):
        '''
        a helper method for ingesting keys, value pairs in a dictionary

        :param input_dict: dictionary with values to ingest
        :param schema_dict: dictionary from the schema declaring expected keys and archetypes
        :param path_to_root: string with dot path from root to this dictionary
        :return: valid_dict

        NOTE(review): missing or mismatched keys are replaced with the declared
        default value or a datatype-appropriate empty value instead of raising
        '''
        valid_dict = {}
        # construct path to root for rules
        rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
        # iterate over keys in schema dict
        for key, value in schema_dict.items():
            key_path = path_to_root
            if not key_path == '.':
                key_path += '.'
            key_path += key
            rules_key_path = re.sub('\[\d+\]', '[0]', key_path)
            # determine whether the input value matches the schema archetype datatype
            value_match = False
            if key in input_dict.keys():
                value_index = self._datatype_classes.index(value.__class__)
                value_type = self._datatype_names[value_index]
                try:
                    v_index = self._datatype_classes.index(input_dict[key].__class__)
                    v_type = self._datatype_names[v_index]
                    if v_type == value_type:
                        value_match = True
                except:
                    value_match = False
            if value_match:
                # recurse/validate the input value by its datatype
                if value_type == 'null':
                    valid_dict[key] = input_dict[key]
                elif value_type == 'boolean':
                    valid_dict[key] = self._ingest_boolean(input_dict[key], key_path)
                elif value_type == 'number':
                    valid_dict[key] = self._ingest_number(input_dict[key], key_path)
                elif value_type == 'string':
                    valid_dict[key] = self._ingest_string(input_dict[key], key_path)
                elif value_type == 'map':
                    valid_dict[key] = self._ingest_dict(input_dict[key], schema_dict[key], key_path)
                elif value_type == 'list':
                    valid_dict[key] = self._ingest_list(input_dict[key], schema_dict[key], key_path)
            else:
                # substitute the declared default or an empty value for the datatype
                value_type = self.keyMap[rules_key_path]['value_datatype']
                if 'default_value' in self.keyMap[rules_key_path]:
                    valid_dict[key] = self.keyMap[rules_key_path]['default_value']
                elif value_type == 'null':
                    valid_dict[key] = None
                elif value_type == 'boolean':
                    valid_dict[key] = False
                elif value_type == 'number':
                    valid_dict[key] = 0.0
                    if 'integer_data' in self.keyMap[rules_key_path].keys():
                        if self.keyMap[rules_key_path]['integer_data']:
                            valid_dict[key] = 0
                elif value_type == 'string':
                    valid_dict[key] = ''
                elif value_type == 'list':
                    valid_dict[key] = []
                elif value_type == 'map':
                    valid_dict[key] = self._ingest_dict({}, schema_dict[key], key_path)
        # add extra fields if set to True
        if self.keyMap[rules_path_to_root]['extra_fields']:
            for key, value in input_dict.items():
                if key not in valid_dict.keys():
                    valid_dict[key] = value
        return valid_dict
    def _ingest_list(self, input_list, schema_list, path_to_root):
        '''
        a helper method for ingesting items in a list

        :param input_list: list with items to ingest
        :param schema_list: list from the schema declaring the item archetype at [0]
        :param path_to_root: string with dot path from root to this list
        :return: valid_list

        NOTE(review): items that fail validation are silently dropped rather than raising
        '''
        valid_list = []
        # construct max list size
        max_size = None
        rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
        if 'max_size' in self.keyMap[rules_path_to_root].keys():
            # a falsy max_size (eg. 0) short-circuits to an empty list
            if not self.keyMap[rules_path_to_root]['max_size']:
                return valid_list
            else:
                max_size = self.keyMap[rules_path_to_root]['max_size']
        # iterate over items in input list
        if input_list:
            rules_index = self._datatype_classes.index(schema_list[0].__class__)
            rules_type = self._datatype_names[rules_index]
            for i in range(len(input_list)):
                item_path = '%s[%s]' % (path_to_root, i)
                # only items whose datatype matches the archetype are ingested
                value_match = False
                try:
                    item_index = self._datatype_classes.index(input_list[i].__class__)
                    item_type = self._datatype_names[item_index]
                    if item_type == rules_type:
                        value_match = True
                except:
                    value_match = False
                if value_match:
                    try:
                        if item_type == 'boolean':
                            valid_list.append(self._validate_boolean(input_list[i], item_path))
                        elif item_type == 'number':
                            valid_list.append(self._validate_number(input_list[i], item_path))
                        elif item_type == 'string':
                            valid_list.append(self._validate_string(input_list[i], item_path))
                        elif item_type == 'map':
                            valid_list.append(self._ingest_dict(input_list[i], schema_list[0], item_path))
                        elif item_type == 'list':
                            valid_list.append(self._ingest_list(input_list[i], schema_list[0], item_path))
                    except:
                        # invalid items are skipped
                        pass
                # stop as soon as the list reaches its declared max size
                if isinstance(max_size, int):
                    if len(valid_list) == max_size:
                        return valid_list
        return valid_list
def _ingest_number(self, input_number, path_to_root):
'''
a helper method for ingesting a number
:return: valid_number
'''
valid_number = 0.0
try:
valid_number = self._validate_number(input_number, path_to_root)
except:
rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
if 'default_value' in self.keyMap[rules_path_to_root]:
valid_number = self.keyMap[rules_path_to_root]['default_value']
elif 'integer_data' in self.keyMap[rules_path_to_root].keys():
if self.keyMap[rules_path_to_root]['integer_data']:
valid_number = 0
return valid_number
def _ingest_string(self, input_string, path_to_root):
'''
a helper method for ingesting a string
:return: valid_string
'''
valid_string = ''
try:
valid_string = self._validate_string(input_string, path_to_root)
except:
rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
if 'default_value' in self.keyMap[rules_path_to_root]:
valid_string = self.keyMap[rules_path_to_root]['default_value']
return valid_string
def _ingest_boolean(self, input_boolean, path_to_root):
'''
a helper method for ingesting a boolean
:return: valid_boolean
'''
valid_boolean = False
try:
valid_boolean = self._validate_boolean(input_boolean, path_to_root)
except:
rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
if 'default_value' in self.keyMap[rules_path_to_root]:
valid_boolean = self.keyMap[rules_path_to_root]['default_value']
return valid_boolean
def _reconstruct(self, path_to_root):
'''
a helper method for finding the schema endpoint from a path to root
:param path_to_root: string with dot path to root from
:return: list, dict, string, number, or boolean at path to root
'''
# split path to root into segments
item_pattern = re.compile('\d+\\]')
dot_pattern = re.compile('\\.|\\[')
path_segments = dot_pattern.split(path_to_root)
# construct base schema endpoint
schema_endpoint = self.schema
# reconstruct schema endpoint from segments
if path_segments[1]:
for i in range(1,len(path_segments)):
if item_pattern.match(path_segments[i]):
schema_endpoint = schema_endpoint[0]
else:
schema_endpoint = schema_endpoint[path_segments[i]]
return schema_endpoint
def _walk(self, path_to_root, record_dict):
'''
a helper method for finding the record endpoint from a path to root
:param path_to_root: string with dot path to root from
:param record_dict:
:return: list, dict, string, number, or boolean at path to root
'''
# split path to root into segments
item_pattern = re.compile('\d+\\]')
dot_pattern = re.compile('\\.|\\[')
path_segments = dot_pattern.split(path_to_root)
# construct empty fields
record_endpoints = []
# determine starting position
if not path_segments[0]:
path_segments.pop(0)
# define internal recursive function
def _walk_int(path_segments, record_dict):
record_endpoint = record_dict
for i in range(0, len(path_segments)):
if item_pattern.match(path_segments[i]):
for j in range(0, len(record_endpoint)):
if len(path_segments) == 2:
record_endpoints.append(record_endpoint[j])
else:
stop_chain = False
for x in range(0, i):
if item_pattern.match(path_segments[x]):
stop_chain = True
if not stop_chain:
shortened_segments = []
for z in range(i + 1, len(path_segments)):
shortened_segments.append(path_segments[z])
_walk_int(shortened_segments, record_endpoint[j])
else:
stop_chain = False
for y in range(0, i):
if item_pattern.match(path_segments[y]):
stop_chain = True
if not stop_chain:
if len(path_segments) == i + 1:
record_endpoints.append(record_endpoint[path_segments[i]])
else:
record_endpoint = record_endpoint[path_segments[i]]
# conduct recursive walk
_walk_int(path_segments, record_dict)
return record_endpoints
def validate(self, input_data, path_to_root='', object_title=''):
    '''
    a core method for validating input against the model

    input_data is only returned if all data is valid

    :param input_data: list, dict, string, number, or boolean to validate
    :param path_to_root: [optional] string with dot-path of model component
    :param object_title: [optional] string with name of input to validate
    :return: input_data (or InputValidationError)
    '''
    # shadow __name__ locally so argument-error messages carry the class name
    __name__ = '%s.validate' % self.__class__.__name__
    _path_arg = '%s(path_to_root="...")' % __name__
    _title_arg = '%s(object_title="...")' % __name__
    # validate input
    copy_path = path_to_root
    if path_to_root:
        if not isinstance(path_to_root, str):
            raise ModelValidationError('%s must be a string.' % _path_arg)
        else:
            # normalize javascript dot-path into the internal leading-dot form
            if path_to_root[0] != '.':
                copy_path = '.%s' % path_to_root
            if not copy_path in self.keyMap.keys():
                raise ModelValidationError('%s does not exist in components %s.' % (_path_arg.replace('...', path_to_root), self.keyMap.keys()))
    else:
        # empty path means validate against the schema root
        copy_path = '.'
    if object_title:
        if not isinstance(object_title, str):
            raise ModelValidationError('%s must be a string' % _title_arg)
    # construct generic error dictionary
    error_dict = {
        'object_title': object_title,
        'model_schema': self.schema,
        'input_criteria': self.keyMap[copy_path],
        'failed_test': 'value_datatype',
        'input_path': copy_path,
        'error_value': input_data,
        'error_code': 4001
    }
    # determine value type of input data
    try:
        input_index = self._datatype_classes.index(input_data.__class__)
    except:
        # unrecognized class: report the class name instead of the value
        error_dict['error_value'] = input_data.__class__.__name__
        raise InputValidationError(error_dict)
    input_type = self._datatype_names[input_index]
    # validate input data type
    if input_type != self.keyMap[copy_path]['value_datatype']:
        raise InputValidationError(error_dict)
    # run helper method appropriate to data type
    if input_type == 'boolean':
        input_data = self._validate_boolean(input_data, copy_path, object_title)
    elif input_type == 'number':
        input_data = self._validate_number(input_data, copy_path, object_title)
    elif input_type == 'string':
        input_data = self._validate_string(input_data, copy_path, object_title)
    elif input_type == 'list':
        # containers need their schema declaration for recursive validation
        schema_list = self._reconstruct(copy_path)
        input_data = self._validate_list(input_data, schema_list, copy_path, object_title)
    elif input_type == 'map':
        schema_dict = self._reconstruct(copy_path)
        input_data = self._validate_dict(input_data, schema_dict, copy_path, object_title)
    return input_data
def ingest(self, **kwargs):
    '''
    a core method to ingest and validate arbitrary keyword data

    **NOTE: data is always returned with this method**

    for each key declared in the model, a value is returned according
    to the following priority:
        1. value in kwargs if the field passes its validation test
        2. default value declared for the key in the model
        3. empty value appropriate to the datatype of the key

    **NOTE: as long as a default value is provided for each key-value,
    returned data will be model valid
    **NOTE: if 'extra_fields' is True for a dictionary, the key-value
    pairs of all fields in kwargs which are not declared in the model
    are also returned in the corresponding dictionary
    **NOTE: if 'max_size' is declared for a list, ingestion of that
    list stops once it reaches max size

    :param kwargs: key, value pairs
    :return: dictionary with keys and values
    '''
    __name__ = '%s.ingest' % self.__class__.__name__
    # delegate to the recursive dict ingestion helper, starting at the root
    return self._ingest_dict(kwargs, self.schema, '.')
def query(self, query_criteria, valid_record=None):
    '''
    a core method for querying model valid data with criteria

    **NOTE: input is only returned if all fields & qualifiers are valid for model

    :param query_criteria: dictionary with model field names and query qualifiers
    :param valid_record: dictionary with model valid record
    :return: boolean (or QueryValidationError)

    an example of how to construct the query_criteria argument:

    query_criteria = {
        '.path.to.number': {
            'min_value': 4.5
        },
        '.path.to.string': {
            'must_contain': [ '\\regex' ]
        }
    }

    **NOTE: for a full list of operators for query_criteria based upon field
    datatype, see either the query-rules.json file or REFERENCE file
    '''
    __name__ = '%s.query' % self.__class__.__name__
    _query_arg = '%s(query_criteria={...})' % __name__
    _record_arg = '%s(valid_record={...})' % __name__
    # validate input
    if not isinstance(query_criteria, dict):
        raise ModelValidationError('%s must be a dictionary.' % _query_arg)
    # convert javascript dot_path to class dot_path
    criteria_copy = {}
    equal_fields = []
    dot_fields = []
    for key, value in query_criteria.items():
        copy_key = key
        if not key:
            copy_key = '.'
        else:
            if key[0] != '.':
                # remember which keys were rewritten, to un-rewrite error text later
                copy_key = '.%s' % key
                dot_fields.append(copy_key)
        criteria_copy[copy_key] = value
        # a bare scalar criterion is shorthand for an equal_to qualifier
        if value.__class__ in self._datatype_classes[0:4]:
            criteria_copy[copy_key] = {
                'equal_to': value
            }
            equal_fields.append(copy_key)
    # validate query criteria against query rules
    query_kwargs = {
        'fields_dict': criteria_copy,
        'fields_rules': self.queryRules,
        'declared_value': False
    }
    try:
        self._validate_fields(**query_kwargs)
    except ModelValidationError as err:
        # rewrite the internal error text back into the caller's vocabulary
        message = err.error['message']
        for field in equal_fields:
            # strip the synthetic equal_to qualifier from shorthand criteria
            equal_error = 'field %s qualifier equal_to' % field
            if message.find(equal_error) > -1:
                message = message.replace(equal_error, 'field %s' % field)
                break
        field_pattern = re.compile('ield\s(\..*?)\s')
        field_name = field_pattern.findall(message)
        if field_name:
            if field_name[0] in dot_fields:
                # drop the leading dot that was added during normalization
                def _replace_field(x):
                    return 'ield %s ' % x.group(1)[1:]
                message = field_pattern.sub(_replace_field, message)
        raise QueryValidationError(message)
    # query test record
    if valid_record:
        if not isinstance(valid_record, dict):
            raise ModelValidationError('%s must be a dictionary.' % _record_arg)
        # record matches only when every criterion evaluates truthy
        for key, value in criteria_copy.items():
            eval_outcome = self._evaluate_field(valid_record, key, value)
            if not eval_outcome:
                return False
    return True
|
collectiveacuity/jsonModel
|
jsonmodel/validators.py
|
jsonModel._validate_list
|
python
|
def _validate_list(self, input_list, schema_list, path_to_root, object_title=''):
    '''
    a helper method for recursively validating items in a list

    :param input_list: list with items to validate
    :param schema_list: list with the model declaration for the items
    :param path_to_root: string with dot-path of the list in the model
    :param object_title: [optional] string with name of input to validate
    :return: input_list
    '''
    # construct rules for list and items
    # normalize item designators (e.g. [3] -> [0]) to look up the rules
    rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
    list_rules = self.keyMap[rules_path_to_root]
    initial_key = rules_path_to_root + '[0]'
    item_rules = self.keyMap[initial_key]
    # construct list error report template
    list_error = {
        'object_title': object_title,
        'model_schema': self.schema,
        'input_criteria': list_rules,
        'failed_test': 'value_datatype',
        'input_path': path_to_root,
        'error_value': 0,
        'error_code': 4001
    }
    # validate list rules
    if 'min_size' in list_rules.keys():
        if len(input_list) < list_rules['min_size']:
            list_error['failed_test'] = 'min_size'
            list_error['error_value'] = len(input_list)
            list_error['error_code'] = 4031
            raise InputValidationError(list_error)
    if 'max_size' in list_rules.keys():
        if len(input_list) > list_rules['max_size']:
            list_error['failed_test'] = 'max_size'
            list_error['error_value'] = len(input_list)
            list_error['error_code'] = 4032
            raise InputValidationError(list_error)
    # construct item error report template
    item_error = {
        'object_title': object_title,
        'model_schema': self.schema,
        'input_criteria': item_rules,
        'failed_test': 'value_datatype',
        'input_path': initial_key,
        'error_value': None,
        'error_code': 4001
    }
    # validate datatype of items
    for i in range(len(input_list)):
        input_path = path_to_root + '[%s]' % i
        item = input_list[i]
        item_error['input_path'] = input_path
        try:
            item_index = self._datatype_classes.index(item.__class__)
        except:
            # unrecognized class: report the class name instead of the value
            item_error['error_value'] = item.__class__.__name__
            raise InputValidationError(item_error)
        item_type = self._datatype_names[item_index]
        item_error['error_value'] = item
        # a 'null' declaration accepts any recognized datatype
        if item_rules['value_datatype'] == 'null':
            pass
        else:
            if item_type != item_rules['value_datatype']:
                raise InputValidationError(item_error)
        # call appropriate validation sub-routine for datatype of item
        if item_type == 'boolean':
            input_list[i] = self._validate_boolean(item, input_path, object_title)
        elif item_type == 'number':
            input_list[i] = self._validate_number(item, input_path, object_title)
        elif item_type == 'string':
            input_list[i] = self._validate_string(item, input_path, object_title)
        elif item_type == 'map':
            input_list[i] = self._validate_dict(item, schema_list[0], input_path, object_title)
        elif item_type == 'list':
            input_list[i] = self._validate_list(item, schema_list[0], input_path, object_title)
    # validate unique values in list
    if 'unique_values' in list_rules.keys():
        if len(set(input_list)) < len(input_list):
            list_error['failed_test'] = 'unique_values'
            list_error['error_value'] = input_list
            list_error['error_code'] = 4033
            raise InputValidationError(list_error)
    # TODO: validate top-level item values against identical to reference
    # TODO: run lambda function and call validation url
    return input_list
|
a helper method for recursively validating items in a list
:return: input_list
|
train
|
https://github.com/collectiveacuity/jsonModel/blob/1ea64c36d78add3faa7b85ff82c5ec685458c940/jsonmodel/validators.py#L906-L998
| null |
class jsonModel(object):
__rules__ = jsonLoader('jsonmodel', 'models/model-rules.json')
def __init__(self, data_model, query_rules=None):
    '''
    a method for testing data model declaration & initializing the class

    :param data_model: dictionary with json model architecture
    :param query_rules: [optional] dictionary with valid field type qualifiers
    :return: object with jsonModel methods
    '''
    # validate schema input
    if not isinstance(data_model, dict):
        raise ModelValidationError('Data model must be a dictionary.')
    elif 'schema' not in data_model.keys():
        raise ModelValidationError('Data model must have a schema key.')
    elif not isinstance(data_model['schema'], dict):
        raise ModelValidationError('Value for the data model "schema" field must be a dictionary.')
    elif not data_model['schema']:
        raise ModelValidationError('Data model "schema" field must not be empty.')
    # construct base methods
    # deep copy so later mutation of keyCriteria never touches caller data
    from copy import deepcopy
    data_model = deepcopy(data_model)
    self.schema = data_model['schema']
    model_map = mapModel(self.schema)
    self.keyName = model_map.keyName
    self.keyCriteria = model_map.keyCriteria
    # construct protected type classes
    self._datatype_names = mapModel._datatype_names
    self._datatype_classes = mapModel._datatype_classes
    # validate absence of item designators in keys
    item_pattern = re.compile('\[\d+\]')
    for i in range(len(self.keyName)):
        patterns_found = item_pattern.findall(self.keyName[i])
        if patterns_found:
            for designator in patterns_found:
                # only the initial-item designator [0] is permitted in key names
                if designator != '[0]':
                    message = 'Key name for schema field %s must not contain the item designator pattern %s' % (self.keyName[i], designator)
                    raise ModelValidationError(message)
    # validate existence of first item in list declarations
    key_set = set(self.keyName)
    for i in range(len(self.keyName)):
        if self.keyCriteria[i]['value_datatype'] == 'list':
            item_key = self.keyName[i] + '[0]'
            if not item_key in key_set:
                message = 'Schema field %s must declare an initial item for the list.' % self.keyName[i]
                raise ModelValidationError(message)
            # alter list requirement if first item is empty
            else:
                item_index = self.keyName.index(item_key)
                if not self.keyCriteria[item_index]['required_field']:
                    self.keyCriteria[i]['required_field'] = False
    # validate title input & construct title method
    self.title = ''
    if 'title' in data_model.keys():
        if not isinstance(data_model['title'], str):
            raise ModelValidationError('Value for model title must be a string.')
        self.title = data_model['title']
    # validate description input & construct description method
    self.description = ''
    if 'description' in data_model.keys():
        if not isinstance(data_model['description'], str):
            raise ModelValidationError('Value for model description must be a string.')
        self.description = data_model['description']
    # validate url input & construct title method
    self.url = ''
    if 'url' in data_model.keys():
        if not isinstance(data_model['url'], str):
            raise ModelValidationError('Value for model url must be a string.')
        # NOTE(review): assigns the url value to self.title, leaving self.url
        # empty — looks like a copy-paste bug (self.url intended); confirm
        # against callers before changing
        self.title = data_model['url']
    # validate metadata input & construct metadata method
    self.metadata = {}
    if 'metadata' in data_model.keys():
        if not isinstance(data_model['metadata'], dict):
            raise ModelValidationError('Value for model metadata must be a dictionary.')
        self.metadata = data_model['metadata']
    # validate max size input & construct maxSize property
    # self.maxSize = None
    # if 'max_size' in data_model.keys():
    # if not isinstance(data_model['max_size'], int):
    # raise ModelValidationError('Value for model max_size must be a positive integer.')
    # elif data_model['max_size'] < 0:
    # raise ModelValidationError('Value for model max_size must be a positive integer.')
    # elif data_model['max_size']:
    # self.maxSize = data_model['max_size']
    # validate components input & construct component property
    self.components = {}
    if 'components' in data_model.keys():
        if not isinstance(data_model['components'], dict):
            raise ModelValidationError('Value for model components must be a dictionary.')
        self.components = self._validate_fields(data_model['components'], self.__rules__['components'])
    # construct keyMap fields from key names and key criteria
    self.keyMap = {}
    for i in range(len(self.keyName)):
        self.keyMap[self.keyName[i]] = self.keyCriteria[i]
    for key, value in self.components.items():
        # convert javascript dot_path to class dot_path
        dot_key = ''
        if not key:
            dot_key = '.'
        else:
            if key[0] != '.':
                dot_key = '.%s' % key
        # add component declarations to keyMap
        if key in self.keyMap.keys():
            for k, v in self.components[key].items():
                self.keyMap[key][k] = v
        elif dot_key and dot_key in self.keyMap.keys():
            for k, v in self.components[key].items():
                self.keyMap[dot_key][k] = v
    # validate default values in lists
    self._validate_defaults(self.keyMap)
    # construct queryRules property from class model rules
    self.queryRules = {}
    for key, value in self.__rules__['components'].items():
        # declaration-only qualifiers are not meaningful in a query context
        remove_from_query = [ 'required_field', 'default_value', 'example_values', 'field_title', 'field_position', 'field_description', 'field_metadata', 'extra_fields' ]
        field_qualifiers = {
            'value_exists': False
        }
        for k, v in value.items():
            if k not in remove_from_query:
                field_qualifiers[k] = v
        self.queryRules[key] = field_qualifiers
    # validate query rules input and replace queryRules property
    if query_rules:
        if not isinstance(query_rules, dict):
            message = 'Value for query rules input must be a dictionary.'
            raise ModelValidationError(message)
        input_set = set(query_rules.keys())
        req_set = set(self.queryRules.keys())
        if input_set - req_set:
            message = 'Query rules input may only have %s field key names.' % req_set
            raise ModelValidationError(message)
        elif req_set - input_set:
            message = 'Query rules input must have all %s field key names.' % req_set
            raise ModelValidationError(message)
        for key in req_set:
            if not isinstance(query_rules[key], dict):
                message = 'Value for query rules %s field must be a dictionary.' % key
                raise ModelValidationError(message)
            input_qualifier_set = set(query_rules[key].keys())
            req_qualifier_set = set(self.queryRules[key].keys())
            if input_qualifier_set - req_qualifier_set:
                message = 'Query rules field %s may only have qualifiers %s' % (key, req_qualifier_set)
                raise ModelValidationError(message)
            # each overriding qualifier must keep the datatype of the default
            for k, v in query_rules[key].items():
                if v.__class__ != self.queryRules[key][k].__class__:
                    qualifier_index = self._datatype_classes.index(self.queryRules[key][k].__class__)
                    qualifier_type = self._datatype_names[qualifier_index]
                    message = 'Value for query rules field %s qualifier %s must be a "%s" datatype.' % (key, k, qualifier_type)
                    raise ModelValidationError(message)
        self.queryRules = query_rules
def _validate_fields(self, fields_dict, fields_rules, declared_value=True):
    '''
    a helper method for validating field criteria declarations against rules

    Used both for model "components" declarations (declared_value=True, which
    also checks the schema's own declared/default/example values against the
    criteria) and for query criteria (declared_value=False).

    :param fields_dict: dictionary with dot-path keys and criteria dictionaries
    :param fields_rules: dictionary with the permitted qualifiers per datatype
    :param declared_value: boolean to also test values declared in the schema
    :return: fields_dict (or ModelValidationError)
    '''
    # validate key names in fields
    for key, value in fields_dict.items():
        # convert javascript dot_path to class dot_path
        if not key:
            key = '.'
        else:
            if key[0] != '.':
                key = '.%s' % key
        if key not in self.keyName:
            raise ModelValidationError('Field %s is not a field declared in model schema.' % key)
        elif not isinstance(value, dict):
            raise ModelValidationError('Value for field %s must be a dictionary.' % key)
        # validate field criteria are appropriate to field datatype
        value_type = self.keyCriteria[self.keyName.index(key)]['value_datatype']
        type_dict = {}
        if value_type == 'string':
            type_dict = fields_rules['.string_fields']
        elif value_type == 'number':
            type_dict = fields_rules['.number_fields']
        elif value_type == 'boolean':
            type_dict = fields_rules['.boolean_fields']
        elif value_type == 'list':
            type_dict = fields_rules['.list_fields']
        elif value_type == 'map':
            type_dict = fields_rules['.map_fields']
        elif value_type == 'null':
            type_dict = fields_rules['.null_fields']
        if set(value.keys()) - set(type_dict.keys()):
            raise ModelValidationError('Field %s may only have datatype %s qualifiers %s.' % (key, value_type, set(type_dict.keys())))
        # validate criteria qualifier values are appropriate datatype
        for k, v in value.items():
            v_index = self._datatype_classes.index(v.__class__)
            v_type = self._datatype_names[v_index]
            # the rules dict's own values model each qualifier's datatype
            qualifier_index = self._datatype_classes.index(type_dict[k].__class__)
            qualifier_type = self._datatype_names[qualifier_index]
            if v_type != qualifier_type:
                message = 'Value for field %s qualifier %s must be a %s datatype.' % (key, k, qualifier_type)
                raise ModelValidationError(message)
            if qualifier_type == 'number':
                # an int in the rules means the qualifier must be an integer
                if isinstance(type_dict[k], int):
                    if not isinstance(v, int):
                        message = 'Value for field %s qualifier %s must be an integer.' % (key, k)
                        raise ModelValidationError(message)
            # validate internal logic of each qualifier value declaration
            if k in ('must_not_contain', 'must_contain', 'contains_either'):
                for item in v:
                    if not isinstance(item, str):
                        message = 'Each item in list field %s qualifier %s must be a string.' % (key, k)
                        raise ModelValidationError(message)
            if k in ('min_length', 'max_length', 'min_size', 'max_size'):
                if v < 0:
                    message = 'Value for field %s qualifier %s cannot be negative.' % (key, k)
                    raise ModelValidationError(message)
            if k in ('discrete_values', 'excluded_values', 'example_values'):
                for item in v:
                    if value_type == 'number':
                        if not isinstance(item, int) and not isinstance(item, float):
                            message = 'Each item in field %s qualifier %s list must be a number.' % (key, k)
                            raise ModelValidationError(message)
                    elif not isinstance(item, str):
                        message = 'Each item in list for field %s qualifier %s must be a string.' % (key, k)
                        raise ModelValidationError(message)
            if k == 'identical_to':
                if not v in self.keyName:
                    message = 'Value "%s" for field %s qualifier %s not found in components keys.' % (v, key, k)
                    raise ModelValidationError(message)
            if k == 'unique_values':
                if v:
                    # uniqueness is only testable on hashable scalar items
                    item_name = key + '[0]'
                    item_type = self.keyCriteria[self.keyName.index(item_name)]['value_datatype']
                    if not item_type in ('number', 'string'):
                        message = 'Field %s[0] must be either a string or number if qualifier "unique_values": true' % key
                        raise ModelValidationError(message)
        # validate lack of other qualifiers if value exist is false
        if 'value_exists' in value.keys():
            if not value['value_exists']:
                if set(value.keys()) - {'value_exists'}:
                    message = 'If field %s qualifier value_exists: false, field may not have other qualifiers.' % key
                    raise ModelValidationError(message)
        # validate size qualifiers against each other
        size_qualifiers = ['min_size', 'max_size']
        for qualifier in size_qualifiers:
            if qualifier in value.keys():
                test_value = value[qualifier]
                value_path = 'field %s qualifier %s' % (key, qualifier)
                header = 'Value %s for %s' % (test_value, value_path)
                if 'min_size' in value.keys():
                    if test_value < value['min_size']:
                        message = '%s must not be less than "min_size": %s' % (header, value['min_size'])
                        raise ModelValidationError(message)
                if 'max_size' in value.keys():
                    if test_value > value['max_size']:
                        message = '%s must not be greater than "max_size": %s' % (header, value['max_size'])
                        raise ModelValidationError(message)
        # validate length qualifiers against each other
        length_qualifiers = ['min_length', 'max_length']
        for qualifier in length_qualifiers:
            if qualifier in value.keys():
                test_value = value[qualifier]
                value_path = 'field %s qualifier %s' % (key, qualifier)
                header = 'Value %s for %s' % (test_value, value_path)
                if 'min_length' in value.keys():
                    if test_value < value['min_length']:
                        message = '%s must be at least "min_length": %s' % (header, value['min_length'])
                        raise ModelValidationError(message)
                if 'max_length' in value.keys():
                    if test_value > value['max_length']:
                        message = '%s cannot be more than "max_length": %s' % (header, value['max_length'])
                        raise ModelValidationError(message)
        # validate range qualifiers against each other & length qualifiers
        range_qualifiers = ['min_value', 'max_value', 'greater_than', 'less_than', 'equal_to']
        for qualifier in range_qualifiers:
            if qualifier in value.keys():
                test_value = value[qualifier]
                value_path = 'field %s qualifier %s' % (key, qualifier)
                quote_text = ''
                if isinstance(test_value, str):
                    quote_text = '"'
                header = 'Value %s%s%s for %s' % (quote_text, test_value, quote_text, value_path)
                if 'min_value' in value.keys():
                    if test_value < value['min_value']:
                        message = '%s must not be less than "min_value": %s' % (header, value['min_value'])
                        raise ModelValidationError(message)
                if 'max_value' in value.keys():
                    if test_value > value['max_value']:
                        message = '%s must not be greater than "max_value": %s' % (header, value['max_value'])
                        raise ModelValidationError(message)
                # a qualifier is exempt from being tested against itself
                if 'greater_than' in value.keys():
                    if test_value <= value['greater_than'] and not qualifier == 'greater_than':
                        message = '%s must be "greater_than": %s' % (header, value['greater_than'])
                        raise ModelValidationError(message)
                if 'less_than' in value.keys():
                    if test_value >= value['less_than'] and not qualifier == 'less_than':
                        message = '%s must be "less_than": %s' % (header, value['less_than'])
                        raise ModelValidationError(message)
                if 'min_length' in value.keys():
                    if len(test_value) < value['min_length']:
                        message = '%s must be at least "min_length": %s' % (header, value['min_length'])
                        raise ModelValidationError(message)
                if 'max_length' in value.keys():
                    if len(test_value) > value['max_length']:
                        message = '%s cannot be more than "max_length": %s' % (header, value['max_length'])
                        raise ModelValidationError(message)
                if 'integer_data' in value.keys():
                    if value['integer_data']:
                        if not isinstance(test_value, int):
                            message = '%s must be an "integer_data".' % header
                            raise ModelValidationError(message)
                if 'must_not_contain' in value.keys():
                    for regex in value['must_not_contain']:
                        regex_pattern = re.compile(regex)
                        if regex_pattern.findall(test_value):
                            message = '%s matches regex pattern in "must_not_contain": ["%s"]' % (header, regex)
                            raise ModelValidationError(message)
                if 'must_contain' in value.keys():
                    for regex in value['must_contain']:
                        regex_pattern = re.compile(regex)
                        if not regex_pattern.findall(test_value):
                            message = '%s does not match regex pattern in "must_contain": ["%s"].' % (header, regex)
                            raise ModelValidationError(message)
                if 'contains_either' in value.keys():
                    # at least one of the patterns must match
                    regex_match = False
                    regex_patterns = []
                    for regex in value['contains_either']:
                        regex_patterns.append(regex)
                        regex_pattern = re.compile(regex)
                        if regex_pattern.findall(test_value):
                            regex_match = True
                    if not regex_match:
                        message = '%s does not match any regex patterns in "contains_either": %s' % (header, regex_patterns)
                        raise ModelValidationError(message)
                if 'byte_data' in value.keys():
                    if value['byte_data']:
                        # range comparisons are meaningless on base64 byte data
                        if qualifier != 'equal_to':
                            message = '%s cannot be used with base64 encoded "byte_data".' % header
                            raise ModelValidationError(message)
        # validate discrete value qualifiers against other criteria
        schema_field = self.keyCriteria[self.keyName.index(key)]
        discrete_qualifiers = ['declared_value', 'default_value', 'excluded_values', 'discrete_values', 'example_values']
        for qualifier in discrete_qualifiers:
            test_qualifier = False
            if qualifier in schema_field:
                test_qualifier = True
                # a falsy declared_value in the schema is not worth testing
                if qualifier == 'declared_value' and not schema_field[qualifier]:
                    test_qualifier = False
            if qualifier in value.keys() or (test_qualifier and declared_value):
                multiple_values = False
                if qualifier in value.keys():
                    if isinstance(value[qualifier], list):
                        test_list = value[qualifier]
                        multiple_values = True
                    else:
                        test_list = [value[qualifier]]
                else:
                    test_list = [schema_field[qualifier]]
                value_path = 'field %s qualifier %s' % (key, qualifier)
                for i in range(len(test_list)):
                    test_value = test_list[i]
                    quote_text = ''
                    if isinstance(test_value, str):
                        quote_text = '"'
                    item_text = ''
                    if multiple_values:
                        item_text = '[%s]' % i
                    header = 'Value %s%s%s for %s%s' % (quote_text, test_value, quote_text, value_path, item_text)
                    if 'min_value' in value.keys():
                        if test_value < value['min_value']:
                            message = '%s must not be less than "min_value": %s' % (header, value['min_value'])
                            raise ModelValidationError(message)
                    if 'max_value' in value.keys():
                        if test_value > value['max_value']:
                            message = '%s must not be greater than "max_value": %s' % (header, value['max_value'])
                            raise ModelValidationError(message)
                    if 'equal_to' in value.keys():
                        if test_value != value['equal_to']:
                            # NOTE(review): only raises for boolean test values
                            # outside declared_value — confirm intended scope
                            if qualifier != 'declared_value' and isinstance(test_value, bool):
                                message = '%s must be "equal_to": %s' % (header, value['equal_to'])
                                raise ModelValidationError(message)
                    if 'greater_than' in value.keys():
                        if test_value <= value['greater_than']:
                            message = '%s must be "greater_than": %s' % (header, value['greater_than'])
                            raise ModelValidationError(message)
                    if 'less_than' in value.keys():
                        if test_value >= value['less_than']:
                            message = '%s must be "less_than": %s' % (header, value['less_than'])
                            raise ModelValidationError(message)
                    if 'integer_data' in value.keys():
                        if value['integer_data']:
                            if not isinstance(test_value, int):
                                message = '%s must be an "integer_data".' % header
                                raise ModelValidationError(message)
                    if 'min_length' in value.keys():
                        if len(test_value) < value['min_length']:
                            message = '%s must be at least "min_length": %s' % (header, value['min_length'])
                            raise ModelValidationError(message)
                    if 'max_length' in value.keys():
                        if len(test_value) > value['max_length']:
                            message = '%s cannot be more than "max_length": %s' % (header, value['max_length'])
                            raise ModelValidationError(message)
                    if 'must_not_contain' in value.keys():
                        for regex in value['must_not_contain']:
                            regex_pattern = re.compile(regex)
                            if regex_pattern.findall(test_value):
                                message = '%s matches regex pattern in "must_not_contain": ["%s"]' % (header, regex)
                                raise ModelValidationError(message)
                    if 'must_contain' in value.keys():
                        for regex in value['must_contain']:
                            regex_pattern = re.compile(regex)
                            if not regex_pattern.findall(test_value):
                                message = '%s does not match regex pattern in "must_contain": ["%s"]' % (header, regex)
                                raise ModelValidationError(message)
                    if 'contains_either' in value.keys():
                        regex_match = False
                        regex_patterns = []
                        for regex in value['contains_either']:
                            regex_patterns.append(regex)
                            regex_pattern = re.compile(regex)
                            if regex_pattern.findall(test_value):
                                regex_match = True
                        if not regex_match:
                            message = '%s does not match any regex patterns in "contains_either": %s' % (header, regex_patterns)
                            raise ModelValidationError(message)
                    if 'byte_data' in value.keys():
                        # the value must be decodable base64
                        message = '%s cannot be base64 decoded to "byte_data".' % header
                        try:
                            decoded_bytes = b64decode(test_value)
                        except:
                            raise ModelValidationError(message)
                        if not isinstance(decoded_bytes, bytes):
                            raise ModelValidationError(message)
        # validate discrete value qualifiers against each other
        for qualifier in discrete_qualifiers:
            test_qualifier = False
            if qualifier in schema_field:
                test_qualifier = True
                if qualifier == 'declared_value' and not schema_field[qualifier]:
                    test_qualifier = False
            if qualifier in value.keys() or (test_qualifier and declared_value):
                multiple_values = False
                if qualifier in value.keys():
                    if isinstance(value[qualifier], list):
                        test_list = value[qualifier]
                        multiple_values = True
                    else:
                        test_list = [value[qualifier]]
                else:
                    test_list = [schema_field[qualifier]]
                value_path = 'field %s qualifier %s' % (key, qualifier)
                for i in range(len(test_list)):
                    test_value = test_list[i]
                    quote_text = ''
                    if isinstance(test_value, str):
                        quote_text = '"'
                    item_text = ''
                    if multiple_values:
                        item_text = '[%s]' % i
                    header = 'Value %s%s%s for %s%s' % (quote_text, test_value, quote_text, value_path, item_text)
                    if 'excluded_values' in value.keys():
                        if not qualifier == 'excluded_values':
                            if test_value in value['excluded_values']:
                                message = '%s cannot be one of "excluded_values": %s.' % (header, value['excluded_values'])
                                raise ModelValidationError(message)
                    if 'discrete_values' in value.keys():
                        # NOTE(review): exemption tests 'excluded_values', not
                        # 'discrete_values' — possibly a copy-paste slip; confirm
                        if not qualifier == 'excluded_values':
                            if test_value not in value['discrete_values']:
                                message = '%s must be one of "discrete_values": %s' % (header, value['discrete_values'])
                                raise ModelValidationError(message)
    return fields_dict
    def _validate_defaults(self, fields_dict):
        '''
        a helper method for validating 'default_value' declarations in model fields

        NOTE: only list-type fields are actually checked here — each item of the
        declared default is validated against the criteria of the list's item
        prototype ('key[0]'); defaults for other datatypes pass through untouched

        :param fields_dict: dictionary with model field criteria keyed by dot path
        :return: fields_dict (or raises ModelValidationError)
        '''
        # validate key names in fields
        for key, value in fields_dict.items():
            # retrieve value type and type dict
            value_type = self.keyCriteria[self.keyName.index(key)]['value_datatype']
            # validate discrete value qualifiers against other criteria
            qualifier = 'default_value'
            if qualifier in value.keys():
                # a list default is tested item-by-item; scalars are wrapped in a
                # one-item list so the same loop handles both shapes
                multiple_values = False
                if isinstance(value[qualifier], list):
                    test_list = value[qualifier]
                    multiple_values = True
                else:
                    test_list = [value[qualifier]]
                value_path = 'field %s qualifier %s' % (key, qualifier)
                for i in range(len(test_list)):
                    test_value = test_list[i]
                    # build a human-readable header for any error message:
                    # strings are quoted, list items get an [index] suffix
                    quote_text = ''
                    if isinstance(test_value, str):
                        quote_text = '"'
                    item_text = ''
                    if multiple_values:
                        item_text = '[%s]' % i
                    qualifier_text = value_path + item_text
                    header = 'Value %s%s%s for %s' % (quote_text, test_value, quote_text, qualifier_text)
                    if value_type == 'list':
                        # validate each default item against the item prototype,
                        # then rewrite the error text so it points at the
                        # default_value qualifier rather than the prototype path
                        default_item_key = '%s[0]' % key
                        try:
                            self.validate(test_value, default_item_key, object_title=header)
                        except Exception as err:
                            raise ModelValidationError(str(err).strip().replace('field %s' % default_item_key, qualifier_text))
        return fields_dict
    def _evaluate_field(self, record_dict, field_name, field_criteria):
        ''' a helper method for evaluating record values based upon query criteria

        :param record_dict: dictionary with model valid data to evaluate
        :param field_name: string with path to root of query field
        :param field_criteria: dictionary with query operators and qualifiers
        :return: boolean (True if no field_criteria evaluate to false)
        '''
        # determine value existence criteria
        value_exists = True
        if 'value_exists' in field_criteria.keys():
            if not field_criteria['value_exists']:
                value_exists = False
        # validate existence of field
        # _walk raises when the dot path cannot be resolved in the record
        field_exists = True
        try:
            record_values = self._walk(field_name, record_dict)
        except:
            field_exists = False
        # evaluate existence query criteria
        if value_exists != field_exists:
            return False
        elif not value_exists:
            return True
        # convert javascript dot_path to class dot_path
        field_key = field_name
        if not field_name:
            field_key = '.'
        else:
            if field_name[0] != '.':
                field_key = '.%s' % field_name
        # evaluate other query criteria
        # record_values is the list of every endpoint matched by the dot path,
        # so each operator below passes if ANY matched value satisfies it
        # (except must_not_contain / excluded_values, which fail if ANY matches)
        for key, value in field_criteria.items():
            if key in ('min_size', 'min_length'):
                found = False
                # maps are measured by serialized byte size; the 51 offset
                # presumably compensates for json/str wrapper overhead — same
                # constant as _validate_dict (TODO confirm calibration)
                if self.keyMap[field_key]['value_datatype'] == 'map':
                    for record_value in record_values:
                        record_size = sys.getsizeof(json.dumps(str(record_value)).replace(' ','')) - 51
                        if record_size >= value:
                            found = True
                            break
                else:
                    for record_value in record_values:
                        if len(record_value) >= value:
                            found = True
                            break
                if not found:
                    return False
            elif key in ('max_size', 'max_length'):
                found = False
                if self.keyMap[field_key]['value_datatype'] == 'map':
                    for record_value in record_values:
                        record_size = sys.getsizeof(json.dumps(str(record_value)).replace(' ','')) - 51
                        if record_size <= value:
                            found = True
                            break
                else:
                    for record_value in record_values:
                        if len(record_value) <= value:
                            found = True
                            break
                if not found:
                    return False
            elif key == 'min_value':
                found = False
                for record_value in record_values:
                    if record_value >= value:
                        found = True
                        break
                if not found:
                    return False
            elif key == 'max_value':
                found = False
                for record_value in record_values:
                    if record_value <= value:
                        found = True
                        break
                if not found:
                    return False
            elif key == 'equal_to':
                found = False
                for record_value in record_values:
                    if record_value == value:
                        found = True
                        break
                if not found:
                    return False
            elif key == 'greater_than':
                found = False
                for record_value in record_values:
                    if record_value > value:
                        found = True
                        break
                if not found:
                    return False
            elif key == 'less_than':
                found = False
                for record_value in record_values:
                    if record_value < value:
                        found = True
                        break
                if not found:
                    return False
            elif key == 'excluded_values':
                # fails as soon as any matched value is in the excluded set
                for record_value in record_values:
                    if record_value in value:
                        return False
            elif key == 'discrete_values':
                found = False
                for record_value in record_values:
                    if record_value in value:
                        found = True
                        break
                if not found:
                    return False
            elif key == 'integer_data':
                # strict class comparison (not isinstance) so bool does not
                # count as an integer here
                found = False
                dummy_int = 1
                for record_value in record_values:
                    if record_value.__class__ == dummy_int.__class__:
                        found = True
                        break
                if value != found:
                    return False
            elif key == 'byte_data':
                # a value counts as byte data when it base64-decodes to bytes
                found = False
                for record_value in record_values:
                    try:
                        decoded_bytes = b64decode(record_value)
                    except:
                        decoded_bytes = ''
                    if isinstance(decoded_bytes, bytes):
                        found = True
                        break
                if value != found:
                    return False
            elif key == 'must_contain':
                # every regex must match at least one record value
                for regex in value:
                    regex_pattern = re.compile(regex)
                    found = False
                    for record_value in record_values:
                        if regex_pattern.findall(record_value):
                            found = True
                            break
                    if not found:
                        return False
            elif key == 'must_not_contain':
                # no regex may match any record value
                for regex in value:
                    regex_pattern = re.compile(regex)
                    found = False
                    for record_value in record_values:
                        if regex_pattern.findall(record_value):
                            found = True
                            break
                    if found:
                        return False
            elif key == 'contains_either':
                # at least one regex must match at least one record value
                found = False
                for regex in value:
                    regex_pattern = re.compile(regex)
                    for record_value in record_values:
                        if regex_pattern.findall(record_value):
                            found = True
                            break
                    if found:
                        break
                if not found:
                    return False
            elif key == 'unique_values':
                # compares the boolean qualifier against whether each matched
                # list holds only distinct items
                for record_value in record_values:
                    unique_values = True
                    if len(record_value) != len(set(record_value)):
                        unique_values = False
                    if value != unique_values:
                        return False
        return True
    def _validate_dict(self, input_dict, schema_dict, path_to_root, object_title=''):
        ''' a helper method for recursively validating keys in dictionaries

        NOTE: input_dict is validated AND mutated in place — nested values are
        replaced by their validated forms and missing optional fields are
        filled from declared component defaults

        :param input_dict: dictionary from the input at path_to_root
        :param schema_dict: corresponding dictionary in the model schema
        :param path_to_root: string with dot path from root to this dictionary
        :param object_title: [optional] string with name of input for errors
        :return: input_dict (or raises InputValidationError)
        '''
        # reconstruct key path to current dictionary in model
        # (item indices are normalized to [0] to match the keyMap rules)
        rules_top_level_key = re.sub('\[\d+\]', '[0]', path_to_root)
        map_rules = self.keyMap[rules_top_level_key]
        # construct list error report template
        map_error = {
            'object_title': object_title,
            'model_schema': self.schema,
            'input_criteria': map_rules,
            'failed_test': 'value_datatype',
            'input_path': path_to_root,
            'error_value': 0,
            'error_code': 4001
        }
        # validate map size (serialized byte size; the 51 offset presumably
        # compensates for json/str wrapper overhead — TODO confirm)
        if 'min_size' in map_rules.keys():
            input_size = sys.getsizeof(json.dumps(str(input_dict)).replace(' ','')) - 51
            if input_size < map_rules['min_size']:
                map_error['failed_test'] = 'min_size'
                map_error['error_value'] = input_size
                map_error['error_code'] = 4031
                raise InputValidationError(map_error)
        if 'max_size' in map_rules.keys():
            input_size = sys.getsizeof(json.dumps(str(input_dict)).replace(' ','')) - 51
            if input_size > map_rules['max_size']:
                map_error['failed_test'] = 'max_size'
                map_error['error_value'] = input_size
                map_error['error_code'] = 4032
                raise InputValidationError(map_error)
        # construct lists of keys in input dictionary
        # (all keys must be strings; error code 4004 otherwise)
        input_keys = []
        input_key_list = []
        for key in input_dict.keys():
            error_dict = {
                'object_title': object_title,
                'model_schema': self.schema,
                'input_criteria': self.keyMap[rules_top_level_key],
                'failed_test': 'key_datatype',
                'input_path': path_to_root,
                'error_value': key,
                'error_code': 4004
            }
            # NOTE: this writes into the shared keyMap criteria entry in place
            error_dict['input_criteria']['key_datatype'] = 'string'
            if path_to_root == '.':
                if not isinstance(key, str):
                    input_key_name = path_to_root + str(key)
                    error_dict['input_path'] = input_key_name
                    raise InputValidationError(error_dict)
                input_key_name = path_to_root + key
            else:
                if not isinstance(key, str):
                    input_key_name = path_to_root + '.' + str(key)
                    error_dict['input_path'] = input_key_name
                    raise InputValidationError(error_dict)
                input_key_name = path_to_root + '.' + key
            input_keys.append(input_key_name)
            input_key_list.append(key)
        # TODO: validate top-level key and values against identical to reference
        # TODO: run lambda function and call validation
        # construct lists of keys in schema dictionary
        max_keys = []
        max_key_list = []
        req_keys = []
        req_key_list = []
        for key in schema_dict.keys():
            if path_to_root == '.':
                schema_key_name = path_to_root + key
            else:
                schema_key_name = path_to_root + '.' + key
            max_keys.append(schema_key_name)
            max_key_list.append(key)
            rules_schema_key_name = re.sub('\[\d+\]', '[0]', schema_key_name)
            if self.keyMap[rules_schema_key_name]['required_field']:
                req_keys.append(schema_key_name)
                req_key_list.append(key)
        # validate existence of required fields (error code 4002)
        missing_keys = set(req_keys) - set(input_keys)
        if missing_keys:
            error_dict = {
                'object_title': object_title,
                'model_schema': self.schema,
                'input_criteria': self.keyMap[rules_top_level_key],
                'failed_test': 'required_field',
                'input_path': path_to_root,
                'error_value': list(missing_keys),
                'error_code': 4002
            }
            error_dict['input_criteria']['required_keys'] = req_keys
            raise InputValidationError(error_dict)
        # validate existence of extra fields (error code 4003, unless the map
        # declares extra_fields as allowed)
        extra_keys = set(input_keys) - set(max_keys)
        if extra_keys and not self.keyMap[rules_top_level_key]['extra_fields']:
            extra_key_list = []
            for key in extra_keys:
                pathless_key = re.sub(rules_top_level_key, '', key, count=1)
                extra_key_list.append(pathless_key)
            error_dict = {
                'object_title': object_title,
                'model_schema': self.schema,
                'input_criteria': self.keyMap[rules_top_level_key],
                'failed_test': 'extra_fields',
                'input_path': path_to_root,
                'error_value': extra_key_list,
                'error_code': 4003
            }
            error_dict['input_criteria']['maximum_scope'] = max_key_list
            raise InputValidationError(error_dict)
        # validate datatype of value, then recurse into the matching
        # per-datatype validation sub-routine
        for key, value in input_dict.items():
            if path_to_root == '.':
                input_key_name = path_to_root + key
            else:
                input_key_name = path_to_root + '.' + key
            rules_input_key_name = re.sub('\[\d+\]', '[0]', input_key_name)
            if input_key_name in max_keys:
                input_criteria = self.keyMap[rules_input_key_name]
                error_dict = {
                    'object_title': object_title,
                    'model_schema': self.schema,
                    'input_criteria': input_criteria,
                    'failed_test': 'value_datatype',
                    'input_path': input_key_name,
                    'error_value': value,
                    'error_code': 4001
                }
                try:
                    value_index = self._datatype_classes.index(value.__class__)
                except:
                    # value's class is not a recognized json datatype
                    error_dict['error_value'] = value.__class__.__name__
                    raise InputValidationError(error_dict)
                value_type = self._datatype_names[value_index]
                if input_criteria['value_datatype'] == 'null':
                    # a null-typed field accepts any datatype
                    pass
                else:
                    if value_type != input_criteria['value_datatype']:
                        raise InputValidationError(error_dict)
                # call appropriate validation sub-routine for datatype of value
                if value_type == 'boolean':
                    input_dict[key] = self._validate_boolean(value, input_key_name, object_title)
                elif value_type == 'number':
                    input_dict[key] = self._validate_number(value, input_key_name, object_title)
                elif value_type == 'string':
                    input_dict[key] = self._validate_string(value, input_key_name, object_title)
                elif value_type == 'map':
                    input_dict[key] = self._validate_dict(value, schema_dict[key], input_key_name, object_title)
                elif value_type == 'list':
                    input_dict[key] = self._validate_list(value, schema_dict[key], input_key_name, object_title)
        # set default values for empty optional fields
        for key in max_key_list:
            if key not in input_key_list:
                indexed_key = max_keys[max_key_list.index(key)]
                if indexed_key in self.components.keys():
                    if 'default_value' in self.components[indexed_key]:
                        input_dict[key] = self.components[indexed_key]['default_value']
        return input_dict
def _validate_number(self, input_number, path_to_root, object_title=''):
'''
a helper method for validating properties of a number
:return: input_number
'''
rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
input_criteria = self.keyMap[rules_path_to_root]
error_dict = {
'object_title': object_title,
'model_schema': self.schema,
'input_criteria': input_criteria,
'failed_test': 'value_datatype',
'input_path': path_to_root,
'error_value': input_number,
'error_code': 4001
}
if 'integer_data' in input_criteria.keys():
if input_criteria['integer_data'] and not isinstance(input_number, int):
error_dict['failed_test'] = 'integer_data'
error_dict['error_code'] = 4021
raise InputValidationError(error_dict)
if 'min_value' in input_criteria.keys():
if input_number < input_criteria['min_value']:
error_dict['failed_test'] = 'min_value'
error_dict['error_code'] = 4022
raise InputValidationError(error_dict)
if 'max_value' in input_criteria.keys():
if input_number > input_criteria['max_value']:
error_dict['failed_test'] = 'max_value'
error_dict['error_code'] = 4023
raise InputValidationError(error_dict)
if 'greater_than' in input_criteria.keys():
if input_number <= input_criteria['greater_than']:
error_dict['failed_test'] = 'greater_than'
error_dict['error_code'] = 4024
raise InputValidationError(error_dict)
if 'less_than' in input_criteria.keys():
if input_number >= input_criteria['less_than']:
error_dict['failed_test'] = 'less_than'
error_dict['error_code'] = 4025
raise InputValidationError(error_dict)
if 'equal_to' in input_criteria.keys():
if input_number != input_criteria['equal_to']:
error_dict['failed_test'] = 'equal_to'
error_dict['error_code'] = 4026
raise InputValidationError(error_dict)
if 'discrete_values' in input_criteria.keys():
if input_number not in input_criteria['discrete_values']:
error_dict['failed_test'] = 'discrete_values'
error_dict['error_code'] = 4041
raise InputValidationError(error_dict)
if 'excluded_values' in input_criteria.keys():
if input_number in input_criteria['excluded_values']:
error_dict['failed_test'] = 'excluded_values'
error_dict['error_code'] = 4042
raise InputValidationError(error_dict)
# TODO: validate number against identical to reference
# TODO: run lambda function and call validation url
return input_number
    def _validate_string(self, input_string, path_to_root, object_title=''):
        '''
        a helper method for validating properties of a string

        each qualifier declared in the keyMap criteria for the field is
        checked in order; the first failure raises InputValidationError
        with the matching error code

        :return: input_string (or raises InputValidationError)
        '''
        # normalize item indices so the path matches the keyMap rules
        rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
        input_criteria = self.keyMap[rules_path_to_root]
        # shared error template; failed_test / error_code are overwritten
        # by whichever check fails first
        error_dict = {
            'object_title': object_title,
            'model_schema': self.schema,
            'input_criteria': input_criteria,
            'failed_test': 'value_datatype',
            'input_path': path_to_root,
            'error_value': input_string,
            'error_code': 4001
        }
        # byte data: string must base64-decode to bytes (error code 4011)
        if 'byte_data' in input_criteria.keys():
            if input_criteria['byte_data']:
                error_dict['failed_test'] = 'byte_data'
                error_dict['error_code'] = 4011
                try:
                    decoded_bytes = b64decode(input_string)
                except:
                    raise InputValidationError(error_dict)
                if not isinstance(decoded_bytes, bytes):
                    raise InputValidationError(error_dict)
        # value comparisons use lexicographic string ordering
        if 'min_value' in input_criteria.keys():
            if input_string < input_criteria['min_value']:
                error_dict['failed_test'] = 'min_value'
                error_dict['error_code'] = 4022
                raise InputValidationError(error_dict)
        if 'max_value' in input_criteria.keys():
            if input_string > input_criteria['max_value']:
                error_dict['failed_test'] = 'max_value'
                error_dict['error_code'] = 4023
                raise InputValidationError(error_dict)
        if 'greater_than' in input_criteria.keys():
            if input_string <= input_criteria['greater_than']:
                error_dict['failed_test'] = 'greater_than'
                error_dict['error_code'] = 4024
                raise InputValidationError(error_dict)
        if 'less_than' in input_criteria.keys():
            if input_string >= input_criteria['less_than']:
                error_dict['failed_test'] = 'less_than'
                error_dict['error_code'] = 4025
                raise InputValidationError(error_dict)
        if 'equal_to' in input_criteria.keys():
            if input_string != input_criteria['equal_to']:
                error_dict['failed_test'] = 'equal_to'
                error_dict['error_code'] = 4026
                raise InputValidationError(error_dict)
        # length checks
        if 'min_length' in input_criteria.keys():
            if len(input_string) < input_criteria['min_length']:
                error_dict['failed_test'] = 'min_length'
                error_dict['error_code'] = 4012
                raise InputValidationError(error_dict)
        if 'max_length' in input_criteria.keys():
            if len(input_string) > input_criteria['max_length']:
                error_dict['failed_test'] = 'max_length'
                error_dict['error_code'] = 4013
                raise InputValidationError(error_dict)
        # regex checks: none may match / all must match / at least one must match
        if 'must_not_contain' in input_criteria.keys():
            for regex in input_criteria['must_not_contain']:
                regex_pattern = re.compile(regex)
                if regex_pattern.findall(input_string):
                    error_dict['failed_test'] = 'must_not_contain'
                    error_dict['error_code'] = 4014
                    raise InputValidationError(error_dict)
        if 'must_contain' in input_criteria.keys():
            for regex in input_criteria['must_contain']:
                regex_pattern = re.compile(regex)
                if not regex_pattern.findall(input_string):
                    error_dict['failed_test'] = 'must_contain'
                    error_dict['error_code'] = 4015
                    raise InputValidationError(error_dict)
        if 'contains_either' in input_criteria.keys():
            regex_match = False
            for regex in input_criteria['contains_either']:
                regex_pattern = re.compile(regex)
                if regex_pattern.findall(input_string):
                    regex_match = True
            if not regex_match:
                error_dict['failed_test'] = 'contains_either'
                error_dict['error_code'] = 4016
                raise InputValidationError(error_dict)
        # membership checks
        if 'discrete_values' in input_criteria.keys():
            if input_string not in input_criteria['discrete_values']:
                error_dict['failed_test'] = 'discrete_values'
                error_dict['error_code'] = 4041
                raise InputValidationError(error_dict)
        if 'excluded_values' in input_criteria.keys():
            if input_string in input_criteria['excluded_values']:
                error_dict['failed_test'] = 'excluded_values'
                error_dict['error_code'] = 4042
                raise InputValidationError(error_dict)
        # TODO: validate string against identical to reference
        # TODO: run lambda function and call validation url
        return input_string
def _validate_boolean(self, input_boolean, path_to_root, object_title=''):
'''
a helper method for validating properties of a boolean
:return: input_boolean
'''
rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
input_criteria = self.keyMap[rules_path_to_root]
error_dict = {
'object_title': object_title,
'model_schema': self.schema,
'input_criteria': input_criteria,
'failed_test': 'value_datatype',
'input_path': path_to_root,
'error_value': input_boolean,
'error_code': 4001
}
if 'equal_to' in input_criteria.keys():
if input_boolean != input_criteria['equal_to']:
error_dict['failed_test'] = 'equal_to'
error_dict['error_code'] = 4026
raise InputValidationError(error_dict)
# TODO: validate boolean against identical to reference
# TODO: run lambda function and call validation url
return input_boolean
    def _ingest_dict(self, input_dict, schema_dict, path_to_root):
        '''
        a helper method for ingesting keys, value pairs in a dictionary

        for each key declared in the schema, the output value is chosen by:
            1. the input value, when its datatype matches the schema
            2. the declared default_value for the key
            3. an empty value appropriate to the key's datatype
        extra input keys are passed through only when the map allows them

        :param input_dict: dictionary of arbitrary input
        :param schema_dict: corresponding dictionary in the model schema
        :param path_to_root: string with dot path from root to this dictionary
        :return: valid_dict
        '''
        valid_dict = {}
        # construct path to root for rules
        rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
        # iterate over keys in schema dict
        for key, value in schema_dict.items():
            key_path = path_to_root
            if not key_path == '.':
                key_path += '.'
            key_path += key
            rules_key_path = re.sub('\[\d+\]', '[0]', key_path)
            # determine whether the input value's datatype matches the schema
            value_match = False
            if key in input_dict.keys():
                value_index = self._datatype_classes.index(value.__class__)
                value_type = self._datatype_names[value_index]
                try:
                    v_index = self._datatype_classes.index(input_dict[key].__class__)
                    v_type = self._datatype_names[v_index]
                    if v_type == value_type:
                        value_match = True
                except:
                    value_match = False
            if value_match:
                # matching input: ingest through the per-datatype helper
                if value_type == 'null':
                    valid_dict[key] = input_dict[key]
                elif value_type == 'boolean':
                    valid_dict[key] = self._ingest_boolean(input_dict[key], key_path)
                elif value_type == 'number':
                    valid_dict[key] = self._ingest_number(input_dict[key], key_path)
                elif value_type == 'string':
                    valid_dict[key] = self._ingest_string(input_dict[key], key_path)
                elif value_type == 'map':
                    valid_dict[key] = self._ingest_dict(input_dict[key], schema_dict[key], key_path)
                elif value_type == 'list':
                    valid_dict[key] = self._ingest_list(input_dict[key], schema_dict[key], key_path)
            else:
                # missing/mismatched input: declared default, else empty value
                value_type = self.keyMap[rules_key_path]['value_datatype']
                if 'default_value' in self.keyMap[rules_key_path]:
                    valid_dict[key] = self.keyMap[rules_key_path]['default_value']
                elif value_type == 'null':
                    valid_dict[key] = None
                elif value_type == 'boolean':
                    valid_dict[key] = False
                elif value_type == 'number':
                    valid_dict[key] = 0.0
                    if 'integer_data' in self.keyMap[rules_key_path].keys():
                        if self.keyMap[rules_key_path]['integer_data']:
                            valid_dict[key] = 0
                elif value_type == 'string':
                    valid_dict[key] = ''
                elif value_type == 'list':
                    valid_dict[key] = []
                elif value_type == 'map':
                    # recurse so nested defaults are filled too
                    valid_dict[key] = self._ingest_dict({}, schema_dict[key], key_path)
        # add extra fields if set to True
        if self.keyMap[rules_path_to_root]['extra_fields']:
            for key, value in input_dict.items():
                if key not in valid_dict.keys():
                    valid_dict[key] = value
        return valid_dict
    def _ingest_list(self, input_list, schema_list, path_to_root):
        '''
        a helper method for ingesting items in a list

        items whose datatype does not match the schema prototype, or which
        fail validation, are silently skipped; ingestion stops early once
        the declared max_size is reached

        :param input_list: list of arbitrary input items
        :param schema_list: corresponding list in the model schema (item prototype at [0])
        :param path_to_root: string with dot path from root to this list
        :return: valid_list
        '''
        valid_list = []
        # construct max list size
        # (a declared falsy max_size short-circuits to an empty list)
        max_size = None
        rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
        if 'max_size' in self.keyMap[rules_path_to_root].keys():
            if not self.keyMap[rules_path_to_root]['max_size']:
                return valid_list
            else:
                max_size = self.keyMap[rules_path_to_root]['max_size']
        # iterate over items in input list
        if input_list:
            # datatype of the schema's item prototype
            rules_index = self._datatype_classes.index(schema_list[0].__class__)
            rules_type = self._datatype_names[rules_index]
            for i in range(len(input_list)):
                item_path = '%s[%s]' % (path_to_root, i)
                value_match = False
                try:
                    item_index = self._datatype_classes.index(input_list[i].__class__)
                    item_type = self._datatype_names[item_index]
                    if item_type == rules_type:
                        value_match = True
                except:
                    value_match = False
                if value_match:
                    # scalars are validated strictly (invalid items dropped);
                    # maps and lists are ingested recursively
                    try:
                        if item_type == 'boolean':
                            valid_list.append(self._validate_boolean(input_list[i], item_path))
                        elif item_type == 'number':
                            valid_list.append(self._validate_number(input_list[i], item_path))
                        elif item_type == 'string':
                            valid_list.append(self._validate_string(input_list[i], item_path))
                        elif item_type == 'map':
                            valid_list.append(self._ingest_dict(input_list[i], schema_list[0], item_path))
                        elif item_type == 'list':
                            valid_list.append(self._ingest_list(input_list[i], schema_list[0], item_path))
                    except:
                        pass
                if isinstance(max_size, int):
                    if len(valid_list) == max_size:
                        return valid_list
        return valid_list
def _ingest_number(self, input_number, path_to_root):
'''
a helper method for ingesting a number
:return: valid_number
'''
valid_number = 0.0
try:
valid_number = self._validate_number(input_number, path_to_root)
except:
rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
if 'default_value' in self.keyMap[rules_path_to_root]:
valid_number = self.keyMap[rules_path_to_root]['default_value']
elif 'integer_data' in self.keyMap[rules_path_to_root].keys():
if self.keyMap[rules_path_to_root]['integer_data']:
valid_number = 0
return valid_number
def _ingest_string(self, input_string, path_to_root):
'''
a helper method for ingesting a string
:return: valid_string
'''
valid_string = ''
try:
valid_string = self._validate_string(input_string, path_to_root)
except:
rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
if 'default_value' in self.keyMap[rules_path_to_root]:
valid_string = self.keyMap[rules_path_to_root]['default_value']
return valid_string
def _ingest_boolean(self, input_boolean, path_to_root):
'''
a helper method for ingesting a boolean
:return: valid_boolean
'''
valid_boolean = False
try:
valid_boolean = self._validate_boolean(input_boolean, path_to_root)
except:
rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
if 'default_value' in self.keyMap[rules_path_to_root]:
valid_boolean = self.keyMap[rules_path_to_root]['default_value']
return valid_boolean
def _reconstruct(self, path_to_root):
'''
a helper method for finding the schema endpoint from a path to root
:param path_to_root: string with dot path to root from
:return: list, dict, string, number, or boolean at path to root
'''
# split path to root into segments
item_pattern = re.compile('\d+\\]')
dot_pattern = re.compile('\\.|\\[')
path_segments = dot_pattern.split(path_to_root)
# construct base schema endpoint
schema_endpoint = self.schema
# reconstruct schema endpoint from segments
if path_segments[1]:
for i in range(1,len(path_segments)):
if item_pattern.match(path_segments[i]):
schema_endpoint = schema_endpoint[0]
else:
schema_endpoint = schema_endpoint[path_segments[i]]
return schema_endpoint
    def _walk(self, path_to_root, record_dict):
        '''
        a helper method for finding the record endpoint from a path to root

        item-index segments ('[N]') fan out over every item of the list at
        that point, so the result is a list of all endpoints matched by the
        dot path (raises if a segment cannot be resolved)

        :param path_to_root: string with dot path to root from
        :param record_dict: dictionary with record data to walk
        :return: list of list, dict, string, number, or boolean endpoints at path to root
        '''
        # split path to root into segments
        item_pattern = re.compile('\d+\\]')
        dot_pattern = re.compile('\\.|\\[')
        path_segments = dot_pattern.split(path_to_root)
        # construct empty fields
        record_endpoints = []
        # determine starting position
        # (a leading '.' produces an empty first segment; drop it)
        if not path_segments[0]:
            path_segments.pop(0)
        # define internal recursive function
        # appends matches to the enclosing record_endpoints list
        def _walk_int(path_segments, record_dict):
            record_endpoint = record_dict
            for i in range(0, len(path_segments)):
                if item_pattern.match(path_segments[i]):
                    # list index segment: fan out over every item
                    for j in range(0, len(record_endpoint)):
                        if len(path_segments) == 2:
                            record_endpoints.append(record_endpoint[j])
                        else:
                            # stop_chain guards against re-walking segments
                            # already handled by an earlier fan-out
                            stop_chain = False
                            for x in range(0, i):
                                if item_pattern.match(path_segments[x]):
                                    stop_chain = True
                            if not stop_chain:
                                shortened_segments = []
                                for z in range(i + 1, len(path_segments)):
                                    shortened_segments.append(path_segments[z])
                                _walk_int(shortened_segments, record_endpoint[j])
                else:
                    stop_chain = False
                    for y in range(0, i):
                        if item_pattern.match(path_segments[y]):
                            stop_chain = True
                    if not stop_chain:
                        if len(path_segments) == i + 1:
                            # final segment: record the matched endpoint
                            record_endpoints.append(record_endpoint[path_segments[i]])
                        else:
                            record_endpoint = record_endpoint[path_segments[i]]
        # conduct recursive walk
        _walk_int(path_segments, record_dict)
        return record_endpoints
    def validate(self, input_data, path_to_root='', object_title=''):
        '''
        a core method for validating input against the model

        input_data is only returned if all data is valid

        :param input_data: list, dict, string, number, or boolean to validate
        :param path_to_root: [optional] string with dot-path of model component
        :param object_title: [optional] string with name of input to validate
        :return: input_data (or InputValidationError)
        '''
        __name__ = '%s.validate' % self.__class__.__name__
        _path_arg = '%s(path_to_root="...")' % __name__
        _title_arg = '%s(object_title="...")' % __name__
        # validate input
        # (normalize path_to_root to its leading-dot form and confirm it
        # names a declared component; empty path means the model root)
        copy_path = path_to_root
        if path_to_root:
            if not isinstance(path_to_root, str):
                raise ModelValidationError('%s must be a string.' % _path_arg)
            else:
                if path_to_root[0] != '.':
                    copy_path = '.%s' % path_to_root
            if not copy_path in self.keyMap.keys():
                raise ModelValidationError('%s does not exist in components %s.' % (_path_arg.replace('...', path_to_root), self.keyMap.keys()))
        else:
            copy_path = '.'
        if object_title:
            if not isinstance(object_title, str):
                raise ModelValidationError('%s must be a string' % _title_arg)
        # construct generic error dictionary
        error_dict = {
            'object_title': object_title,
            'model_schema': self.schema,
            'input_criteria': self.keyMap[copy_path],
            'failed_test': 'value_datatype',
            'input_path': copy_path,
            'error_value': input_data,
            'error_code': 4001
        }
        # determine value type of input data
        try:
            input_index = self._datatype_classes.index(input_data.__class__)
        except:
            # class of input is not a recognized json datatype
            error_dict['error_value'] = input_data.__class__.__name__
            raise InputValidationError(error_dict)
        input_type = self._datatype_names[input_index]
        # validate input data type
        if input_type != self.keyMap[copy_path]['value_datatype']:
            raise InputValidationError(error_dict)
        # run helper method appropriate to data type
        # (containers first rebuild their schema endpoint via _reconstruct)
        if input_type == 'boolean':
            input_data = self._validate_boolean(input_data, copy_path, object_title)
        elif input_type == 'number':
            input_data = self._validate_number(input_data, copy_path, object_title)
        elif input_type == 'string':
            input_data = self._validate_string(input_data, copy_path, object_title)
        elif input_type == 'list':
            schema_list = self._reconstruct(copy_path)
            input_data = self._validate_list(input_data, schema_list, copy_path, object_title)
        elif input_type == 'map':
            schema_dict = self._reconstruct(copy_path)
            input_data = self._validate_dict(input_data, schema_dict, copy_path, object_title)
        return input_data
def ingest(self, **kwargs):
'''
a core method to ingest and validate arbitrary keyword data
**NOTE: data is always returned with this method**
for each key in the model, a value is returned according
to the following priority:
1. value in kwargs if field passes validation test
2. default value declared for the key in the model
3. empty value appropriate to datatype of key in the model
**NOTE: as long as a default value is provided for each key-
value, returned data will be model valid
**NOTE: if 'extra_fields' is True for a dictionary, the key-
value pair of all fields in kwargs which are not declared in
the model will also be added to the corresponding dictionary
data
**NOTE: if 'max_size' is declared for a list, method will
stop adding input to the list once it reaches max size
:param kwargs: key, value pairs
:return: dictionary with keys and value
'''
__name__ = '%s.ingest' % self.__class__.__name__
schema_dict = self.schema
path_to_root = '.'
valid_data = self._ingest_dict(kwargs, schema_dict, path_to_root)
return valid_data
    def query(self, query_criteria, valid_record=None):
        '''
        a core method for querying model valid data with criteria

        **NOTE: input is only returned if all fields & qualifiers are valid for model

        :param query_criteria: dictionary with model field names and query qualifiers
        :param valid_record: dictionary with model valid record
        :return: boolean (or QueryValidationError)

        an example of how to construct the query_criteria argument:

        query_criteria = {
            '.path.to.number': {
                'min_value': 4.5
            },
            '.path.to.string': {
                'must_contain': [ '\\regex' ]
            }
        }

        **NOTE: for a full list of operators for query_criteria based upon field
        datatype, see either the query-rules.json file or REFERENCE file
        '''
        __name__ = '%s.query' % self.__class__.__name__
        _query_arg = '%s(query_criteria={...})' % __name__
        _record_arg = '%s(valid_record={...})' % __name__
        # validate input
        if not isinstance(query_criteria, dict):
            raise ModelValidationError('%s must be a dictionary.' % _query_arg)
        # convert javascript dot_path to class dot_path
        # (also promote bare scalar criteria to an implicit equal_to qualifier)
        criteria_copy = {}
        equal_fields = []
        dot_fields = []
        for key, value in query_criteria.items():
            copy_key = key
            if not key:
                copy_key = '.'
            else:
                if key[0] != '.':
                    copy_key = '.%s' % key
                    dot_fields.append(copy_key)
            criteria_copy[copy_key] = value
            if value.__class__ in self._datatype_classes[0:4]:
                criteria_copy[copy_key] = {
                    'equal_to': value
                }
                equal_fields.append(copy_key)
        # validate query criteria against query rules
        query_kwargs = {
            'fields_dict': criteria_copy,
            'fields_rules': self.queryRules,
            'declared_value': False
        }
        try:
            self._validate_fields(**query_kwargs)
        except ModelValidationError as err:
            # rewrite the error message so it refers to the caller's original
            # field spelling (implicit equal_to removed, dotless names restored)
            message = err.error['message']
            for field in equal_fields:
                equal_error = 'field %s qualifier equal_to' % field
                if message.find(equal_error) > -1:
                    message = message.replace(equal_error, 'field %s' % field)
                    break
            field_pattern = re.compile('ield\s(\..*?)\s')
            field_name = field_pattern.findall(message)
            if field_name:
                if field_name[0] in dot_fields:
                    def _replace_field(x):
                        return 'ield %s ' % x.group(1)[1:]
                    message = field_pattern.sub(_replace_field, message)
            raise QueryValidationError(message)
        # query test record
        # (returns False on the first field whose criteria are not satisfied)
        if valid_record:
            if not isinstance(valid_record, dict):
                raise ModelValidationError('%s must be a dictionary.' % _record_arg)
            for key, value in criteria_copy.items():
                eval_outcome = self._evaluate_field(valid_record, key, value)
                if not eval_outcome:
                    return False
        return True
|
collectiveacuity/jsonModel
|
jsonmodel/validators.py
|
jsonModel._validate_number
|
python
|
def _validate_number(self, input_number, path_to_root, object_title=''):
'''
a helper method for validating properties of a number
:return: input_number
'''
rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
input_criteria = self.keyMap[rules_path_to_root]
error_dict = {
'object_title': object_title,
'model_schema': self.schema,
'input_criteria': input_criteria,
'failed_test': 'value_datatype',
'input_path': path_to_root,
'error_value': input_number,
'error_code': 4001
}
if 'integer_data' in input_criteria.keys():
if input_criteria['integer_data'] and not isinstance(input_number, int):
error_dict['failed_test'] = 'integer_data'
error_dict['error_code'] = 4021
raise InputValidationError(error_dict)
if 'min_value' in input_criteria.keys():
if input_number < input_criteria['min_value']:
error_dict['failed_test'] = 'min_value'
error_dict['error_code'] = 4022
raise InputValidationError(error_dict)
if 'max_value' in input_criteria.keys():
if input_number > input_criteria['max_value']:
error_dict['failed_test'] = 'max_value'
error_dict['error_code'] = 4023
raise InputValidationError(error_dict)
if 'greater_than' in input_criteria.keys():
if input_number <= input_criteria['greater_than']:
error_dict['failed_test'] = 'greater_than'
error_dict['error_code'] = 4024
raise InputValidationError(error_dict)
if 'less_than' in input_criteria.keys():
if input_number >= input_criteria['less_than']:
error_dict['failed_test'] = 'less_than'
error_dict['error_code'] = 4025
raise InputValidationError(error_dict)
if 'equal_to' in input_criteria.keys():
if input_number != input_criteria['equal_to']:
error_dict['failed_test'] = 'equal_to'
error_dict['error_code'] = 4026
raise InputValidationError(error_dict)
if 'discrete_values' in input_criteria.keys():
if input_number not in input_criteria['discrete_values']:
error_dict['failed_test'] = 'discrete_values'
error_dict['error_code'] = 4041
raise InputValidationError(error_dict)
if 'excluded_values' in input_criteria.keys():
if input_number in input_criteria['excluded_values']:
error_dict['failed_test'] = 'excluded_values'
error_dict['error_code'] = 4042
raise InputValidationError(error_dict)
# TODO: validate number against identical to reference
# TODO: run lambda function and call validation url
return input_number
|
a helper method for validating properties of a number
:return: input_number
|
train
|
https://github.com/collectiveacuity/jsonModel/blob/1ea64c36d78add3faa7b85ff82c5ec685458c940/jsonmodel/validators.py#L1000-L1064
| null |
class jsonModel(object):
__rules__ = jsonLoader('jsonmodel', 'models/model-rules.json')
def __init__(self, data_model, query_rules=None):
    '''
    a method for testing data model declaration & initializing the class

    :param data_model: dictionary with json model architecture
    :param query_rules: [optional] dictionary with valid field type qualifiers
    :return: object with jsonModel methods
    :raises ModelValidationError: if the declaration violates model rules
    '''

# validate schema input
    if not isinstance(data_model, dict):
        raise ModelValidationError('Data model must be a dictionary.')
    elif 'schema' not in data_model.keys():
        raise ModelValidationError('Data model must have a schema key.')
    elif not isinstance(data_model['schema'], dict):
        raise ModelValidationError('Value for the data model "schema" field must be a dictionary.')
    elif not data_model['schema']:
        raise ModelValidationError('Data model "schema" field must not be empty.')

# construct base methods
    # deep copy so later mutation of keyCriteria never leaks back to the caller
    from copy import deepcopy
    data_model = deepcopy(data_model)
    self.schema = data_model['schema']
    model_map = mapModel(self.schema)
    self.keyName = model_map.keyName
    self.keyCriteria = model_map.keyCriteria

# construct protected type classes
    self._datatype_names = mapModel._datatype_names
    self._datatype_classes = mapModel._datatype_classes

# validate absence of item designators in keys
    item_pattern = re.compile('\[\d+\]')
    for i in range(len(self.keyName)):
        patterns_found = item_pattern.findall(self.keyName[i])
        if patterns_found:
            for designator in patterns_found:
                if designator != '[0]':
                    message = 'Key name for schema field %s must not contain the item designator pattern %s' % (self.keyName[i], designator)
                    raise ModelValidationError(message)

# validate existence of first item in list declarations
    key_set = set(self.keyName)
    for i in range(len(self.keyName)):
        if self.keyCriteria[i]['value_datatype'] == 'list':
            item_key = self.keyName[i] + '[0]'
            if not item_key in key_set:
                message = 'Schema field %s must declare an initial item for the list.' % self.keyName[i]
                raise ModelValidationError(message)
        # alter list requirement if first item is empty
            else:
                item_index = self.keyName.index(item_key)
                if not self.keyCriteria[item_index]['required_field']:
                    self.keyCriteria[i]['required_field'] = False

# validate title input & construct title method
    self.title = ''
    if 'title' in data_model.keys():
        if not isinstance(data_model['title'], str):
            raise ModelValidationError('Value for model title must be a string.')
        self.title = data_model['title']

# validate description input & construct description method
    self.description = ''
    if 'description' in data_model.keys():
        if not isinstance(data_model['description'], str):
            raise ModelValidationError('Value for model description must be a string.')
        self.description = data_model['description']

# validate url input & construct url method
    self.url = ''
    if 'url' in data_model.keys():
        if not isinstance(data_model['url'], str):
            raise ModelValidationError('Value for model url must be a string.')
        # BUGFIX: previously assigned to self.title, which left self.url
        # empty and silently clobbered any declared title
        self.url = data_model['url']

# validate metadata input & construct metadata method
    self.metadata = {}
    if 'metadata' in data_model.keys():
        if not isinstance(data_model['metadata'], dict):
            raise ModelValidationError('Value for model metadata must be a dictionary.')
        self.metadata = data_model['metadata']

# validate max size input & construct maxSize property
    # self.maxSize = None
    # if 'max_size' in data_model.keys():
    #     if not isinstance(data_model['max_size'], int):
    #         raise ModelValidationError('Value for model max_size must be a positive integer.')
    #     elif data_model['max_size'] < 0:
    #         raise ModelValidationError('Value for model max_size must be a positive integer.')
    #     elif data_model['max_size']:
    #         self.maxSize = data_model['max_size']

# validate components input & construct component property
    self.components = {}
    if 'components' in data_model.keys():
        if not isinstance(data_model['components'], dict):
            raise ModelValidationError('Value for model components must be a dictionary.')
        self.components = self._validate_fields(data_model['components'], self.__rules__['components'])

# construct keyMap fields from key names and key criteria
    self.keyMap = {}
    for i in range(len(self.keyName)):
        self.keyMap[self.keyName[i]] = self.keyCriteria[i]
    for key, value in self.components.items():
    # convert javascript dot_path to class dot_path
        dot_key = ''
        if not key:
            dot_key = '.'
        else:
            if key[0] != '.':
                dot_key = '.%s' % key
    # add component declarations to keyMap
        if key in self.keyMap.keys():
            for k, v in self.components[key].items():
                self.keyMap[key][k] = v
        elif dot_key and dot_key in self.keyMap.keys():
            for k, v in self.components[key].items():
                self.keyMap[dot_key][k] = v

# validate default values in lists
    self._validate_defaults(self.keyMap)

# construct queryRules property from class model rules
    self.queryRules = {}
    for key, value in self.__rules__['components'].items():
        remove_from_query = [ 'required_field', 'default_value', 'example_values', 'field_title', 'field_position', 'field_description', 'field_metadata', 'extra_fields' ]
        field_qualifiers = {
            'value_exists': False
        }
        for k, v in value.items():
            if k not in remove_from_query:
                field_qualifiers[k] = v
        self.queryRules[key] = field_qualifiers

# validate query rules input and replace queryRules property
    if query_rules:
        if not isinstance(query_rules, dict):
            message = 'Value for query rules input must be a dictionary.'
            raise ModelValidationError(message)
        input_set = set(query_rules.keys())
        req_set = set(self.queryRules.keys())
        if input_set - req_set:
            message = 'Query rules input may only have %s field key names.' % req_set
            raise ModelValidationError(message)
        elif req_set - input_set:
            message = 'Query rules input must have all %s field key names.' % req_set
            raise ModelValidationError(message)
        for key in req_set:
            if not isinstance(query_rules[key], dict):
                message = 'Value for query rules %s field must be a dictionary.' % key
                raise ModelValidationError(message)
            input_qualifier_set = set(query_rules[key].keys())
            req_qualifier_set = set(self.queryRules[key].keys())
            if input_qualifier_set - req_qualifier_set:
                message = 'Query rules field %s may only have qualifiers %s' % (key, req_qualifier_set)
                raise ModelValidationError(message)
            for k, v in query_rules[key].items():
                if v.__class__ != self.queryRules[key][k].__class__:
                    qualifier_index = self._datatype_classes.index(self.queryRules[key][k].__class__)
                    qualifier_type = self._datatype_names[qualifier_index]
                    message = 'Value for query rules field %s qualifier %s must be a "%s" datatype.' % (key, k, qualifier_type)
                    raise ModelValidationError(message)
        self.queryRules = query_rules
def _validate_fields(self, fields_dict, fields_rules, declared_value=True):
    '''
    a helper method for validating field criteria declarations against model rules

    :param fields_dict: dictionary mapping field dot-paths to criteria dictionaries
    :param fields_rules: dictionary with allowed qualifiers per field datatype
    :param declared_value: boolean to also test values declared in the schema itself
    :return: fields_dict
    :raises ModelValidationError: if any criteria declaration is invalid
    '''

# validate key names in fields
    for key, value in fields_dict.items():

    # convert javascript dot_path to class dot_path
        if not key:
            key = '.'
        else:
            if key[0] != '.':
                key = '.%s' % key
        if key not in self.keyName:
            raise ModelValidationError('Field %s is not a field declared in model schema.' % key)
        elif not isinstance(value, dict):
            raise ModelValidationError('Value for field %s must be a dictionary.' % key)

    # validate field criteria are appropriate to field datatype
        value_type = self.keyCriteria[self.keyName.index(key)]['value_datatype']
        type_dict = {}
        if value_type == 'string':
            type_dict = fields_rules['.string_fields']
        elif value_type == 'number':
            type_dict = fields_rules['.number_fields']
        elif value_type == 'boolean':
            type_dict = fields_rules['.boolean_fields']
        elif value_type == 'list':
            type_dict = fields_rules['.list_fields']
        elif value_type == 'map':
            type_dict = fields_rules['.map_fields']
        elif value_type == 'null':
            type_dict = fields_rules['.null_fields']
        if set(value.keys()) - set(type_dict.keys()):
            raise ModelValidationError('Field %s may only have datatype %s qualifiers %s.' % (key, value_type, set(type_dict.keys())))

    # validate criteria qualifier values are appropriate datatype
        for k, v in value.items():
            v_index = self._datatype_classes.index(v.__class__)
            v_type = self._datatype_names[v_index]
            qualifier_index = self._datatype_classes.index(type_dict[k].__class__)
            qualifier_type = self._datatype_names[qualifier_index]
            if v_type != qualifier_type:
                message = 'Value for field %s qualifier %s must be a %s datatype.' % (key, k, qualifier_type)
                raise ModelValidationError(message)
            # an integer example in the rules means the qualifier must be an int
            if qualifier_type == 'number':
                if isinstance(type_dict[k], int):
                    if not isinstance(v, int):
                        message = 'Value for field %s qualifier %s must be an integer.' % (key, k)
                        raise ModelValidationError(message)

        # validate internal logic of each qualifier value declaration
            if k in ('must_not_contain', 'must_contain', 'contains_either'):
                for item in v:
                    if not isinstance(item, str):
                        message = 'Each item in list field %s qualifier %s must be a string.' % (key, k)
                        raise ModelValidationError(message)
            if k in ('min_length', 'max_length', 'min_size', 'max_size'):
                if v < 0:
                    message = 'Value for field %s qualifier %s cannot be negative.' % (key, k)
                    raise ModelValidationError(message)
            if k in ('discrete_values', 'excluded_values', 'example_values'):
                for item in v:
                    if value_type == 'number':
                        if not isinstance(item, int) and not isinstance(item, float):
                            message = 'Each item in field %s qualifier %s list must be a number.' % (key, k)
                            raise ModelValidationError(message)
                    elif not isinstance(item, str):
                        message = 'Each item in list for field %s qualifier %s must be a string.' % (key, k)
                        raise ModelValidationError(message)
            if k == 'identical_to':
                if not v in self.keyName:
                    message = 'Value "%s" for field %s qualifier %s not found in components keys.' % (v, key, k)
                    raise ModelValidationError(message)
            if k == 'unique_values':
                if v:
                    item_name = key + '[0]'
                    item_type = self.keyCriteria[self.keyName.index(item_name)]['value_datatype']
                    if not item_type in ('number', 'string'):
                        message = 'Field %s[0] must be either a string or number if qualifier "unique_values": true' % key
                        raise ModelValidationError(message)

    # validate lack of other qualifiers if value exist is false
        if 'value_exists' in value.keys():
            if not value['value_exists']:
                if set(value.keys()) - {'value_exists'}:
                    message = 'If field %s qualifier value_exists: false, field may not have other qualifiers.' % key
                    raise ModelValidationError(message)

    # validate size qualifiers against each other
        size_qualifiers = ['min_size', 'max_size']
        for qualifier in size_qualifiers:
            if qualifier in value.keys():
                test_value = value[qualifier]
                value_path = 'field %s qualifier %s' % (key, qualifier)
                header = 'Value %s for %s' % (test_value, value_path)
                if 'min_size' in value.keys():
                    if test_value < value['min_size']:
                        message = '%s must not be less than "min_size": %s' % (header, value['min_size'])
                        raise ModelValidationError(message)
                if 'max_size' in value.keys():
                    if test_value > value['max_size']:
                        message = '%s must not be greater than "max_size": %s' % (header, value['max_size'])
                        raise ModelValidationError(message)

    # validate length qualifiers against each other
        length_qualifiers = ['min_length', 'max_length']
        for qualifier in length_qualifiers:
            if qualifier in value.keys():
                test_value = value[qualifier]
                value_path = 'field %s qualifier %s' % (key, qualifier)
                header = 'Value %s for %s' % (test_value, value_path)
                if 'min_length' in value.keys():
                    if test_value < value['min_length']:
                        message = '%s must be at least "min_length": %s' % (header, value['min_length'])
                        raise ModelValidationError(message)
                if 'max_length' in value.keys():
                    if test_value > value['max_length']:
                        message = '%s cannot be more than "max_length": %s' % (header, value['max_length'])
                        raise ModelValidationError(message)

    # validate range qualifiers against each other & length qualifiers
        range_qualifiers = ['min_value', 'max_value', 'greater_than', 'less_than', 'equal_to']
        for qualifier in range_qualifiers:
            if qualifier in value.keys():
                test_value = value[qualifier]
                value_path = 'field %s qualifier %s' % (key, qualifier)
                quote_text = ''
                if isinstance(test_value, str):
                    quote_text = '"'
                header = 'Value %s%s%s for %s' % (quote_text, test_value, quote_text, value_path)
                if 'min_value' in value.keys():
                    if test_value < value['min_value']:
                        message = '%s must not be less than "min_value": %s' % (header, value['min_value'])
                        raise ModelValidationError(message)
                if 'max_value' in value.keys():
                    if test_value > value['max_value']:
                        message = '%s must not be greater than "max_value": %s' % (header, value['max_value'])
                        raise ModelValidationError(message)
                if 'greater_than' in value.keys():
                    if test_value <= value['greater_than'] and not qualifier == 'greater_than':
                        message = '%s must be "greater_than": %s' % (header, value['greater_than'])
                        raise ModelValidationError(message)
                if 'less_than' in value.keys():
                    if test_value >= value['less_than'] and not qualifier == 'less_than':
                        message = '%s must be "less_than": %s' % (header, value['less_than'])
                        raise ModelValidationError(message)
                if 'min_length' in value.keys():
                    if len(test_value) < value['min_length']:
                        message = '%s must be at least "min_length": %s' % (header, value['min_length'])
                        raise ModelValidationError(message)
                if 'max_length' in value.keys():
                    if len(test_value) > value['max_length']:
                        message = '%s cannot be more than "max_length": %s' % (header, value['max_length'])
                        raise ModelValidationError(message)
                if 'integer_data' in value.keys():
                    if value['integer_data']:
                        if not isinstance(test_value, int):
                            message = '%s must be an "integer_data".' % header
                            raise ModelValidationError(message)
                if 'must_not_contain' in value.keys():
                    for regex in value['must_not_contain']:
                        regex_pattern = re.compile(regex)
                        if regex_pattern.findall(test_value):
                            message = '%s matches regex pattern in "must_not_contain": ["%s"]' % (header, regex)
                            raise ModelValidationError(message)
                if 'must_contain' in value.keys():
                    for regex in value['must_contain']:
                        regex_pattern = re.compile(regex)
                        if not regex_pattern.findall(test_value):
                            message = '%s does not match regex pattern in "must_contain": ["%s"].' % (header, regex)
                            raise ModelValidationError(message)
                if 'contains_either' in value.keys():
                    regex_match = False
                    regex_patterns = []
                    for regex in value['contains_either']:
                        regex_patterns.append(regex)
                        regex_pattern = re.compile(regex)
                        if regex_pattern.findall(test_value):
                            regex_match = True
                    if not regex_match:
                        message = '%s does not match any regex patterns in "contains_either": %s' % (header, regex_patterns)
                        raise ModelValidationError(message)
                if 'byte_data' in value.keys():
                    if value['byte_data']:
                        if qualifier != 'equal_to':
                            message = '%s cannot be used with base64 encoded "byte_data".' % header
                            raise ModelValidationError(message)

    # validate discrete value qualifiers against other criteria
        schema_field = self.keyCriteria[self.keyName.index(key)]
        discrete_qualifiers = ['declared_value', 'default_value', 'excluded_values', 'discrete_values', 'example_values']
        for qualifier in discrete_qualifiers:
            test_qualifier = False
            if qualifier in schema_field:
                test_qualifier = True
                if qualifier == 'declared_value' and not schema_field[qualifier]:
                    test_qualifier = False
            if qualifier in value.keys() or (test_qualifier and declared_value):
                multiple_values = False
                if qualifier in value.keys():
                    if isinstance(value[qualifier], list):
                        test_list = value[qualifier]
                        multiple_values = True
                    else:
                        test_list = [value[qualifier]]
                else:
                    test_list = [schema_field[qualifier]]
                value_path = 'field %s qualifier %s' % (key, qualifier)
                for i in range(len(test_list)):
                    test_value = test_list[i]
                    quote_text = ''
                    if isinstance(test_value, str):
                        quote_text = '"'
                    item_text = ''
                    if multiple_values:
                        item_text = '[%s]' % i
                    header = 'Value %s%s%s for %s%s' % (quote_text, test_value, quote_text, value_path, item_text)
                    if 'min_value' in value.keys():
                        if test_value < value['min_value']:
                            message = '%s must not be less than "min_value": %s' % (header, value['min_value'])
                            raise ModelValidationError(message)
                    if 'max_value' in value.keys():
                        if test_value > value['max_value']:
                            message = '%s must not be greater than "max_value": %s' % (header, value['max_value'])
                            raise ModelValidationError(message)
                    if 'equal_to' in value.keys():
                        if test_value != value['equal_to']:
                            # NOTE(review): only raises for boolean test values outside
                            # declared_value — looks deliberate but verify upstream intent
                            if qualifier != 'declared_value' and isinstance(test_value, bool):
                                message = '%s must be "equal_to": %s' % (header, value['equal_to'])
                                raise ModelValidationError(message)
                    if 'greater_than' in value.keys():
                        if test_value <= value['greater_than']:
                            message = '%s must be "greater_than": %s' % (header, value['greater_than'])
                            raise ModelValidationError(message)
                    if 'less_than' in value.keys():
                        if test_value >= value['less_than']:
                            message = '%s must be "less_than": %s' % (header, value['less_than'])
                            raise ModelValidationError(message)
                    if 'integer_data' in value.keys():
                        if value['integer_data']:
                            if not isinstance(test_value, int):
                                message = '%s must be an "integer_data".' % header
                                raise ModelValidationError(message)
                    if 'min_length' in value.keys():
                        if len(test_value) < value['min_length']:
                            message = '%s must be at least "min_length": %s' % (header, value['min_length'])
                            raise ModelValidationError(message)
                    if 'max_length' in value.keys():
                        if len(test_value) > value['max_length']:
                            message = '%s cannot be more than "max_length": %s' % (header, value['max_length'])
                            raise ModelValidationError(message)
                    if 'must_not_contain' in value.keys():
                        for regex in value['must_not_contain']:
                            regex_pattern = re.compile(regex)
                            if regex_pattern.findall(test_value):
                                message = '%s matches regex pattern in "must_not_contain": ["%s"]' % (header, regex)
                                raise ModelValidationError(message)
                    if 'must_contain' in value.keys():
                        for regex in value['must_contain']:
                            regex_pattern = re.compile(regex)
                            if not regex_pattern.findall(test_value):
                                message = '%s does not match regex pattern in "must_contain": ["%s"]' % (header, regex)
                                raise ModelValidationError(message)
                    if 'contains_either' in value.keys():
                        regex_match = False
                        regex_patterns = []
                        for regex in value['contains_either']:
                            regex_patterns.append(regex)
                            regex_pattern = re.compile(regex)
                            if regex_pattern.findall(test_value):
                                regex_match = True
                        if not regex_match:
                            message = '%s does not match any regex patterns in "contains_either": %s' % (header, regex_patterns)
                            raise ModelValidationError(message)
                    if 'byte_data' in value.keys():
                        message = '%s cannot be base64 decoded to "byte_data".' % header
                        try:
                            decoded_bytes = b64decode(test_value)
                        except:
                            raise ModelValidationError(message)
                        if not isinstance(decoded_bytes, bytes):
                            raise ModelValidationError(message)

    # validate discrete value qualifiers against each other
        for qualifier in discrete_qualifiers:
            test_qualifier = False
            if qualifier in schema_field:
                test_qualifier = True
                if qualifier == 'declared_value' and not schema_field[qualifier]:
                    test_qualifier = False
            if qualifier in value.keys() or (test_qualifier and declared_value):
                multiple_values = False
                if qualifier in value.keys():
                    if isinstance(value[qualifier], list):
                        test_list = value[qualifier]
                        multiple_values = True
                    else:
                        test_list = [value[qualifier]]
                else:
                    test_list = [schema_field[qualifier]]
                value_path = 'field %s qualifier %s' % (key, qualifier)
                for i in range(len(test_list)):
                    test_value = test_list[i]
                    quote_text = ''
                    if isinstance(test_value, str):
                        quote_text = '"'
                    item_text = ''
                    if multiple_values:
                        item_text = '[%s]' % i
                    header = 'Value %s%s%s for %s%s' % (quote_text, test_value, quote_text, value_path, item_text)
                    if 'excluded_values' in value.keys():
                        if not qualifier == 'excluded_values':
                            if test_value in value['excluded_values']:
                                message = '%s cannot be one of "excluded_values": %s.' % (header, value['excluded_values'])
                                raise ModelValidationError(message)
                    if 'discrete_values' in value.keys():
                        # NOTE(review): guard checks 'excluded_values', not
                        # 'discrete_values' — possible copy-paste; confirm upstream
                        if not qualifier == 'excluded_values':
                            if test_value not in value['discrete_values']:
                                message = '%s must be one of "discrete_values": %s' % (header, value['discrete_values'])
                                raise ModelValidationError(message)
    return fields_dict
def _validate_defaults(self, fields_dict):
    '''
    a helper method for validating default values declared for list fields

    :param fields_dict: dictionary mapping field dot-paths to criteria dictionaries
    :return: fields_dict
    :raises ModelValidationError: if a default value fails item validation
    '''

# validate key names in fields
    for key, value in fields_dict.items():

    # retrieve value type and type dict
        value_type = self.keyCriteria[self.keyName.index(key)]['value_datatype']

    # validate discrete value qualifiers against other criteria
        qualifier = 'default_value'
        if qualifier in value.keys():
            multiple_values = False
            if isinstance(value[qualifier], list):
                test_list = value[qualifier]
                multiple_values = True
            else:
                test_list = [value[qualifier]]
            value_path = 'field %s qualifier %s' % (key, qualifier)
            for i in range(len(test_list)):
                test_value = test_list[i]
                quote_text = ''
                if isinstance(test_value, str):
                    quote_text = '"'
                item_text = ''
                if multiple_values:
                    item_text = '[%s]' % i
                qualifier_text = value_path + item_text
                header = 'Value %s%s%s for %s' % (quote_text, test_value, quote_text, qualifier_text)
                # only list defaults are re-validated here, against the
                # criteria declared for the list's initial item ([0])
                if value_type == 'list':
                    default_item_key = '%s[0]' % key
                    try:
                        self.validate(test_value, default_item_key, object_title=header)
                    except Exception as err:
                        # rewrite the error so it points at the default
                        # declaration rather than the item path
                        raise ModelValidationError(str(err).strip().replace('field %s' % default_item_key, qualifier_text))
    return fields_dict
def _evaluate_field(self, record_dict, field_name, field_criteria):
    ''' a helper method for evaluating record values based upon query criteria

    :param record_dict: dictionary with model valid data to evaluate
    :param field_name: string with path to root of query field
    :param field_criteria: dictionary with query operators and qualifiers
    :return: boolean (True if no field_criteria evaluate to false)
    '''

# determine value existence criteria
    value_exists = True
    if 'value_exists' in field_criteria.keys():
        if not field_criteria['value_exists']:
            value_exists = False

# validate existence of field
    # _walk raises when the path is absent from the record
    field_exists = True
    try:
        record_values = self._walk(field_name, record_dict)
    except:
        field_exists = False

# evaluate existence query criteria
    if value_exists != field_exists:
        return False
    elif not value_exists:
        return True

# convert javascript dot_path to class dot_path
    field_key = field_name
    if not field_name:
        field_key = '.'
    else:
        if field_name[0] != '.':
            field_key = '.%s' % field_name

# evaluate other query criteria
    # each qualifier passes if ANY walked value satisfies it (except
    # must_not_contain / excluded_values, which fail if ANY value matches)
    for key, value in field_criteria.items():
        if key in ('min_size', 'min_length'):
            found = False
            if self.keyMap[field_key]['value_datatype'] == 'map':
                for record_value in record_values:
                    # approximate byte size of the serialized map; the 51-byte
                    # offset presumably subtracts str-object overhead — TODO confirm
                    record_size = sys.getsizeof(json.dumps(str(record_value)).replace(' ','')) - 51
                    if record_size >= value:
                        found = True
                        break
            else:
                for record_value in record_values:
                    if len(record_value) >= value:
                        found = True
                        break
            if not found:
                return False
        elif key in ('max_size', 'max_length'):
            found = False
            if self.keyMap[field_key]['value_datatype'] == 'map':
                for record_value in record_values:
                    record_size = sys.getsizeof(json.dumps(str(record_value)).replace(' ','')) - 51
                    if record_size <= value:
                        found = True
                        break
            else:
                for record_value in record_values:
                    if len(record_value) <= value:
                        found = True
                        break
            if not found:
                return False
        elif key == 'min_value':
            found = False
            for record_value in record_values:
                if record_value >= value:
                    found = True
                    break
            if not found:
                return False
        elif key == 'max_value':
            found = False
            for record_value in record_values:
                if record_value <= value:
                    found = True
                    break
            if not found:
                return False
        elif key == 'equal_to':
            found = False
            for record_value in record_values:
                if record_value == value:
                    found = True
                    break
            if not found:
                return False
        elif key == 'greater_than':
            found = False
            for record_value in record_values:
                if record_value > value:
                    found = True
                    break
            if not found:
                return False
        elif key == 'less_than':
            found = False
            for record_value in record_values:
                if record_value < value:
                    found = True
                    break
            if not found:
                return False
        elif key == 'excluded_values':
            for record_value in record_values:
                if record_value in value:
                    return False
        elif key == 'discrete_values':
            found = False
            for record_value in record_values:
                if record_value in value:
                    found = True
                    break
            if not found:
                return False
        elif key == 'integer_data':
            # exact class comparison, so booleans do not count as integers
            found = False
            dummy_int = 1
            for record_value in record_values:
                if record_value.__class__ == dummy_int.__class__:
                    found = True
                    break
            if value != found:
                return False
        elif key == 'byte_data':
            found = False
            for record_value in record_values:
                try:
                    decoded_bytes = b64decode(record_value)
                except:
                    decoded_bytes = ''
                if isinstance(decoded_bytes, bytes):
                    found = True
                    break
            if value != found:
                return False
        elif key == 'must_contain':
            # every regex must match at least one value
            for regex in value:
                regex_pattern = re.compile(regex)
                found = False
                for record_value in record_values:
                    if regex_pattern.findall(record_value):
                        found = True
                        break
                if not found:
                    return False
        elif key == 'must_not_contain':
            # no regex may match any value
            for regex in value:
                regex_pattern = re.compile(regex)
                found = False
                for record_value in record_values:
                    if regex_pattern.findall(record_value):
                        found = True
                        break
                if found:
                    return False
        elif key == 'contains_either':
            # at least one regex must match at least one value
            found = False
            for regex in value:
                regex_pattern = re.compile(regex)
                for record_value in record_values:
                    if regex_pattern.findall(record_value):
                        found = True
                        break
                if found:
                    break
            if not found:
                return False
        elif key == 'unique_values':
            for record_value in record_values:
                unique_values = True
                if len(record_value) != len(set(record_value)):
                    unique_values = False
                if value != unique_values:
                    return False
    return True
def _validate_dict(self, input_dict, schema_dict, path_to_root, object_title=''):
    ''' a helper method for recursively validating keys in dictionaries

    :param input_dict: dictionary of input data at path_to_root
    :param schema_dict: dictionary with the schema declaration for this path
    :param path_to_root: string with dot-path of the dictionary in the record
    :param object_title: [optional] string with title for error reporting
    :return: input_dict (with defaults injected for missing optional fields)
    :raises InputValidationError: if any key or value fails validation
    '''

# reconstruct key path to current dictionary in model
    # collapse item indices (e.g. [3]) to [0] to match keyMap rule paths
    rules_top_level_key = re.sub('\[\d+\]', '[0]', path_to_root)
    map_rules = self.keyMap[rules_top_level_key]

# construct list error report template
    map_error = {
        'object_title': object_title,
        'model_schema': self.schema,
        'input_criteria': map_rules,
        'failed_test': 'value_datatype',
        'input_path': path_to_root,
        'error_value': 0,
        'error_code': 4001
    }

# validate map size
    # approximate byte size of the serialized map; the 51-byte offset
    # presumably subtracts str-object overhead — TODO confirm
    if 'min_size' in map_rules.keys():
        input_size = sys.getsizeof(json.dumps(str(input_dict)).replace(' ','')) - 51
        if input_size < map_rules['min_size']:
            map_error['failed_test'] = 'min_size'
            map_error['error_value'] = input_size
            map_error['error_code'] = 4031
            raise InputValidationError(map_error)
    if 'max_size' in map_rules.keys():
        input_size = sys.getsizeof(json.dumps(str(input_dict)).replace(' ','')) - 51
        if input_size > map_rules['max_size']:
            map_error['failed_test'] = 'max_size'
            map_error['error_value'] = input_size
            map_error['error_code'] = 4032
            raise InputValidationError(map_error)

# construct lists of keys in input dictionary
    input_keys = []
    input_key_list = []
    for key in input_dict.keys():
        error_dict = {
            'object_title': object_title,
            'model_schema': self.schema,
            'input_criteria': self.keyMap[rules_top_level_key],
            'failed_test': 'key_datatype',
            'input_path': path_to_root,
            'error_value': key,
            'error_code': 4004
        }
        error_dict['input_criteria']['key_datatype'] = 'string'
        if path_to_root == '.':
            if not isinstance(key, str):
                input_key_name = path_to_root + str(key)
                error_dict['input_path'] = input_key_name
                raise InputValidationError(error_dict)
            input_key_name = path_to_root + key
        else:
            if not isinstance(key, str):
                input_key_name = path_to_root + '.' + str(key)
                error_dict['input_path'] = input_key_name
                raise InputValidationError(error_dict)
            input_key_name = path_to_root + '.' + key
        input_keys.append(input_key_name)
        input_key_list.append(key)

# TODO: validate top-level key and values against identical to reference

# TODO: run lambda function and call validation

# construct lists of keys in schema dictionary
    max_keys = []
    max_key_list = []
    req_keys = []
    req_key_list = []
    for key in schema_dict.keys():
        if path_to_root == '.':
            schema_key_name = path_to_root + key
        else:
            schema_key_name = path_to_root + '.' + key
        max_keys.append(schema_key_name)
        max_key_list.append(key)
        rules_schema_key_name = re.sub('\[\d+\]', '[0]', schema_key_name)
        if self.keyMap[rules_schema_key_name]['required_field']:
            req_keys.append(schema_key_name)
            req_key_list.append(key)

# validate existence of required fields
    missing_keys = set(req_keys) - set(input_keys)
    if missing_keys:
        error_dict = {
            'object_title': object_title,
            'model_schema': self.schema,
            'input_criteria': self.keyMap[rules_top_level_key],
            'failed_test': 'required_field',
            'input_path': path_to_root,
            'error_value': list(missing_keys),
            'error_code': 4002
        }
        error_dict['input_criteria']['required_keys'] = req_keys
        raise InputValidationError(error_dict)

# validate existence of extra fields
    extra_keys = set(input_keys) - set(max_keys)
    if extra_keys and not self.keyMap[rules_top_level_key]['extra_fields']:
        extra_key_list = []
        for key in extra_keys:
            pathless_key = re.sub(rules_top_level_key, '', key, count=1)
            extra_key_list.append(pathless_key)
        error_dict = {
            'object_title': object_title,
            'model_schema': self.schema,
            'input_criteria': self.keyMap[rules_top_level_key],
            'failed_test': 'extra_fields',
            'input_path': path_to_root,
            'error_value': extra_key_list,
            'error_code': 4003
        }
        error_dict['input_criteria']['maximum_scope'] = max_key_list
        raise InputValidationError(error_dict)

# validate datatype of value
    for key, value in input_dict.items():
        if path_to_root == '.':
            input_key_name = path_to_root + key
        else:
            input_key_name = path_to_root + '.' + key
        rules_input_key_name = re.sub('\[\d+\]', '[0]', input_key_name)
        # extra keys (allowed by extra_fields) are skipped entirely
        if input_key_name in max_keys:
            input_criteria = self.keyMap[rules_input_key_name]
            error_dict = {
                'object_title': object_title,
                'model_schema': self.schema,
                'input_criteria': input_criteria,
                'failed_test': 'value_datatype',
                'input_path': input_key_name,
                'error_value': value,
                'error_code': 4001
            }
            try:
                value_index = self._datatype_classes.index(value.__class__)
            except:
                error_dict['error_value'] = value.__class__.__name__
                raise InputValidationError(error_dict)
            value_type = self._datatype_names[value_index]
            # a 'null' schema declaration accepts any datatype
            if input_criteria['value_datatype'] == 'null':
                pass
            else:
                if value_type != input_criteria['value_datatype']:
                    raise InputValidationError(error_dict)

        # call appropriate validation sub-routine for datatype of value
            if value_type == 'boolean':
                input_dict[key] = self._validate_boolean(value, input_key_name, object_title)
            elif value_type == 'number':
                input_dict[key] = self._validate_number(value, input_key_name, object_title)
            elif value_type == 'string':
                input_dict[key] = self._validate_string(value, input_key_name, object_title)
            elif value_type == 'map':
                input_dict[key] = self._validate_dict(value, schema_dict[key], input_key_name, object_title)
            elif value_type == 'list':
                input_dict[key] = self._validate_list(value, schema_dict[key], input_key_name, object_title)

# set default values for empty optional fields
    for key in max_key_list:
        if key not in input_key_list:
            indexed_key = max_keys[max_key_list.index(key)]
            if indexed_key in self.components.keys():
                if 'default_value' in self.components[indexed_key]:
                    input_dict[key] = self.components[indexed_key]['default_value']
    return input_dict
def _validate_list(self, input_list, schema_list, path_to_root, object_title=''):
    '''
    a helper method for recursively validating items in a list

    :param input_list: list with items to validate
    :param schema_list: list from the model schema at the same path (item 0 is the prototype)
    :param path_to_root: string with dot-path to the list inside the record
    :param object_title: [optional] string with title of the input for error reporting
    :return: input_list (raises InputValidationError on the first failed criterion)
    '''
    # construct rules for list and items
    # item designators (e.g. [3]) are normalized to [0] because criteria are
    # only declared for the prototype (first) item of each list in the model
    rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
    list_rules = self.keyMap[rules_path_to_root]
    initial_key = rules_path_to_root + '[0]'
    item_rules = self.keyMap[initial_key]
    # construct list error report template (specialized before each raise)
    list_error = {
        'object_title': object_title,
        'model_schema': self.schema,
        'input_criteria': list_rules,
        'failed_test': 'value_datatype',
        'input_path': path_to_root,
        'error_value': 0,
        'error_code': 4001
    }
    # validate list rules (size checks run before any per-item validation)
    if 'min_size' in list_rules.keys():
        if len(input_list) < list_rules['min_size']:
            list_error['failed_test'] = 'min_size'
            list_error['error_value'] = len(input_list)
            list_error['error_code'] = 4031
            raise InputValidationError(list_error)
    if 'max_size' in list_rules.keys():
        if len(input_list) > list_rules['max_size']:
            list_error['failed_test'] = 'max_size'
            list_error['error_value'] = len(input_list)
            list_error['error_code'] = 4032
            raise InputValidationError(list_error)
    # construct item error report template
    item_error = {
        'object_title': object_title,
        'model_schema': self.schema,
        'input_criteria': item_rules,
        'failed_test': 'value_datatype',
        'input_path': initial_key,
        'error_value': None,
        'error_code': 4001
    }
    # validate datatype of items
    for i in range(len(input_list)):
        input_path = path_to_root + '[%s]' % i
        item = input_list[i]
        item_error['input_path'] = input_path
        # a python class not in the recognized datatype registry is
        # reported as a datatype failure with the class name as value
        try:
            item_index = self._datatype_classes.index(item.__class__)
        except:
            item_error['error_value'] = item.__class__.__name__
            raise InputValidationError(item_error)
        item_type = self._datatype_names[item_index]
        item_error['error_value'] = item
        # a 'null' prototype accepts items of any datatype
        if item_rules['value_datatype'] == 'null':
            pass
        else:
            if item_type != item_rules['value_datatype']:
                raise InputValidationError(item_error)
        # call appropriate validation sub-routine for datatype of item
        # (sub-routines may coerce the value, so the item is re-assigned)
        if item_type == 'boolean':
            input_list[i] = self._validate_boolean(item, input_path, object_title)
        elif item_type == 'number':
            input_list[i] = self._validate_number(item, input_path, object_title)
        elif item_type == 'string':
            input_list[i] = self._validate_string(item, input_path, object_title)
        elif item_type == 'map':
            input_list[i] = self._validate_dict(item, schema_list[0], input_path, object_title)
        elif item_type == 'list':
            input_list[i] = self._validate_list(item, schema_list[0], input_path, object_title)
    # validate unique values in list (after per-item validation/coercion)
    if 'unique_values' in list_rules.keys():
        if len(set(input_list)) < len(input_list):
            list_error['failed_test'] = 'unique_values'
            list_error['error_value'] = input_list
            list_error['error_code'] = 4033
            raise InputValidationError(list_error)
    # TODO: validate top-level item values against identical to reference
    # TODO: run lambda function and call validation url
    return input_list
def _validate_string(self, input_string, path_to_root, object_title=''):
    '''
    a helper method for validating properties of a string

    criteria are applied in a fixed order and the first failed
    criterion raises an InputValidationError with a full error report

    :param input_string: string to validate
    :param path_to_root: string with dot-path to the field inside the record
    :param object_title: [optional] string with title of the input for error reporting
    :return: input_string (raises InputValidationError on the first failed criterion)
    '''
    # item designators (e.g. [3]) are normalized to [0] because criteria are
    # only declared for the prototype (first) item of each list in the model
    rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
    input_criteria = self.keyMap[rules_path_to_root]
    # generic error report template, specialized before each raise below
    error_dict = {
        'object_title': object_title,
        'model_schema': self.schema,
        'input_criteria': input_criteria,
        'failed_test': 'value_datatype',
        'input_path': path_to_root,
        'error_value': input_string,
        'error_code': 4001
    }
    # byte data fields must contain base64 decodable input
    if 'byte_data' in input_criteria:
        if input_criteria['byte_data']:
            error_dict['failed_test'] = 'byte_data'
            error_dict['error_code'] = 4011
            try:
                decoded_bytes = b64decode(input_string)
            except:
                raise InputValidationError(error_dict)
            if not isinstance(decoded_bytes, bytes):
                raise InputValidationError(error_dict)
    # value comparisons rely on lexicographical string ordering
    if 'min_value' in input_criteria:
        if input_string < input_criteria['min_value']:
            error_dict['failed_test'] = 'min_value'
            error_dict['error_code'] = 4022
            raise InputValidationError(error_dict)
    if 'max_value' in input_criteria:
        if input_string > input_criteria['max_value']:
            error_dict['failed_test'] = 'max_value'
            error_dict['error_code'] = 4023
            raise InputValidationError(error_dict)
    if 'greater_than' in input_criteria:
        if input_string <= input_criteria['greater_than']:
            error_dict['failed_test'] = 'greater_than'
            error_dict['error_code'] = 4024
            raise InputValidationError(error_dict)
    if 'less_than' in input_criteria:
        if input_string >= input_criteria['less_than']:
            error_dict['failed_test'] = 'less_than'
            error_dict['error_code'] = 4025
            raise InputValidationError(error_dict)
    if 'equal_to' in input_criteria:
        if input_string != input_criteria['equal_to']:
            error_dict['failed_test'] = 'equal_to'
            error_dict['error_code'] = 4026
            raise InputValidationError(error_dict)
    # length boundary checks
    if 'min_length' in input_criteria:
        if len(input_string) < input_criteria['min_length']:
            error_dict['failed_test'] = 'min_length'
            error_dict['error_code'] = 4012
            raise InputValidationError(error_dict)
    if 'max_length' in input_criteria:
        if len(input_string) > input_criteria['max_length']:
            error_dict['failed_test'] = 'max_length'
            error_dict['error_code'] = 4013
            raise InputValidationError(error_dict)
    # regex checks: every must_not_contain pattern must be absent,
    # every must_contain pattern must be present, and at least one
    # contains_either pattern must be present
    if 'must_not_contain' in input_criteria:
        for regex in input_criteria['must_not_contain']:
            regex_pattern = re.compile(regex)
            if regex_pattern.findall(input_string):
                error_dict['failed_test'] = 'must_not_contain'
                error_dict['error_code'] = 4014
                raise InputValidationError(error_dict)
    if 'must_contain' in input_criteria:
        for regex in input_criteria['must_contain']:
            regex_pattern = re.compile(regex)
            if not regex_pattern.findall(input_string):
                error_dict['failed_test'] = 'must_contain'
                error_dict['error_code'] = 4015
                raise InputValidationError(error_dict)
    if 'contains_either' in input_criteria:
        regex_match = False
        for regex in input_criteria['contains_either']:
            regex_pattern = re.compile(regex)
            if regex_pattern.findall(input_string):
                regex_match = True
        if not regex_match:
            error_dict['failed_test'] = 'contains_either'
            error_dict['error_code'] = 4016
            raise InputValidationError(error_dict)
    # enumerated value checks
    if 'discrete_values' in input_criteria:
        if input_string not in input_criteria['discrete_values']:
            error_dict['failed_test'] = 'discrete_values'
            error_dict['error_code'] = 4041
            raise InputValidationError(error_dict)
    if 'excluded_values' in input_criteria:
        if input_string in input_criteria['excluded_values']:
            error_dict['failed_test'] = 'excluded_values'
            error_dict['error_code'] = 4042
            raise InputValidationError(error_dict)
    # TODO: validate string against identical to reference
    # TODO: run lambda function and call validation url
    return input_string
def _validate_boolean(self, input_boolean, path_to_root, object_title=''):
    '''
    a helper method for validating properties of a boolean

    :param input_boolean: boolean to validate
    :param path_to_root: string with dot-path to the field inside the record
    :param object_title: [optional] string with title of the input for error reporting
    :return: input_boolean (raises InputValidationError if a criterion fails)
    '''
    # item designators are normalized to [0] since criteria are declared
    # only for the prototype (first) item of each list in the model
    normalized_path = re.sub('\[\d+\]', '[0]', path_to_root)
    criteria = self.keyMap[normalized_path]
    # template for the report handed to InputValidationError
    report = dict(
        object_title=object_title,
        model_schema=self.schema,
        input_criteria=criteria,
        failed_test='value_datatype',
        input_path=path_to_root,
        error_value=input_boolean,
        error_code=4001
    )
    # the only criterion applicable to a boolean is an exact match
    if 'equal_to' in criteria and input_boolean != criteria['equal_to']:
        report['failed_test'] = 'equal_to'
        report['error_code'] = 4026
        raise InputValidationError(report)
    # TODO: validate boolean against identical to reference
    # TODO: run lambda function and call validation url
    return input_boolean
def _ingest_dict(self, input_dict, schema_dict, path_to_root):
    '''
    a helper method for ingesting keys, value pairs in a dictionary

    each declared field receives (in priority order) the input value if
    its datatype matches the schema, the declared default value, or an
    empty value appropriate to its datatype; undeclared input fields are
    retained only when extra_fields is enabled for the dictionary

    :param input_dict: dictionary with raw input to ingest
    :param schema_dict: dictionary from the model schema at the same path
    :param path_to_root: string with dot-path to the dictionary inside the record
    :return: valid_dict
    '''
    valid_dict = {}
    # construct path to root for rules
    # item designators are normalized to [0] to match keyMap declarations
    rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
    # iterate over keys in schema dict
    for key, value in schema_dict.items():
        key_path = path_to_root
        if not key_path == '.':
            key_path += '.'
        key_path += key
        rules_key_path = re.sub('\[\d+\]', '[0]', key_path)
        value_match = False
        if key in input_dict.keys():
            # the schema prototype value determines the declared datatype
            value_index = self._datatype_classes.index(value.__class__)
            value_type = self._datatype_names[value_index]
            try:
                v_index = self._datatype_classes.index(input_dict[key].__class__)
                v_type = self._datatype_names[v_index]
                if v_type == value_type:
                    value_match = True
            except:
                # unrecognized python class: treat as a datatype mismatch
                value_match = False
        if value_match:
            # recurse (or validate) according to the declared datatype
            if value_type == 'null':
                valid_dict[key] = input_dict[key]
            elif value_type == 'boolean':
                valid_dict[key] = self._ingest_boolean(input_dict[key], key_path)
            elif value_type == 'number':
                valid_dict[key] = self._ingest_number(input_dict[key], key_path)
            elif value_type == 'string':
                valid_dict[key] = self._ingest_string(input_dict[key], key_path)
            elif value_type == 'map':
                valid_dict[key] = self._ingest_dict(input_dict[key], schema_dict[key], key_path)
            elif value_type == 'list':
                valid_dict[key] = self._ingest_list(input_dict[key], schema_dict[key], key_path)
        else:
            # missing or mismatched input: substitute the declared default
            # or an empty value appropriate to the datatype
            value_type = self.keyMap[rules_key_path]['value_datatype']
            if 'default_value' in self.keyMap[rules_key_path]:
                valid_dict[key] = self.keyMap[rules_key_path]['default_value']
            elif value_type == 'null':
                valid_dict[key] = None
            elif value_type == 'boolean':
                valid_dict[key] = False
            elif value_type == 'number':
                valid_dict[key] = 0.0
                # integer fields get an integer zero instead of a float
                if 'integer_data' in self.keyMap[rules_key_path].keys():
                    if self.keyMap[rules_key_path]['integer_data']:
                        valid_dict[key] = 0
            elif value_type == 'string':
                valid_dict[key] = ''
            elif value_type == 'list':
                valid_dict[key] = []
            elif value_type == 'map':
                # nested maps recurse with empty input to fill their defaults
                valid_dict[key] = self._ingest_dict({}, schema_dict[key], key_path)
    # add extra fields if set to True
    if self.keyMap[rules_path_to_root]['extra_fields']:
        for key, value in input_dict.items():
            if key not in valid_dict.keys():
                valid_dict[key] = value
    return valid_dict
def _ingest_list(self, input_list, schema_list, path_to_root):
    '''
    a helper method for ingesting items in a list

    ingestion is best-effort: items whose datatype does not match the
    prototype or which fail validation are silently dropped, and the
    list is truncated once a declared max_size is reached

    :param input_list: list with raw input to ingest
    :param schema_list: list from the model schema at the same path (item 0 is the prototype)
    :param path_to_root: string with dot-path to the list inside the record
    :return: valid_list
    '''
    valid_list = []
    # construct max list size
    # a declared-but-falsy max_size short-circuits to an empty list
    max_size = None
    rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
    if 'max_size' in self.keyMap[rules_path_to_root].keys():
        if not self.keyMap[rules_path_to_root]['max_size']:
            return valid_list
        else:
            max_size = self.keyMap[rules_path_to_root]['max_size']
    # iterate over items in input list
    if input_list:
        # the schema prototype (first item) determines the accepted datatype
        rules_index = self._datatype_classes.index(schema_list[0].__class__)
        rules_type = self._datatype_names[rules_index]
        for i in range(len(input_list)):
            item_path = '%s[%s]' % (path_to_root, i)
            value_match = False
            try:
                item_index = self._datatype_classes.index(input_list[i].__class__)
                item_type = self._datatype_names[item_index]
                if item_type == rules_type:
                    value_match = True
            except:
                # unrecognized python class: skip the item
                value_match = False
            if value_match:
                # validation failures are swallowed so invalid items are
                # simply omitted from the ingested output
                try:
                    if item_type == 'boolean':
                        valid_list.append(self._validate_boolean(input_list[i], item_path))
                    elif item_type == 'number':
                        valid_list.append(self._validate_number(input_list[i], item_path))
                    elif item_type == 'string':
                        valid_list.append(self._validate_string(input_list[i], item_path))
                    elif item_type == 'map':
                        valid_list.append(self._ingest_dict(input_list[i], schema_list[0], item_path))
                    elif item_type == 'list':
                        valid_list.append(self._ingest_list(input_list[i], schema_list[0], item_path))
                except:
                    pass
            # stop ingesting as soon as the list reaches max_size
            if isinstance(max_size, int):
                if len(valid_list) == max_size:
                    return valid_list
    return valid_list
def _ingest_number(self, input_number, path_to_root):
    '''
    a helper method for ingesting a number

    substitutes a fallback value whenever validation fails

    :param input_number: number found in the raw input
    :param path_to_root: string with dot-path to the field inside the record
    :return: valid_number
    '''
    try:
        return self._validate_number(input_number, path_to_root)
    except:
        # invalid input: fall back to the declared default, an integer
        # zero for integer fields, or a float zero otherwise
        rules_path = re.sub('\[\d+\]', '[0]', path_to_root)
        field_rules = self.keyMap[rules_path]
        if 'default_value' in field_rules:
            return field_rules['default_value']
        elif 'integer_data' in field_rules.keys():
            if field_rules['integer_data']:
                return 0
    return 0.0
def _ingest_string(self, input_string, path_to_root):
    '''
    a helper method for ingesting a string

    substitutes a fallback value whenever validation fails

    :param input_string: string found in the raw input
    :param path_to_root: string with dot-path to the field inside the record
    :return: valid_string
    '''
    try:
        return self._validate_string(input_string, path_to_root)
    except:
        # invalid input: fall back to the declared default if one exists
        rules_path = re.sub('\[\d+\]', '[0]', path_to_root)
        field_rules = self.keyMap[rules_path]
        if 'default_value' in field_rules:
            return field_rules['default_value']
    return ''
def _ingest_boolean(self, input_boolean, path_to_root):
    '''
    a helper method for ingesting a boolean

    substitutes a fallback value whenever validation fails

    :param input_boolean: boolean found in the raw input
    :param path_to_root: string with dot-path to the field inside the record
    :return: valid_boolean
    '''
    try:
        return self._validate_boolean(input_boolean, path_to_root)
    except:
        # invalid input: fall back to the declared default if one exists
        rules_path = re.sub('\[\d+\]', '[0]', path_to_root)
        field_rules = self.keyMap[rules_path]
        if 'default_value' in field_rules:
            return field_rules['default_value']
    return False
def _reconstruct(self, path_to_root):
    '''
    a helper method for finding the schema endpoint from a path to root

    :param path_to_root: string with dot path to root (e.g. '.list[0].field')
    :return: list, dict, string, number, or boolean at path to root
    '''
    item_marker = re.compile('\d+\\]')
    separator = re.compile('\\.|\\[')
    # splitting on '.' and '[' turns '.a[0].b' into ['', 'a', '0]', 'b']
    segments = separator.split(path_to_root)
    endpoint = self.schema
    # an empty second segment means the path is the root itself ('.')
    if segments[1]:
        for segment in segments[1:]:
            if item_marker.match(segment):
                # item designator: descend into the prototype (first) item
                endpoint = endpoint[0]
            else:
                endpoint = endpoint[segment]
    return endpoint
def _walk(self, path_to_root, record_dict):
    '''
    a helper method for finding the record endpoint from a path to root

    collects every value found at the path; when the path crosses a
    list, one endpoint is appended for each matching item in the list

    :param path_to_root: string with dot path to root (e.g. '.list[0].field')
    :param record_dict: dictionary with the record to walk
    :return: list of endpoints (list, dict, string, number, or boolean) at path to root
    '''
    # split path to root into segments
    # splitting on '.' and '[' turns '.a[0].b' into ['', 'a', '0]', 'b']
    item_pattern = re.compile('\d+\\]')
    dot_pattern = re.compile('\\.|\\[')
    path_segments = dot_pattern.split(path_to_root)
    # construct empty fields
    record_endpoints = []
    # determine starting position
    # a leading dot produces an empty first segment which is discarded
    if not path_segments[0]:
        path_segments.pop(0)
    # define internal recursive function
    # results accumulate into record_endpoints via closure
    def _walk_int(path_segments, record_dict):
        record_endpoint = record_dict
        for i in range(0, len(path_segments)):
            if item_pattern.match(path_segments[i]):
                # item designator: fan out over every item in the list
                for j in range(0, len(record_endpoint)):
                    if len(path_segments) == 2:
                        record_endpoints.append(record_endpoint[j])
                    else:
                        # only recurse from the first item designator in
                        # this segment chain; deeper ones are handled by
                        # the recursive call itself
                        stop_chain = False
                        for x in range(0, i):
                            if item_pattern.match(path_segments[x]):
                                stop_chain = True
                        if not stop_chain:
                            shortened_segments = []
                            for z in range(i + 1, len(path_segments)):
                                shortened_segments.append(path_segments[z])
                            _walk_int(shortened_segments, record_endpoint[j])
            else:
                # plain key segment: descend (or record the endpoint if
                # this is the last segment), unless an earlier item
                # designator already delegated to a recursive call
                stop_chain = False
                for y in range(0, i):
                    if item_pattern.match(path_segments[y]):
                        stop_chain = True
                if not stop_chain:
                    if len(path_segments) == i + 1:
                        record_endpoints.append(record_endpoint[path_segments[i]])
                    else:
                        record_endpoint = record_endpoint[path_segments[i]]
    # conduct recursive walk
    _walk_int(path_segments, record_dict)
    return record_endpoints
def validate(self, input_data, path_to_root='', object_title=''):
    '''
    a core method for validating input against the model

    input_data is only returned if all data is valid

    :param input_data: list, dict, string, number, or boolean to validate
    :param path_to_root: [optional] string with dot-path of model component
    :param object_title: [optional] string with name of input to validate
    :return: input_data (or InputValidationError)
    '''
    __name__ = '%s.validate' % self.__class__.__name__
    _path_arg = '%s(path_to_root="...")' % __name__
    _title_arg = '%s(object_title="...")' % __name__
    # validate input
    # path_to_root is normalized to the leading-dot form used by keyMap
    copy_path = path_to_root
    if path_to_root:
        if not isinstance(path_to_root, str):
            raise ModelValidationError('%s must be a string.' % _path_arg)
        else:
            if path_to_root[0] != '.':
                copy_path = '.%s' % path_to_root
        if not copy_path in self.keyMap.keys():
            raise ModelValidationError('%s does not exist in components %s.' % (_path_arg.replace('...', path_to_root), self.keyMap.keys()))
    else:
        # an empty path means the root of the model
        copy_path = '.'
    if object_title:
        if not isinstance(object_title, str):
            raise ModelValidationError('%s must be a string' % _title_arg)
    # construct generic error dictionary
    error_dict = {
        'object_title': object_title,
        'model_schema': self.schema,
        'input_criteria': self.keyMap[copy_path],
        'failed_test': 'value_datatype',
        'input_path': copy_path,
        'error_value': input_data,
        'error_code': 4001
    }
    # determine value type of input data
    # a python class not in the recognized datatype registry is reported
    # as a datatype failure with the class name as the error value
    try:
        input_index = self._datatype_classes.index(input_data.__class__)
    except:
        error_dict['error_value'] = input_data.__class__.__name__
        raise InputValidationError(error_dict)
    input_type = self._datatype_names[input_index]
    # validate input data type
    if input_type != self.keyMap[copy_path]['value_datatype']:
        raise InputValidationError(error_dict)
    # run helper method appropriate to data type
    if input_type == 'boolean':
        input_data = self._validate_boolean(input_data, copy_path, object_title)
    elif input_type == 'number':
        input_data = self._validate_number(input_data, copy_path, object_title)
    elif input_type == 'string':
        input_data = self._validate_string(input_data, copy_path, object_title)
    elif input_type == 'list':
        # container types also need the matching schema fragment for recursion
        schema_list = self._reconstruct(copy_path)
        input_data = self._validate_list(input_data, schema_list, copy_path, object_title)
    elif input_type == 'map':
        schema_dict = self._reconstruct(copy_path)
        input_data = self._validate_dict(input_data, schema_dict, copy_path, object_title)
    return input_data
def ingest(self, **kwargs):
    '''
    a core method to ingest and validate arbitrary keyword data

    **NOTE: data is always returned with this method**

    for each key in the model, a value is returned according
    to the following priority:

        1. value in kwargs if field passes validation test
        2. default value declared for the key in the model
        3. empty value appropriate to datatype of key in the model

    **NOTE: as long as a default value is provided for each key-
    value, returned data will be model valid

    **NOTE: if 'extra_fields' is True for a dictionary, the key-
    value pair of all fields in kwargs which are not declared in
    the model will also be added to the corresponding dictionary
    data

    **NOTE: if 'max_size' is declared for a list, method will
    stop adding input to the list once it reaches max size

    :param kwargs: key, value pairs
    :return: dictionary with keys and value
    '''
    __name__ = '%s.ingest' % self.__class__.__name__
    # ingestion always starts at the root of the schema
    return self._ingest_dict(kwargs, self.schema, '.')
def query(self, query_criteria, valid_record=None):
    '''
    a core method for querying model valid data with criteria

    **NOTE: input is only returned if all fields & qualifiers are valid for model

    :param query_criteria: dictionary with model field names and query qualifiers
    :param valid_record: dictionary with model valid record
    :return: boolean (or QueryValidationError)

    an example of how to construct the query_criteria argument:

    query_criteria = {
        '.path.to.number': {
            'min_value': 4.5
        },
        '.path.to.string': {
            'must_contain': [ '\\regex' ]
        }
    }

    **NOTE: for a full list of operators for query_criteria based upon field
    datatype, see either the query-rules.json file or REFERENCE file
    '''
    __name__ = '%s.query' % self.__class__.__name__
    _query_arg = '%s(query_criteria={...})' % __name__
    _record_arg = '%s(valid_record={...})' % __name__
    # validate input
    if not isinstance(query_criteria, dict):
        raise ModelValidationError('%s must be a dictionary.' % _query_arg)
    # convert javascript dot_path to class dot_path
    # a bare scalar value is shorthand for an equal_to qualifier;
    # fields rewritten either way are remembered so that any error
    # message can be translated back into the caller's terms below
    criteria_copy = {}
    equal_fields = []
    dot_fields = []
    for key, value in query_criteria.items():
        copy_key = key
        if not key:
            copy_key = '.'
        else:
            if key[0] != '.':
                copy_key = '.%s' % key
                dot_fields.append(copy_key)
        criteria_copy[copy_key] = value
        if value.__class__ in self._datatype_classes[0:4]:
            criteria_copy[copy_key] = {
                'equal_to': value
            }
            equal_fields.append(copy_key)
    # validate query criteria against query rules
    query_kwargs = {
        'fields_dict': criteria_copy,
        'fields_rules': self.queryRules,
        'declared_value': False
    }
    try:
        self._validate_fields(**query_kwargs)
    except ModelValidationError as err:
        # rewrite the validation error message so it refers to the
        # caller's original field names (without the injected dot
        # prefix or the synthesized equal_to qualifier)
        message = err.error['message']
        for field in equal_fields:
            equal_error = 'field %s qualifier equal_to' % field
            if message.find(equal_error) > -1:
                message = message.replace(equal_error, 'field %s' % field)
                break
        field_pattern = re.compile('ield\s(\..*?)\s')
        field_name = field_pattern.findall(message)
        if field_name:
            if field_name[0] in dot_fields:
                def _replace_field(x):
                    return 'ield %s ' % x.group(1)[1:]
                message = field_pattern.sub(_replace_field, message)
        raise QueryValidationError(message)
    # query test record
    # every criterion must evaluate true for the record to match
    if valid_record:
        if not isinstance(valid_record, dict):
            raise ModelValidationError('%s must be a dictionary.' % _record_arg)
        for key, value in criteria_copy.items():
            eval_outcome = self._evaluate_field(valid_record, key, value)
            if not eval_outcome:
                return False
    return True
|
collectiveacuity/jsonModel
|
jsonmodel/validators.py
|
jsonModel._validate_string
|
python
|
def _validate_string(self, input_string, path_to_root, object_title=''):
    '''
    a helper method for validating properties of a string

    criteria are applied in a fixed order and the first failed
    criterion raises an InputValidationError with a full error report

    :param input_string: string to validate
    :param path_to_root: string with dot-path to the field inside the record
    :param object_title: [optional] string with title of the input for error reporting
    :return: input_string (raises InputValidationError on the first failed criterion)
    '''
    # item designators (e.g. [3]) are normalized to [0] because criteria are
    # only declared for the prototype (first) item of each list in the model
    rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
    input_criteria = self.keyMap[rules_path_to_root]
    # generic error report template, specialized before each raise below
    error_dict = {
        'object_title': object_title,
        'model_schema': self.schema,
        'input_criteria': input_criteria,
        'failed_test': 'value_datatype',
        'input_path': path_to_root,
        'error_value': input_string,
        'error_code': 4001
    }
    # byte data fields must contain base64 decodable input
    if 'byte_data' in input_criteria:
        if input_criteria['byte_data']:
            error_dict['failed_test'] = 'byte_data'
            error_dict['error_code'] = 4011
            try:
                decoded_bytes = b64decode(input_string)
            except:
                raise InputValidationError(error_dict)
            if not isinstance(decoded_bytes, bytes):
                raise InputValidationError(error_dict)
    # value comparisons rely on lexicographical string ordering
    if 'min_value' in input_criteria:
        if input_string < input_criteria['min_value']:
            error_dict['failed_test'] = 'min_value'
            error_dict['error_code'] = 4022
            raise InputValidationError(error_dict)
    if 'max_value' in input_criteria:
        if input_string > input_criteria['max_value']:
            error_dict['failed_test'] = 'max_value'
            error_dict['error_code'] = 4023
            raise InputValidationError(error_dict)
    if 'greater_than' in input_criteria:
        if input_string <= input_criteria['greater_than']:
            error_dict['failed_test'] = 'greater_than'
            error_dict['error_code'] = 4024
            raise InputValidationError(error_dict)
    if 'less_than' in input_criteria:
        if input_string >= input_criteria['less_than']:
            error_dict['failed_test'] = 'less_than'
            error_dict['error_code'] = 4025
            raise InputValidationError(error_dict)
    if 'equal_to' in input_criteria:
        if input_string != input_criteria['equal_to']:
            error_dict['failed_test'] = 'equal_to'
            error_dict['error_code'] = 4026
            raise InputValidationError(error_dict)
    # length boundary checks
    if 'min_length' in input_criteria:
        if len(input_string) < input_criteria['min_length']:
            error_dict['failed_test'] = 'min_length'
            error_dict['error_code'] = 4012
            raise InputValidationError(error_dict)
    if 'max_length' in input_criteria:
        if len(input_string) > input_criteria['max_length']:
            error_dict['failed_test'] = 'max_length'
            error_dict['error_code'] = 4013
            raise InputValidationError(error_dict)
    # regex checks: every must_not_contain pattern must be absent,
    # every must_contain pattern must be present, and at least one
    # contains_either pattern must be present
    if 'must_not_contain' in input_criteria:
        for regex in input_criteria['must_not_contain']:
            regex_pattern = re.compile(regex)
            if regex_pattern.findall(input_string):
                error_dict['failed_test'] = 'must_not_contain'
                error_dict['error_code'] = 4014
                raise InputValidationError(error_dict)
    if 'must_contain' in input_criteria:
        for regex in input_criteria['must_contain']:
            regex_pattern = re.compile(regex)
            if not regex_pattern.findall(input_string):
                error_dict['failed_test'] = 'must_contain'
                error_dict['error_code'] = 4015
                raise InputValidationError(error_dict)
    if 'contains_either' in input_criteria:
        regex_match = False
        for regex in input_criteria['contains_either']:
            regex_pattern = re.compile(regex)
            if regex_pattern.findall(input_string):
                regex_match = True
        if not regex_match:
            error_dict['failed_test'] = 'contains_either'
            error_dict['error_code'] = 4016
            raise InputValidationError(error_dict)
    # enumerated value checks
    if 'discrete_values' in input_criteria:
        if input_string not in input_criteria['discrete_values']:
            error_dict['failed_test'] = 'discrete_values'
            error_dict['error_code'] = 4041
            raise InputValidationError(error_dict)
    if 'excluded_values' in input_criteria:
        if input_string in input_criteria['excluded_values']:
            error_dict['failed_test'] = 'excluded_values'
            error_dict['error_code'] = 4042
            raise InputValidationError(error_dict)
    # TODO: validate string against identical to reference
    # TODO: run lambda function and call validation url
    return input_string
|
a helper method for validating properties of a string
:return: input_string
|
train
|
https://github.com/collectiveacuity/jsonModel/blob/1ea64c36d78add3faa7b85ff82c5ec685458c940/jsonmodel/validators.py#L1066-L1169
| null |
class jsonModel(object):
__rules__ = jsonLoader('jsonmodel', 'models/model-rules.json')
def __init__(self, data_model, query_rules=None):
    '''
    a method for testing data model declaration & initializing the class

    :param data_model: dictionary with json model architecture
    :param query_rules: [optional] dictionary with valid field type qualifiers
    :return: object with jsonModel methods
    '''
    # validate schema input
    if not isinstance(data_model, dict):
        raise ModelValidationError('Data model must be a dictionary.')
    elif 'schema' not in data_model.keys():
        raise ModelValidationError('Data model must have a schema key.')
    elif not isinstance(data_model['schema'], dict):
        raise ModelValidationError('Value for the data model "schema" field must be a dictionary.')
    elif not data_model['schema']:
        raise ModelValidationError('Data model "schema" field must not be empty.')
    # construct base methods
    # a deep copy prevents later mutation of the caller's data model
    from copy import deepcopy
    data_model = deepcopy(data_model)
    self.schema = data_model['schema']
    model_map = mapModel(self.schema)
    self.keyName = model_map.keyName
    self.keyCriteria = model_map.keyCriteria
    # construct protected type classes
    self._datatype_names = mapModel._datatype_names
    self._datatype_classes = mapModel._datatype_classes
    # validate absence of item designators in keys
    # only the prototype designator [0] is permitted in declared key names
    item_pattern = re.compile('\[\d+\]')
    for i in range(len(self.keyName)):
        patterns_found = item_pattern.findall(self.keyName[i])
        if patterns_found:
            for designator in patterns_found:
                if designator != '[0]':
                    message = 'Key name for schema field %s must not contain the item designator pattern %s' % (self.keyName[i], designator)
                    raise ModelValidationError(message)
    # validate existence of first item in list declarations
    key_set = set(self.keyName)
    for i in range(len(self.keyName)):
        if self.keyCriteria[i]['value_datatype'] == 'list':
            item_key = self.keyName[i] + '[0]'
            if not item_key in key_set:
                message = 'Schema field %s must declare an initial item for the list.' % self.keyName[i]
                raise ModelValidationError(message)
            # alter list requirement if first item is empty
            else:
                item_index = self.keyName.index(item_key)
                if not self.keyCriteria[item_index]['required_field']:
                    self.keyCriteria[i]['required_field'] = False
    # validate title input & construct title method
    self.title = ''
    if 'title' in data_model.keys():
        if not isinstance(data_model['title'], str):
            raise ModelValidationError('Value for model title must be a string.')
        self.title = data_model['title']
    # validate description input & construct description method
    self.description = ''
    if 'description' in data_model.keys():
        if not isinstance(data_model['description'], str):
            raise ModelValidationError('Value for model description must be a string.')
        self.description = data_model['description']
    # validate url input & construct url method
    # BUGFIX: previously assigned data_model['url'] to self.title (a
    # copy-paste error which clobbered the title and left self.url empty)
    self.url = ''
    if 'url' in data_model.keys():
        if not isinstance(data_model['url'], str):
            raise ModelValidationError('Value for model url must be a string.')
        self.url = data_model['url']
    # validate metadata input & construct metadata method
    self.metadata = {}
    if 'metadata' in data_model.keys():
        if not isinstance(data_model['metadata'], dict):
            raise ModelValidationError('Value for model metadata must be a dictionary.')
        self.metadata = data_model['metadata']
    # validate max size input & construct maxSize property
    # self.maxSize = None
    # if 'max_size' in data_model.keys():
    #     if not isinstance(data_model['max_size'], int):
    #         raise ModelValidationError('Value for model max_size must be a positive integer.')
    #     elif data_model['max_size'] < 0:
    #         raise ModelValidationError('Value for model max_size must be a positive integer.')
    #     elif data_model['max_size']:
    #         self.maxSize = data_model['max_size']
    # validate components input & construct component property
    self.components = {}
    if 'components' in data_model.keys():
        if not isinstance(data_model['components'], dict):
            raise ModelValidationError('Value for model components must be a dictionary.')
        self.components = self._validate_fields(data_model['components'], self.__rules__['components'])
    # construct keyMap fields from key names and key criteria
    self.keyMap = {}
    for i in range(len(self.keyName)):
        self.keyMap[self.keyName[i]] = self.keyCriteria[i]
    for key, value in self.components.items():
        # convert javascript dot_path to class dot_path
        dot_key = ''
        if not key:
            dot_key = '.'
        else:
            if key[0] != '.':
                dot_key = '.%s' % key
        # add component declarations to keyMap
        if key in self.keyMap.keys():
            for k, v in self.components[key].items():
                self.keyMap[key][k] = v
        elif dot_key and dot_key in self.keyMap.keys():
            for k, v in self.components[key].items():
                self.keyMap[dot_key][k] = v
    # validate default values in lists
    self._validate_defaults(self.keyMap)
    # construct queryRules property from class model rules
    # query rules drop declaration-only qualifiers and always allow
    # the value_exists operator
    self.queryRules = {}
    for key, value in self.__rules__['components'].items():
        remove_from_query = [ 'required_field', 'default_value', 'example_values', 'field_title', 'field_position', 'field_description', 'field_metadata', 'extra_fields' ]
        field_qualifiers = {
            'value_exists': False
        }
        for k, v in value.items():
            if k not in remove_from_query:
                field_qualifiers[k] = v
        self.queryRules[key] = field_qualifiers
    # validate query rules input and replace queryRules property
    if query_rules:
        if not isinstance(query_rules, dict):
            message = 'Value for query rules input must be a dictionary.'
            raise ModelValidationError(message)
        input_set = set(query_rules.keys())
        req_set = set(self.queryRules.keys())
        if input_set - req_set:
            message = 'Query rules input may only have %s field key names.' % req_set
            raise ModelValidationError(message)
        elif req_set - input_set:
            message = 'Query rules input must have all %s field key names.' % req_set
            raise ModelValidationError(message)
        for key in req_set:
            if not isinstance(query_rules[key], dict):
                message = 'Value for query rules %s field must be a dictionary.' % key
                raise ModelValidationError(message)
            input_qualifier_set = set(query_rules[key].keys())
            req_qualifier_set = set(self.queryRules[key].keys())
            if input_qualifier_set - req_qualifier_set:
                message = 'Query rules field %s may only have qualifiers %s' % (key, req_qualifier_set)
                raise ModelValidationError(message)
            for k, v in query_rules[key].items():
                # qualifier values must match the datatype declared in the defaults
                if v.__class__ != self.queryRules[key][k].__class__:
                    qualifier_index = self._datatype_classes.index(self.queryRules[key][k].__class__)
                    qualifier_type = self._datatype_names[qualifier_index]
                    message = 'Value for query rules field %s qualifier %s must be a "%s" datatype.' % (key, k, qualifier_type)
                    raise ModelValidationError(message)
        self.queryRules = query_rules
    def _validate_fields(self, fields_dict, fields_rules, declared_value=True):
        ''' a helper method for validating the qualifier criteria declared for model fields

        :param fields_dict: dictionary of field dot-paths mapped to their qualifier criteria
        :param fields_rules: dictionary with the qualifiers permitted for each field datatype
        :param declared_value: [optional] boolean to also test the value declared in the schema
        :return: fields_dict (unchanged; raises ModelValidationError on the first violation)
        '''
        # validate key names in fields
        for key, value in fields_dict.items():
            # convert javascript dot_path to class dot_path
            if not key:
                key = '.'
            else:
                if key[0] != '.':
                    key = '.%s' % key
            if key not in self.keyName:
                raise ModelValidationError('Field %s is not a field declared in model schema.' % key)
            elif not isinstance(value, dict):
                raise ModelValidationError('Value for field %s must be a dictionary.' % key)
            # validate field criteria are appropriate to field datatype
            value_type = self.keyCriteria[self.keyName.index(key)]['value_datatype']
            type_dict = {}
            if value_type == 'string':
                type_dict = fields_rules['.string_fields']
            elif value_type == 'number':
                type_dict = fields_rules['.number_fields']
            elif value_type == 'boolean':
                type_dict = fields_rules['.boolean_fields']
            elif value_type == 'list':
                type_dict = fields_rules['.list_fields']
            elif value_type == 'map':
                type_dict = fields_rules['.map_fields']
            elif value_type == 'null':
                type_dict = fields_rules['.null_fields']
            if set(value.keys()) - set(type_dict.keys()):
                raise ModelValidationError('Field %s may only have datatype %s qualifiers %s.' % (key, value_type, set(type_dict.keys())))
            # validate criteria qualifier values are appropriate datatype
            for k, v in value.items():
                v_index = self._datatype_classes.index(v.__class__)
                v_type = self._datatype_names[v_index]
                qualifier_index = self._datatype_classes.index(type_dict[k].__class__)
                qualifier_type = self._datatype_names[qualifier_index]
                if v_type != qualifier_type:
                    message = 'Value for field %s qualifier %s must be a %s datatype.' % (key, k, qualifier_type)
                    raise ModelValidationError(message)
                if qualifier_type == 'number':
                    # a qualifier declared as an int in the rules requires an int value
                    if isinstance(type_dict[k], int):
                        if not isinstance(v, int):
                            message = 'Value for field %s qualifier %s must be an integer.' % (key, k)
                            raise ModelValidationError(message)
                # validate internal logic of each qualifier value declaration
                if k in ('must_not_contain', 'must_contain', 'contains_either'):
                    for item in v:
                        if not isinstance(item, str):
                            message = 'Each item in list field %s qualifier %s must be a string.' % (key, k)
                            raise ModelValidationError(message)
                if k in ('min_length', 'max_length', 'min_size', 'max_size'):
                    if v < 0:
                        message = 'Value for field %s qualifier %s cannot be negative.' % (key, k)
                        raise ModelValidationError(message)
                if k in ('discrete_values', 'excluded_values', 'example_values'):
                    for item in v:
                        if value_type == 'number':
                            if not isinstance(item, int) and not isinstance(item, float):
                                message = 'Each item in field %s qualifier %s list must be a number.' % (key, k)
                                raise ModelValidationError(message)
                        elif not isinstance(item, str):
                            message = 'Each item in list for field %s qualifier %s must be a string.' % (key, k)
                            raise ModelValidationError(message)
                if k == 'identical_to':
                    if not v in self.keyName:
                        message = 'Value "%s" for field %s qualifier %s not found in components keys.' % (v, key, k)
                        raise ModelValidationError(message)
                if k == 'unique_values':
                    # uniqueness only makes sense for hashable item types
                    if v:
                        item_name = key + '[0]'
                        item_type = self.keyCriteria[self.keyName.index(item_name)]['value_datatype']
                        if not item_type in ('number', 'string'):
                            message = 'Field %s[0] must be either a string or number if qualifier "unique_values": true' % key
                            raise ModelValidationError(message)
            # validate lack of other qualifiers if value exist is false
            if 'value_exists' in value.keys():
                if not value['value_exists']:
                    if set(value.keys()) - {'value_exists'}:
                        message = 'If field %s qualifier value_exists: false, field may not have other qualifiers.' % key
                        raise ModelValidationError(message)
            # validate size qualifiers against each other
            size_qualifiers = ['min_size', 'max_size']
            for qualifier in size_qualifiers:
                if qualifier in value.keys():
                    test_value = value[qualifier]
                    value_path = 'field %s qualifier %s' % (key, qualifier)
                    header = 'Value %s for %s' % (test_value, value_path)
                    if 'min_size' in value.keys():
                        if test_value < value['min_size']:
                            message = '%s must not be less than "min_size": %s' % (header, value['min_size'])
                            raise ModelValidationError(message)
                    if 'max_size' in value.keys():
                        if test_value > value['max_size']:
                            message = '%s must not be greater than "max_size": %s' % (header, value['max_size'])
                            raise ModelValidationError(message)
            # validate length qualifiers against each other
            length_qualifiers = ['min_length', 'max_length']
            for qualifier in length_qualifiers:
                if qualifier in value.keys():
                    test_value = value[qualifier]
                    value_path = 'field %s qualifier %s' % (key, qualifier)
                    header = 'Value %s for %s' % (test_value, value_path)
                    if 'min_length' in value.keys():
                        if test_value < value['min_length']:
                            message = '%s must be at least "min_length": %s' % (header, value['min_length'])
                            raise ModelValidationError(message)
                    if 'max_length' in value.keys():
                        if test_value > value['max_length']:
                            message = '%s cannot be more than "max_length": %s' % (header, value['max_length'])
                            raise ModelValidationError(message)
            # validate range qualifiers against each other & length qualifiers
            range_qualifiers = ['min_value', 'max_value', 'greater_than', 'less_than', 'equal_to']
            for qualifier in range_qualifiers:
                if qualifier in value.keys():
                    test_value = value[qualifier]
                    value_path = 'field %s qualifier %s' % (key, qualifier)
                    quote_text = ''
                    if isinstance(test_value, str):
                        quote_text = '"'
                    header = 'Value %s%s%s for %s' % (quote_text, test_value, quote_text, value_path)
                    if 'min_value' in value.keys():
                        if test_value < value['min_value']:
                            message = '%s must not be less than "min_value": %s' % (header, value['min_value'])
                            raise ModelValidationError(message)
                    if 'max_value' in value.keys():
                        if test_value > value['max_value']:
                            message = '%s must not be greater than "max_value": %s' % (header, value['max_value'])
                            raise ModelValidationError(message)
                    if 'greater_than' in value.keys():
                        # a qualifier is never tested against itself
                        if test_value <= value['greater_than'] and not qualifier == 'greater_than':
                            message = '%s must be "greater_than": %s' % (header, value['greater_than'])
                            raise ModelValidationError(message)
                    if 'less_than' in value.keys():
                        if test_value >= value['less_than'] and not qualifier == 'less_than':
                            message = '%s must be "less_than": %s' % (header, value['less_than'])
                            raise ModelValidationError(message)
                    if 'min_length' in value.keys():
                        if len(test_value) < value['min_length']:
                            message = '%s must be at least "min_length": %s' % (header, value['min_length'])
                            raise ModelValidationError(message)
                    if 'max_length' in value.keys():
                        if len(test_value) > value['max_length']:
                            message = '%s cannot be more than "max_length": %s' % (header, value['max_length'])
                            raise ModelValidationError(message)
                    if 'integer_data' in value.keys():
                        if value['integer_data']:
                            if not isinstance(test_value, int):
                                message = '%s must be an "integer_data".' % header
                                raise ModelValidationError(message)
                    if 'must_not_contain' in value.keys():
                        for regex in value['must_not_contain']:
                            regex_pattern = re.compile(regex)
                            if regex_pattern.findall(test_value):
                                message = '%s matches regex pattern in "must_not_contain": ["%s"]' % (header, regex)
                                raise ModelValidationError(message)
                    if 'must_contain' in value.keys():
                        for regex in value['must_contain']:
                            regex_pattern = re.compile(regex)
                            if not regex_pattern.findall(test_value):
                                message = '%s does not match regex pattern in "must_contain": ["%s"].' % (header, regex)
                                raise ModelValidationError(message)
                    if 'contains_either' in value.keys():
                        regex_match = False
                        regex_patterns = []
                        for regex in value['contains_either']:
                            regex_patterns.append(regex)
                            regex_pattern = re.compile(regex)
                            if regex_pattern.findall(test_value):
                                regex_match = True
                        if not regex_match:
                            message = '%s does not match any regex patterns in "contains_either": %s' % (header, regex_patterns)
                            raise ModelValidationError(message)
                    if 'byte_data' in value.keys():
                        if value['byte_data']:
                            # byte data may only be pinned with equal_to; ranges are meaningless
                            if qualifier != 'equal_to':
                                message = '%s cannot be used with base64 encoded "byte_data".' % header
                                raise ModelValidationError(message)
            # validate discrete value qualifiers against other criteria
            schema_field = self.keyCriteria[self.keyName.index(key)]
            discrete_qualifiers = ['declared_value', 'default_value', 'excluded_values', 'discrete_values', 'example_values']
            for qualifier in discrete_qualifiers:
                test_qualifier = False
                if qualifier in schema_field:
                    test_qualifier = True
                    # an empty declared value in the schema is not worth testing
                    if qualifier == 'declared_value' and not schema_field[qualifier]:
                        test_qualifier = False
                if qualifier in value.keys() or (test_qualifier and declared_value):
                    multiple_values = False
                    if qualifier in value.keys():
                        if isinstance(value[qualifier], list):
                            test_list = value[qualifier]
                            multiple_values = True
                        else:
                            test_list = [value[qualifier]]
                    else:
                        test_list = [schema_field[qualifier]]
                    value_path = 'field %s qualifier %s' % (key, qualifier)
                    for i in range(len(test_list)):
                        test_value = test_list[i]
                        quote_text = ''
                        if isinstance(test_value, str):
                            quote_text = '"'
                        item_text = ''
                        if multiple_values:
                            item_text = '[%s]' % i
                        header = 'Value %s%s%s for %s%s' % (quote_text, test_value, quote_text, value_path, item_text)
                        if 'min_value' in value.keys():
                            if test_value < value['min_value']:
                                message = '%s must not be less than "min_value": %s' % (header, value['min_value'])
                                raise ModelValidationError(message)
                        if 'max_value' in value.keys():
                            if test_value > value['max_value']:
                                message = '%s must not be greater than "max_value": %s' % (header, value['max_value'])
                                raise ModelValidationError(message)
                        if 'equal_to' in value.keys():
                            if test_value != value['equal_to']:
                                # NOTE(review): the isinstance(test_value, bool) guard restricts this
                                # check to boolean values only — confirm this is intentional
                                if qualifier != 'declared_value' and isinstance(test_value, bool):
                                    message = '%s must be "equal_to": %s' % (header, value['equal_to'])
                                    raise ModelValidationError(message)
                        if 'greater_than' in value.keys():
                            if test_value <= value['greater_than']:
                                message = '%s must be "greater_than": %s' % (header, value['greater_than'])
                                raise ModelValidationError(message)
                        if 'less_than' in value.keys():
                            if test_value >= value['less_than']:
                                message = '%s must be "less_than": %s' % (header, value['less_than'])
                                raise ModelValidationError(message)
                        if 'integer_data' in value.keys():
                            if value['integer_data']:
                                if not isinstance(test_value, int):
                                    message = '%s must be an "integer_data".' % header
                                    raise ModelValidationError(message)
                        if 'min_length' in value.keys():
                            if len(test_value) < value['min_length']:
                                message = '%s must be at least "min_length": %s' % (header, value['min_length'])
                                raise ModelValidationError(message)
                        if 'max_length' in value.keys():
                            if len(test_value) > value['max_length']:
                                message = '%s cannot be more than "max_length": %s' % (header, value['max_length'])
                                raise ModelValidationError(message)
                        if 'must_not_contain' in value.keys():
                            for regex in value['must_not_contain']:
                                regex_pattern = re.compile(regex)
                                if regex_pattern.findall(test_value):
                                    message = '%s matches regex pattern in "must_not_contain": ["%s"]' % (header, regex)
                                    raise ModelValidationError(message)
                        if 'must_contain' in value.keys():
                            for regex in value['must_contain']:
                                regex_pattern = re.compile(regex)
                                if not regex_pattern.findall(test_value):
                                    message = '%s does not match regex pattern in "must_contain": ["%s"]' % (header, regex)
                                    raise ModelValidationError(message)
                        if 'contains_either' in value.keys():
                            regex_match = False
                            regex_patterns = []
                            for regex in value['contains_either']:
                                regex_patterns.append(regex)
                                regex_pattern = re.compile(regex)
                                if regex_pattern.findall(test_value):
                                    regex_match = True
                            if not regex_match:
                                message = '%s does not match any regex patterns in "contains_either": %s' % (header, regex_patterns)
                                raise ModelValidationError(message)
                        if 'byte_data' in value.keys():
                            message = '%s cannot be base64 decoded to "byte_data".' % header
                            try:
                                decoded_bytes = b64decode(test_value)
                            except:
                                raise ModelValidationError(message)
                            if not isinstance(decoded_bytes, bytes):
                                raise ModelValidationError(message)
            # validate discrete value qualifiers against each other
            for qualifier in discrete_qualifiers:
                test_qualifier = False
                if qualifier in schema_field:
                    test_qualifier = True
                    if qualifier == 'declared_value' and not schema_field[qualifier]:
                        test_qualifier = False
                if qualifier in value.keys() or (test_qualifier and declared_value):
                    multiple_values = False
                    if qualifier in value.keys():
                        if isinstance(value[qualifier], list):
                            test_list = value[qualifier]
                            multiple_values = True
                        else:
                            test_list = [value[qualifier]]
                    else:
                        test_list = [schema_field[qualifier]]
                    value_path = 'field %s qualifier %s' % (key, qualifier)
                    for i in range(len(test_list)):
                        test_value = test_list[i]
                        quote_text = ''
                        if isinstance(test_value, str):
                            quote_text = '"'
                        item_text = ''
                        if multiple_values:
                            item_text = '[%s]' % i
                        header = 'Value %s%s%s for %s%s' % (quote_text, test_value, quote_text, value_path, item_text)
                        if 'excluded_values' in value.keys():
                            # excluded values themselves are exempt from this check
                            if not qualifier == 'excluded_values':
                                if test_value in value['excluded_values']:
                                    message = '%s cannot be one of "excluded_values": %s.' % (header, value['excluded_values'])
                                    raise ModelValidationError(message)
                        if 'discrete_values' in value.keys():
                            # NOTE(review): excluded values are exempt here too — by definition
                            # they need not appear among the discrete values
                            if not qualifier == 'excluded_values':
                                if test_value not in value['discrete_values']:
                                    message = '%s must be one of "discrete_values": %s' % (header, value['discrete_values'])
                                    raise ModelValidationError(message)
        return fields_dict
    def _validate_defaults(self, fields_dict):
        ''' a helper method for validating default values declared in lists

        :param fields_dict: dictionary of field dot-paths mapped to their qualifier criteria
        :return: fields_dict (unchanged; raises ModelValidationError on an invalid default)
        '''
        # validate key names in fields
        for key, value in fields_dict.items():
            # retrieve value type and type dict
            value_type = self.keyCriteria[self.keyName.index(key)]['value_datatype']
            # validate discrete value qualifiers against other criteria
            qualifier = 'default_value'
            if qualifier in value.keys():
                multiple_values = False
                if isinstance(value[qualifier], list):
                    test_list = value[qualifier]
                    multiple_values = True
                else:
                    test_list = [value[qualifier]]
                value_path = 'field %s qualifier %s' % (key, qualifier)
                for i in range(len(test_list)):
                    test_value = test_list[i]
                    quote_text = ''
                    if isinstance(test_value, str):
                        quote_text = '"'
                    item_text = ''
                    if multiple_values:
                        item_text = '[%s]' % i
                    qualifier_text = value_path + item_text
                    header = 'Value %s%s%s for %s' % (quote_text, test_value, quote_text, qualifier_text)
                    if value_type == 'list':
                        # each default item must itself satisfy the item criteria of the list
                        default_item_key = '%s[0]' % key
                        try:
                            self.validate(test_value, default_item_key, object_title=header)
                        except Exception as err:
                            # rewrite the error message to point at the default, not the item path
                            raise ModelValidationError(str(err).strip().replace('field %s' % default_item_key, qualifier_text))
        return fields_dict
    def _evaluate_field(self, record_dict, field_name, field_criteria):
        ''' a helper method for evaluating record values based upon query criteria
        :param record_dict: dictionary with model valid data to evaluate
        :param field_name: string with path to root of query field
        :param field_criteria: dictionary with query operators and qualifiers
        :return: boolean (True if no field_criteria evaluate to false)
        '''
        # determine value existence criteria
        value_exists = True
        if 'value_exists' in field_criteria.keys():
            if not field_criteria['value_exists']:
                value_exists = False
        # validate existence of field
        # assumes self._walk raises when the dot-path matches nothing — TODO confirm
        field_exists = True
        try:
            record_values = self._walk(field_name, record_dict)
        except:
            field_exists = False
        # evaluate existence query criteria
        if value_exists != field_exists:
            return False
        elif not value_exists:
            # field is absent as required; no other criteria can apply
            return True
        # convert javascript dot_path to class dot_path
        field_key = field_name
        if not field_name:
            field_key = '.'
        else:
            if field_name[0] != '.':
                field_key = '.%s' % field_name
        # evaluate other query criteria
        # each operator passes if ANY matched value satisfies it, except the
        # negative operators (excluded_values, must_not_contain) which fail if ANY value matches
        for key, value in field_criteria.items():
            if key in ('min_size', 'min_length'):
                found = False
                if self.keyMap[field_key]['value_datatype'] == 'map':
                    for record_value in record_values:
                        # size of serialized map; 51 presumably offsets wrapper overhead — TODO confirm
                        record_size = sys.getsizeof(json.dumps(str(record_value)).replace(' ','')) - 51
                        if record_size >= value:
                            found = True
                            break
                else:
                    for record_value in record_values:
                        if len(record_value) >= value:
                            found = True
                            break
                if not found:
                    return False
            elif key in ('max_size', 'max_length'):
                found = False
                if self.keyMap[field_key]['value_datatype'] == 'map':
                    for record_value in record_values:
                        record_size = sys.getsizeof(json.dumps(str(record_value)).replace(' ','')) - 51
                        if record_size <= value:
                            found = True
                            break
                else:
                    for record_value in record_values:
                        if len(record_value) <= value:
                            found = True
                            break
                if not found:
                    return False
            elif key == 'min_value':
                found = False
                for record_value in record_values:
                    if record_value >= value:
                        found = True
                        break
                if not found:
                    return False
            elif key == 'max_value':
                found = False
                for record_value in record_values:
                    if record_value <= value:
                        found = True
                        break
                if not found:
                    return False
            elif key == 'equal_to':
                found = False
                for record_value in record_values:
                    if record_value == value:
                        found = True
                        break
                if not found:
                    return False
            elif key == 'greater_than':
                found = False
                for record_value in record_values:
                    if record_value > value:
                        found = True
                        break
                if not found:
                    return False
            elif key == 'less_than':
                found = False
                for record_value in record_values:
                    if record_value < value:
                        found = True
                        break
                if not found:
                    return False
            elif key == 'excluded_values':
                # negative operator: any excluded value present fails the query
                for record_value in record_values:
                    if record_value in value:
                        return False
            elif key == 'discrete_values':
                found = False
                for record_value in record_values:
                    if record_value in value:
                        found = True
                        break
                if not found:
                    return False
            elif key == 'integer_data':
                # exact class compare intentionally excludes bool (a subclass of int)
                found = False
                dummy_int = 1
                for record_value in record_values:
                    if record_value.__class__ == dummy_int.__class__:
                        found = True
                        break
                if value != found:
                    return False
            elif key == 'byte_data':
                found = False
                for record_value in record_values:
                    try:
                        decoded_bytes = b64decode(record_value)
                    except:
                        decoded_bytes = ''
                    if isinstance(decoded_bytes, bytes):
                        found = True
                        break
                if value != found:
                    return False
            elif key == 'must_contain':
                # every regex must match at least one value
                for regex in value:
                    regex_pattern = re.compile(regex)
                    found = False
                    for record_value in record_values:
                        if regex_pattern.findall(record_value):
                            found = True
                            break
                    if not found:
                        return False
            elif key == 'must_not_contain':
                # no regex may match any value
                for regex in value:
                    regex_pattern = re.compile(regex)
                    found = False
                    for record_value in record_values:
                        if regex_pattern.findall(record_value):
                            found = True
                            break
                    if found:
                        return False
            elif key == 'contains_either':
                # at least one regex must match at least one value
                found = False
                for regex in value:
                    regex_pattern = re.compile(regex)
                    for record_value in record_values:
                        if regex_pattern.findall(record_value):
                            found = True
                            break
                    if found:
                        break
                if not found:
                    return False
            elif key == 'unique_values':
                for record_value in record_values:
                    unique_values = True
                    if len(record_value) != len(set(record_value)):
                        unique_values = False
                    if value != unique_values:
                        return False
        return True
    def _validate_dict(self, input_dict, schema_dict, path_to_root, object_title=''):
        ''' a helper method for recursively validating keys in dictionaries

        mutates input_dict in place (coerced values and injected defaults) and
        raises InputValidationError on the first failed criterion

        :param input_dict: dictionary at path_to_root in the input record
        :param schema_dict: dictionary with the schema declaration at the same path
        :param path_to_root: string with dot-path of the dictionary in the record
        :param object_title: [optional] string with name of record for error reports
        :return input_dict
        '''
        # reconstruct key path to current dictionary in model
        # list indices are normalized to [0] to find the rule entry in keyMap
        rules_top_level_key = re.sub('\[\d+\]', '[0]', path_to_root)
        map_rules = self.keyMap[rules_top_level_key]
        # construct list error report template
        map_error = {
            'object_title': object_title,
            'model_schema': self.schema,
            'input_criteria': map_rules,
            'failed_test': 'value_datatype',
            'input_path': path_to_root,
            'error_value': 0,
            'error_code': 4001
        }
        # validate map size
        # size measured on the serialized map; 51 presumably offsets wrapper overhead — TODO confirm
        if 'min_size' in map_rules.keys():
            input_size = sys.getsizeof(json.dumps(str(input_dict)).replace(' ','')) - 51
            if input_size < map_rules['min_size']:
                map_error['failed_test'] = 'min_size'
                map_error['error_value'] = input_size
                map_error['error_code'] = 4031
                raise InputValidationError(map_error)
        if 'max_size' in map_rules.keys():
            input_size = sys.getsizeof(json.dumps(str(input_dict)).replace(' ','')) - 51
            if input_size > map_rules['max_size']:
                map_error['failed_test'] = 'max_size'
                map_error['error_value'] = input_size
                map_error['error_code'] = 4032
                raise InputValidationError(map_error)
        # construct lists of keys in input dictionary
        input_keys = []
        input_key_list = []
        for key in input_dict.keys():
            error_dict = {
                'object_title': object_title,
                'model_schema': self.schema,
                'input_criteria': self.keyMap[rules_top_level_key],
                'failed_test': 'key_datatype',
                'input_path': path_to_root,
                'error_value': key,
                'error_code': 4004
            }
            error_dict['input_criteria']['key_datatype'] = 'string'
            if path_to_root == '.':
                if not isinstance(key, str):
                    input_key_name = path_to_root + str(key)
                    error_dict['input_path'] = input_key_name
                    raise InputValidationError(error_dict)
                input_key_name = path_to_root + key
            else:
                if not isinstance(key, str):
                    input_key_name = path_to_root + '.' + str(key)
                    error_dict['input_path'] = input_key_name
                    raise InputValidationError(error_dict)
                input_key_name = path_to_root + '.' + key
            input_keys.append(input_key_name)
            input_key_list.append(key)
        # TODO: validate top-level key and values against identical to reference
        # TODO: run lambda function and call validation
        # construct lists of keys in schema dictionary
        max_keys = []
        max_key_list = []
        req_keys = []
        req_key_list = []
        for key in schema_dict.keys():
            if path_to_root == '.':
                schema_key_name = path_to_root + key
            else:
                schema_key_name = path_to_root + '.' + key
            max_keys.append(schema_key_name)
            max_key_list.append(key)
            rules_schema_key_name = re.sub('\[\d+\]', '[0]', schema_key_name)
            if self.keyMap[rules_schema_key_name]['required_field']:
                req_keys.append(schema_key_name)
                req_key_list.append(key)
        # validate existence of required fields
        missing_keys = set(req_keys) - set(input_keys)
        if missing_keys:
            error_dict = {
                'object_title': object_title,
                'model_schema': self.schema,
                'input_criteria': self.keyMap[rules_top_level_key],
                'failed_test': 'required_field',
                'input_path': path_to_root,
                'error_value': list(missing_keys),
                'error_code': 4002
            }
            error_dict['input_criteria']['required_keys'] = req_keys
            raise InputValidationError(error_dict)
        # validate existence of extra fields
        extra_keys = set(input_keys) - set(max_keys)
        if extra_keys and not self.keyMap[rules_top_level_key]['extra_fields']:
            extra_key_list = []
            for key in extra_keys:
                pathless_key = re.sub(rules_top_level_key, '', key, count=1)
                extra_key_list.append(pathless_key)
            error_dict = {
                'object_title': object_title,
                'model_schema': self.schema,
                'input_criteria': self.keyMap[rules_top_level_key],
                'failed_test': 'extra_fields',
                'input_path': path_to_root,
                'error_value': extra_key_list,
                'error_code': 4003
            }
            error_dict['input_criteria']['maximum_scope'] = max_key_list
            raise InputValidationError(error_dict)
        # validate datatype of value
        for key, value in input_dict.items():
            if path_to_root == '.':
                input_key_name = path_to_root + key
            else:
                input_key_name = path_to_root + '.' + key
            rules_input_key_name = re.sub('\[\d+\]', '[0]', input_key_name)
            if input_key_name in max_keys:
                input_criteria = self.keyMap[rules_input_key_name]
                error_dict = {
                    'object_title': object_title,
                    'model_schema': self.schema,
                    'input_criteria': input_criteria,
                    'failed_test': 'value_datatype',
                    'input_path': input_key_name,
                    'error_value': value,
                    'error_code': 4001
                }
                try:
                    value_index = self._datatype_classes.index(value.__class__)
                except:
                    # value class is not one of the model's supported datatypes
                    error_dict['error_value'] = value.__class__.__name__
                    raise InputValidationError(error_dict)
                value_type = self._datatype_names[value_index]
                if input_criteria['value_datatype'] == 'null':
                    # a null schema declaration accepts any datatype
                    pass
                else:
                    if value_type != input_criteria['value_datatype']:
                        raise InputValidationError(error_dict)
                # call appropriate validation sub-routine for datatype of value
                if value_type == 'boolean':
                    input_dict[key] = self._validate_boolean(value, input_key_name, object_title)
                elif value_type == 'number':
                    input_dict[key] = self._validate_number(value, input_key_name, object_title)
                elif value_type == 'string':
                    input_dict[key] = self._validate_string(value, input_key_name, object_title)
                elif value_type == 'map':
                    input_dict[key] = self._validate_dict(value, schema_dict[key], input_key_name, object_title)
                elif value_type == 'list':
                    input_dict[key] = self._validate_list(value, schema_dict[key], input_key_name, object_title)
        # set default values for empty optional fields
        for key in max_key_list:
            if key not in input_key_list:
                indexed_key = max_keys[max_key_list.index(key)]
                if indexed_key in self.components.keys():
                    if 'default_value' in self.components[indexed_key]:
                        input_dict[key] = self.components[indexed_key]['default_value']
        return input_dict
    def _validate_list(self, input_list, schema_list, path_to_root, object_title=''):
        '''
        a helper method for recursively validating items in a list

        mutates input_list in place with coerced item values and raises
        InputValidationError on the first failed criterion

        :param input_list: list at path_to_root in the input record
        :param schema_list: list with the schema declaration at the same path
        :param path_to_root: string with dot-path of the list in the record
        :param object_title: [optional] string with name of record for error reports
        :return: input_list
        '''
        # construct rules for list and items
        # list indices are normalized to [0] to find the rule entries in keyMap
        rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
        list_rules = self.keyMap[rules_path_to_root]
        initial_key = rules_path_to_root + '[0]'
        item_rules = self.keyMap[initial_key]
        # construct list error report template
        list_error = {
            'object_title': object_title,
            'model_schema': self.schema,
            'input_criteria': list_rules,
            'failed_test': 'value_datatype',
            'input_path': path_to_root,
            'error_value': 0,
            'error_code': 4001
        }
        # validate list rules
        if 'min_size' in list_rules.keys():
            if len(input_list) < list_rules['min_size']:
                list_error['failed_test'] = 'min_size'
                list_error['error_value'] = len(input_list)
                list_error['error_code'] = 4031
                raise InputValidationError(list_error)
        if 'max_size' in list_rules.keys():
            if len(input_list) > list_rules['max_size']:
                list_error['failed_test'] = 'max_size'
                list_error['error_value'] = len(input_list)
                list_error['error_code'] = 4032
                raise InputValidationError(list_error)
        # construct item error report template
        item_error = {
            'object_title': object_title,
            'model_schema': self.schema,
            'input_criteria': item_rules,
            'failed_test': 'value_datatype',
            'input_path': initial_key,
            'error_value': None,
            'error_code': 4001
        }
        # validate datatype of items
        for i in range(len(input_list)):
            input_path = path_to_root + '[%s]' % i
            item = input_list[i]
            item_error['input_path'] = input_path
            try:
                item_index = self._datatype_classes.index(item.__class__)
            except:
                # item class is not one of the model's supported datatypes
                item_error['error_value'] = item.__class__.__name__
                raise InputValidationError(item_error)
            item_type = self._datatype_names[item_index]
            item_error['error_value'] = item
            if item_rules['value_datatype'] == 'null':
                # a null schema declaration accepts any datatype
                pass
            else:
                if item_type != item_rules['value_datatype']:
                    raise InputValidationError(item_error)
            # call appropriate validation sub-routine for datatype of item
            if item_type == 'boolean':
                input_list[i] = self._validate_boolean(item, input_path, object_title)
            elif item_type == 'number':
                input_list[i] = self._validate_number(item, input_path, object_title)
            elif item_type == 'string':
                input_list[i] = self._validate_string(item, input_path, object_title)
            elif item_type == 'map':
                input_list[i] = self._validate_dict(item, schema_list[0], input_path, object_title)
            elif item_type == 'list':
                input_list[i] = self._validate_list(item, schema_list[0], input_path, object_title)
        # validate unique values in list
        if 'unique_values' in list_rules.keys():
            if len(set(input_list)) < len(input_list):
                list_error['failed_test'] = 'unique_values'
                list_error['error_value'] = input_list
                list_error['error_code'] = 4033
                raise InputValidationError(list_error)
        # TODO: validate top-level item values against identical to reference
        # TODO: run lambda function and call validation url
        return input_list
def _validate_number(self, input_number, path_to_root, object_title=''):
'''
a helper method for validating properties of a number
:return: input_number
'''
rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
input_criteria = self.keyMap[rules_path_to_root]
error_dict = {
'object_title': object_title,
'model_schema': self.schema,
'input_criteria': input_criteria,
'failed_test': 'value_datatype',
'input_path': path_to_root,
'error_value': input_number,
'error_code': 4001
}
if 'integer_data' in input_criteria.keys():
if input_criteria['integer_data'] and not isinstance(input_number, int):
error_dict['failed_test'] = 'integer_data'
error_dict['error_code'] = 4021
raise InputValidationError(error_dict)
if 'min_value' in input_criteria.keys():
if input_number < input_criteria['min_value']:
error_dict['failed_test'] = 'min_value'
error_dict['error_code'] = 4022
raise InputValidationError(error_dict)
if 'max_value' in input_criteria.keys():
if input_number > input_criteria['max_value']:
error_dict['failed_test'] = 'max_value'
error_dict['error_code'] = 4023
raise InputValidationError(error_dict)
if 'greater_than' in input_criteria.keys():
if input_number <= input_criteria['greater_than']:
error_dict['failed_test'] = 'greater_than'
error_dict['error_code'] = 4024
raise InputValidationError(error_dict)
if 'less_than' in input_criteria.keys():
if input_number >= input_criteria['less_than']:
error_dict['failed_test'] = 'less_than'
error_dict['error_code'] = 4025
raise InputValidationError(error_dict)
if 'equal_to' in input_criteria.keys():
if input_number != input_criteria['equal_to']:
error_dict['failed_test'] = 'equal_to'
error_dict['error_code'] = 4026
raise InputValidationError(error_dict)
if 'discrete_values' in input_criteria.keys():
if input_number not in input_criteria['discrete_values']:
error_dict['failed_test'] = 'discrete_values'
error_dict['error_code'] = 4041
raise InputValidationError(error_dict)
if 'excluded_values' in input_criteria.keys():
if input_number in input_criteria['excluded_values']:
error_dict['failed_test'] = 'excluded_values'
error_dict['error_code'] = 4042
raise InputValidationError(error_dict)
# TODO: validate number against identical to reference
# TODO: run lambda function and call validation url
return input_number
def _validate_boolean(self, input_boolean, path_to_root, object_title=''):
'''
a helper method for validating properties of a boolean
:return: input_boolean
'''
rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
input_criteria = self.keyMap[rules_path_to_root]
error_dict = {
'object_title': object_title,
'model_schema': self.schema,
'input_criteria': input_criteria,
'failed_test': 'value_datatype',
'input_path': path_to_root,
'error_value': input_boolean,
'error_code': 4001
}
if 'equal_to' in input_criteria.keys():
if input_boolean != input_criteria['equal_to']:
error_dict['failed_test'] = 'equal_to'
error_dict['error_code'] = 4026
raise InputValidationError(error_dict)
# TODO: validate boolean against identical to reference
# TODO: run lambda function and call validation url
return input_boolean
    def _ingest_dict(self, input_dict, schema_dict, path_to_root):
        '''
        a helper method for ingesting keys, value pairs in a dictionary

        best-effort coercion: schema fields whose input value is missing or of
        the wrong datatype are replaced with the field default (or an empty
        value of the declared datatype) rather than raising

        :param input_dict: dictionary at path_to_root in the input record
        :param schema_dict: dictionary with the schema declaration at the same path
        :param path_to_root: string with dot-path of the dictionary in the record
        :return: valid_dict
        '''
        valid_dict = {}
        # construct path to root for rules
        rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
        # iterate over keys in schema dict
        for key, value in schema_dict.items():
            key_path = path_to_root
            if not key_path == '.':
                key_path += '.'
            key_path += key
            rules_key_path = re.sub('\[\d+\]', '[0]', key_path)
            value_match = False
            if key in input_dict.keys():
                value_index = self._datatype_classes.index(value.__class__)
                value_type = self._datatype_names[value_index]
                try:
                    v_index = self._datatype_classes.index(input_dict[key].__class__)
                    v_type = self._datatype_names[v_index]
                    if v_type == value_type:
                        value_match = True
                except:
                    # input value class is not a supported datatype; fall back to defaults
                    value_match = False
            if value_match:
                # datatype agrees with schema; ingest recursively by datatype
                if value_type == 'null':
                    valid_dict[key] = input_dict[key]
                elif value_type == 'boolean':
                    valid_dict[key] = self._ingest_boolean(input_dict[key], key_path)
                elif value_type == 'number':
                    valid_dict[key] = self._ingest_number(input_dict[key], key_path)
                elif value_type == 'string':
                    valid_dict[key] = self._ingest_string(input_dict[key], key_path)
                elif value_type == 'map':
                    valid_dict[key] = self._ingest_dict(input_dict[key], schema_dict[key], key_path)
                elif value_type == 'list':
                    valid_dict[key] = self._ingest_list(input_dict[key], schema_dict[key], key_path)
            else:
                # missing or mismatched value: use the declared default, else an empty value
                value_type = self.keyMap[rules_key_path]['value_datatype']
                if 'default_value' in self.keyMap[rules_key_path]:
                    valid_dict[key] = self.keyMap[rules_key_path]['default_value']
                elif value_type == 'null':
                    valid_dict[key] = None
                elif value_type == 'boolean':
                    valid_dict[key] = False
                elif value_type == 'number':
                    valid_dict[key] = 0.0
                    if 'integer_data' in self.keyMap[rules_key_path].keys():
                        if self.keyMap[rules_key_path]['integer_data']:
                            valid_dict[key] = 0
                elif value_type == 'string':
                    valid_dict[key] = ''
                elif value_type == 'list':
                    valid_dict[key] = []
                elif value_type == 'map':
                    # recurse with an empty input so nested defaults are still applied
                    valid_dict[key] = self._ingest_dict({}, schema_dict[key], key_path)
        # add extra fields if set to True
        if self.keyMap[rules_path_to_root]['extra_fields']:
            for key, value in input_dict.items():
                if key not in valid_dict.keys():
                    valid_dict[key] = value
        return valid_dict
def _ingest_list(self, input_list, schema_list, path_to_root):
    '''
    a helper method for ingesting items in a list

    each input item is kept only if its datatype matches the prototype
    item declared at index 0 of the schema list AND it passes validation;
    items which fail either test are silently dropped (ingest is lenient)

    :param input_list: list of raw input items
    :param schema_list: list from the schema whose first item is the prototype
    :param path_to_root: string with dot path to the list field in the model
    :return: valid_list
    '''
    valid_list = []
    # construct max list size
    # item designators (e.g. [3]) normalize to [0] for criteria lookup
    max_size = None
    rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
    if 'max_size' in self.keyMap[rules_path_to_root].keys():
        if not self.keyMap[rules_path_to_root]['max_size']:
            # falsy max_size means no items may be ingested
            return valid_list
        else:
            max_size = self.keyMap[rules_path_to_root]['max_size']
    # iterate over items in input list
    if input_list:
        rules_index = self._datatype_classes.index(schema_list[0].__class__)
        rules_type = self._datatype_names[rules_index]
        for i in range(len(input_list)):
            item_path = '%s[%s]' % (path_to_root, i)
            # an item is only a candidate if its datatype matches the prototype
            value_match = False
            try:
                item_index = self._datatype_classes.index(input_list[i].__class__)
                item_type = self._datatype_names[item_index]
                if item_type == rules_type:
                    value_match = True
            except:
                # item class not among recognized datatypes
                value_match = False
            if value_match:
                try:
                    if item_type == 'boolean':
                        valid_list.append(self._validate_boolean(input_list[i], item_path))
                    elif item_type == 'number':
                        valid_list.append(self._validate_number(input_list[i], item_path))
                    elif item_type == 'string':
                        valid_list.append(self._validate_string(input_list[i], item_path))
                    elif item_type == 'map':
                        valid_list.append(self._ingest_dict(input_list[i], schema_list[0], item_path))
                    elif item_type == 'list':
                        valid_list.append(self._ingest_list(input_list[i], schema_list[0], item_path))
                except:
                    # invalid items are dropped rather than raised
                    pass
            # stop ingesting once the list reaches its declared max size
            if isinstance(max_size, int):
                if len(valid_list) == max_size:
                    return valid_list
    return valid_list
def _ingest_number(self, input_number, path_to_root):
'''
a helper method for ingesting a number
:return: valid_number
'''
valid_number = 0.0
try:
valid_number = self._validate_number(input_number, path_to_root)
except:
rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
if 'default_value' in self.keyMap[rules_path_to_root]:
valid_number = self.keyMap[rules_path_to_root]['default_value']
elif 'integer_data' in self.keyMap[rules_path_to_root].keys():
if self.keyMap[rules_path_to_root]['integer_data']:
valid_number = 0
return valid_number
def _ingest_string(self, input_string, path_to_root):
'''
a helper method for ingesting a string
:return: valid_string
'''
valid_string = ''
try:
valid_string = self._validate_string(input_string, path_to_root)
except:
rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
if 'default_value' in self.keyMap[rules_path_to_root]:
valid_string = self.keyMap[rules_path_to_root]['default_value']
return valid_string
def _ingest_boolean(self, input_boolean, path_to_root):
'''
a helper method for ingesting a boolean
:return: valid_boolean
'''
valid_boolean = False
try:
valid_boolean = self._validate_boolean(input_boolean, path_to_root)
except:
rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
if 'default_value' in self.keyMap[rules_path_to_root]:
valid_boolean = self.keyMap[rules_path_to_root]['default_value']
return valid_boolean
def _reconstruct(self, path_to_root):
'''
a helper method for finding the schema endpoint from a path to root
:param path_to_root: string with dot path to root from
:return: list, dict, string, number, or boolean at path to root
'''
# split path to root into segments
item_pattern = re.compile('\d+\\]')
dot_pattern = re.compile('\\.|\\[')
path_segments = dot_pattern.split(path_to_root)
# construct base schema endpoint
schema_endpoint = self.schema
# reconstruct schema endpoint from segments
if path_segments[1]:
for i in range(1,len(path_segments)):
if item_pattern.match(path_segments[i]):
schema_endpoint = schema_endpoint[0]
else:
schema_endpoint = schema_endpoint[path_segments[i]]
return schema_endpoint
def _walk(self, path_to_root, record_dict):
    '''
    a helper method for finding the record endpoint(s) from a path to root

    because a path may cross one or more lists, the result is a list of
    every value found at the path (one entry per matching list item)

    :param path_to_root: string with dot path to root (e.g. ".a[0].b")
    :param record_dict: dictionary with record to walk
    :return: list of values (list, dict, string, number, or boolean) found at path
    '''
    # split path to root into segments
    item_pattern = re.compile('\d+\\]')
    dot_pattern = re.compile('\\.|\\[')
    path_segments = dot_pattern.split(path_to_root)
    # construct empty fields
    record_endpoints = []
    # determine starting position
    # (a leading dot produces an empty first segment which is discarded)
    if not path_segments[0]:
        path_segments.pop(0)
    # define internal recursive function
    def _walk_int(path_segments, record_dict):
        record_endpoint = record_dict
        for i in range(0, len(path_segments)):
            if item_pattern.match(path_segments[i]):
                # segment is an item designator: fan out over every list item
                for j in range(0, len(record_endpoint)):
                    if len(path_segments) == 2:
                        # list item is itself the endpoint
                        record_endpoints.append(record_endpoint[j])
                    else:
                        # only recurse from the FIRST item designator in the
                        # segment chain; deeper designators are handled by the
                        # recursive calls themselves (stop_chain guards this)
                        stop_chain = False
                        for x in range(0, i):
                            if item_pattern.match(path_segments[x]):
                                stop_chain = True
                        if not stop_chain:
                            shortened_segments = []
                            for z in range(i + 1, len(path_segments)):
                                shortened_segments.append(path_segments[z])
                            _walk_int(shortened_segments, record_endpoint[j])
            else:
                # segment is a dictionary key
                stop_chain = False
                for y in range(0, i):
                    if item_pattern.match(path_segments[y]):
                        stop_chain = True
                if not stop_chain:
                    if len(path_segments) == i + 1:
                        # last segment: record the endpoint value
                        record_endpoints.append(record_endpoint[path_segments[i]])
                    else:
                        # descend one level into the record
                        record_endpoint = record_endpoint[path_segments[i]]
    # conduct recursive walk
    _walk_int(path_segments, record_dict)
    return record_endpoints
def validate(self, input_data, path_to_root='', object_title=''):
    '''
    a core method for validating input against the model

    input_data is only returned if all data is valid

    :param input_data: list, dict, string, number, or boolean to validate
    :param path_to_root: [optional] string with dot-path of model component
    :param object_title: [optional] string with name of input to validate
    :return: input_data (or InputValidationError)
    '''
    __name__ = '%s.validate' % self.__class__.__name__
    _path_arg = '%s(path_to_root="...")' % __name__
    _title_arg = '%s(object_title="...")' % __name__
    # validate input
    # a missing leading dot on path_to_root is normalized to the class style
    copy_path = path_to_root
    if path_to_root:
        if not isinstance(path_to_root, str):
            raise ModelValidationError('%s must be a string.' % _path_arg)
        else:
            if path_to_root[0] != '.':
                copy_path = '.%s' % path_to_root
            if not copy_path in self.keyMap.keys():
                raise ModelValidationError('%s does not exist in components %s.' % (_path_arg.replace('...', path_to_root), self.keyMap.keys()))
    else:
        copy_path = '.'
    if object_title:
        if not isinstance(object_title, str):
            raise ModelValidationError('%s must be a string' % _title_arg)
    # construct generic error dictionary
    error_dict = {
        'object_title': object_title,
        'model_schema': self.schema,
        'input_criteria': self.keyMap[copy_path],
        'failed_test': 'value_datatype',
        'input_path': copy_path,
        'error_value': input_data,
        'error_code': 4001
    }
    # determine value type of input data
    try:
        input_index = self._datatype_classes.index(input_data.__class__)
    except:
        # unrecognized class: report the class name instead of the value
        error_dict['error_value'] = input_data.__class__.__name__
        raise InputValidationError(error_dict)
    input_type = self._datatype_names[input_index]
    # validate input data type
    if input_type != self.keyMap[copy_path]['value_datatype']:
        raise InputValidationError(error_dict)
    # run helper method appropriate to data type
    # (containers additionally need their schema endpoint reconstructed)
    if input_type == 'boolean':
        input_data = self._validate_boolean(input_data, copy_path, object_title)
    elif input_type == 'number':
        input_data = self._validate_number(input_data, copy_path, object_title)
    elif input_type == 'string':
        input_data = self._validate_string(input_data, copy_path, object_title)
    elif input_type == 'list':
        schema_list = self._reconstruct(copy_path)
        input_data = self._validate_list(input_data, schema_list, copy_path, object_title)
    elif input_type == 'map':
        schema_dict = self._reconstruct(copy_path)
        input_data = self._validate_dict(input_data, schema_dict, copy_path, object_title)
    return input_data
def ingest(self, **kwargs):
    '''
    a core method to ingest and validate arbitrary keyword data

    **NOTE: data is always returned with this method**

    for each key in the model, the returned value is chosen by priority:
    1. value in kwargs if the field passes validation
    2. default value declared for the key in the model
    3. empty value appropriate to the datatype of the key in the model

    **NOTE: as long as a default value is provided for each key-value,
    returned data will be model valid

    **NOTE: if 'extra_fields' is True for a dictionary, key-value pairs
    in kwargs which are not declared in the model are also returned

    **NOTE: if 'max_size' is declared for a list, ingestion of list items
    stops once the list reaches max size

    :param kwargs: key, value pairs
    :return: dictionary with keys and values
    '''
    # delegate to the recursive dict ingester starting at the model root
    return self._ingest_dict(kwargs, self.schema, '.')
def query(self, query_criteria, valid_record=None):
    '''
    a core method for querying model valid data with criteria

    **NOTE: input is only returned if all fields & qualifiers are valid for model

    :param query_criteria: dictionary with model field names and query qualifiers
    :param valid_record: dictionary with model valid record
    :return: boolean (or QueryValidationError)

    an example of how to construct the query_criteria argument:
    query_criteria = {
        '.path.to.number': {
            'min_value': 4.5
        },
        '.path.to.string': {
            'must_contain': [ '\\regex' ]
        }
    }

    **NOTE: for a full list of operators for query_criteria based upon field
    datatype, see either the query-rules.json file or REFERENCE file
    '''
    __name__ = '%s.query' % self.__class__.__name__
    _query_arg = '%s(query_criteria={...})' % __name__
    _record_arg = '%s(valid_record={...})' % __name__
    # validate input
    if not isinstance(query_criteria, dict):
        raise ModelValidationError('%s must be a dictionary.' % _query_arg)
    # convert javascript dot_path to class dot_path
    # dot_fields remembers keys given WITHOUT a leading dot so error
    # messages can be converted back to the caller's own notation;
    # equal_fields remembers scalar criteria wrapped into an equal_to
    criteria_copy = {}
    equal_fields = []
    dot_fields = []
    for key, value in query_criteria.items():
        copy_key = key
        if not key:
            copy_key = '.'
        else:
            if key[0] != '.':
                copy_key = '.%s' % key
                dot_fields.append(copy_key)
        criteria_copy[copy_key] = value
        # a bare scalar value is shorthand for an equal_to qualifier
        if value.__class__ in self._datatype_classes[0:4]:
            criteria_copy[copy_key] = {
                'equal_to': value
            }
            equal_fields.append(copy_key)
    # validate query criteria against query rules
    query_kwargs = {
        'fields_dict': criteria_copy,
        'fields_rules': self.queryRules,
        'declared_value': False
    }
    try:
        self._validate_fields(**query_kwargs)
    except ModelValidationError as err:
        # rewrite the error message to match the caller's original notation
        message = err.error['message']
        for field in equal_fields:
            equal_error = 'field %s qualifier equal_to' % field
            if message.find(equal_error) > -1:
                message = message.replace(equal_error, 'field %s' % field)
                break
        field_pattern = re.compile('ield\s(\..*?)\s')
        field_name = field_pattern.findall(message)
        if field_name:
            if field_name[0] in dot_fields:
                # strip the leading dot which was added during normalization
                def _replace_field(x):
                    return 'ield %s ' % x.group(1)[1:]
                message = field_pattern.sub(_replace_field, message)
        raise QueryValidationError(message)
    # query test record
    # every criterion must evaluate true for the record to match
    if valid_record:
        if not isinstance(valid_record, dict):
            raise ModelValidationError('%s must be a dictionary.' % _record_arg)
        for key, value in criteria_copy.items():
            eval_outcome = self._evaluate_field(valid_record, key, value)
            if not eval_outcome:
                return False
    return True
|
collectiveacuity/jsonModel
|
jsonmodel/validators.py
|
jsonModel._validate_boolean
|
python
|
def _validate_boolean(self, input_boolean, path_to_root, object_title=''):
'''
a helper method for validating properties of a boolean
:return: input_boolean
'''
rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
input_criteria = self.keyMap[rules_path_to_root]
error_dict = {
'object_title': object_title,
'model_schema': self.schema,
'input_criteria': input_criteria,
'failed_test': 'value_datatype',
'input_path': path_to_root,
'error_value': input_boolean,
'error_code': 4001
}
if 'equal_to' in input_criteria.keys():
if input_boolean != input_criteria['equal_to']:
error_dict['failed_test'] = 'equal_to'
error_dict['error_code'] = 4026
raise InputValidationError(error_dict)
# TODO: validate boolean against identical to reference
# TODO: run lambda function and call validation url
return input_boolean
|
a helper method for validating properties of a boolean
:return: input_boolean
|
train
|
https://github.com/collectiveacuity/jsonModel/blob/1ea64c36d78add3faa7b85ff82c5ec685458c940/jsonmodel/validators.py#L1171-L1200
| null |
class jsonModel(object):
__rules__ = jsonLoader('jsonmodel', 'models/model-rules.json')
def __init__(self, data_model, query_rules=None):
    '''
    a method for testing data model declaration & initializing the class

    :param data_model: dictionary with json model architecture
    :param query_rules: [optional] dictionary with valid field type qualifiers
    :return: object with jsonModel methods
    '''
    # validate schema input
    if not isinstance(data_model, dict):
        raise ModelValidationError('Data model must be a dictionary.')
    elif 'schema' not in data_model.keys():
        raise ModelValidationError('Data model must have a schema key.')
    elif not isinstance(data_model['schema'], dict):
        raise ModelValidationError('Value for the data model "schema" field must be a dictionary.')
    elif not data_model['schema']:
        raise ModelValidationError('Data model "schema" field must not be empty.')
    # construct base methods
    from copy import deepcopy
    data_model = deepcopy(data_model)  # prevent mutation of the caller's dict
    self.schema = data_model['schema']
    model_map = mapModel(self.schema)
    self.keyName = model_map.keyName
    self.keyCriteria = model_map.keyCriteria
    # construct protected type classes
    self._datatype_names = mapModel._datatype_names
    self._datatype_classes = mapModel._datatype_classes
    # validate absence of item designators in keys
    # only the normalized designator [0] is permitted in schema key names
    item_pattern = re.compile('\[\d+\]')
    for i in range(len(self.keyName)):
        patterns_found = item_pattern.findall(self.keyName[i])
        if patterns_found:
            for designator in patterns_found:
                if designator != '[0]':
                    message = 'Key name for schema field %s must not contain the item designator pattern %s' % (self.keyName[i], designator)
                    raise ModelValidationError(message)
    # validate existence of first item in list declarations
    key_set = set(self.keyName)
    for i in range(len(self.keyName)):
        if self.keyCriteria[i]['value_datatype'] == 'list':
            item_key = self.keyName[i] + '[0]'
            if not item_key in key_set:
                message = 'Schema field %s must declare an initial item for the list.' % self.keyName[i]
                raise ModelValidationError(message)
            # alter list requirement if first item is empty
            else:
                item_index = self.keyName.index(item_key)
                if not self.keyCriteria[item_index]['required_field']:
                    self.keyCriteria[i]['required_field'] = False
    # validate title input & construct title property
    self.title = ''
    if 'title' in data_model.keys():
        if not isinstance(data_model['title'], str):
            raise ModelValidationError('Value for model title must be a string.')
        self.title = data_model['title']
    # validate description input & construct description property
    self.description = ''
    if 'description' in data_model.keys():
        if not isinstance(data_model['description'], str):
            raise ModelValidationError('Value for model description must be a string.')
        self.description = data_model['description']
    # validate url input & construct url property
    self.url = ''
    if 'url' in data_model.keys():
        if not isinstance(data_model['url'], str):
            raise ModelValidationError('Value for model url must be a string.')
        # BUG FIX: previously assigned data_model['url'] to self.title,
        # clobbering the model title and leaving self.url empty
        self.url = data_model['url']
    # validate metadata input & construct metadata property
    self.metadata = {}
    if 'metadata' in data_model.keys():
        if not isinstance(data_model['metadata'], dict):
            raise ModelValidationError('Value for model metadata must be a dictionary.')
        self.metadata = data_model['metadata']
    # validate max size input & construct maxSize property (disabled)
    # self.maxSize = None
    # if 'max_size' in data_model.keys():
    #     if not isinstance(data_model['max_size'], int):
    #         raise ModelValidationError('Value for model max_size must be a positive integer.')
    #     elif data_model['max_size'] < 0:
    #         raise ModelValidationError('Value for model max_size must be a positive integer.')
    #     elif data_model['max_size']:
    #         self.maxSize = data_model['max_size']
    # validate components input & construct component property
    self.components = {}
    if 'components' in data_model.keys():
        if not isinstance(data_model['components'], dict):
            raise ModelValidationError('Value for model components must be a dictionary.')
        self.components = self._validate_fields(data_model['components'], self.__rules__['components'])
    # construct keyMap fields from key names and key criteria
    self.keyMap = {}
    for i in range(len(self.keyName)):
        self.keyMap[self.keyName[i]] = self.keyCriteria[i]
    for key, value in self.components.items():
        # convert javascript dot_path to class dot_path
        dot_key = ''
        if not key:
            dot_key = '.'
        else:
            if key[0] != '.':
                dot_key = '.%s' % key
        # add component declarations to keyMap
        if key in self.keyMap.keys():
            for k, v in self.components[key].items():
                self.keyMap[key][k] = v
        elif dot_key and dot_key in self.keyMap.keys():
            for k, v in self.components[key].items():
                self.keyMap[dot_key][k] = v
    # validate default values in lists
    self._validate_defaults(self.keyMap)
    # construct queryRules property from class model rules
    # (declaration-only qualifiers are stripped from the query rules)
    self.queryRules = {}
    for key, value in self.__rules__['components'].items():
        remove_from_query = [ 'required_field', 'default_value', 'example_values', 'field_title', 'field_position', 'field_description', 'field_metadata', 'extra_fields' ]
        field_qualifiers = {
            'value_exists': False
        }
        for k, v in value.items():
            if k not in remove_from_query:
                field_qualifiers[k] = v
        self.queryRules[key] = field_qualifiers
    # validate query rules input and replace queryRules property
    if query_rules:
        if not isinstance(query_rules, dict):
            message = 'Value for query rules input must be a dictionary.'
            raise ModelValidationError(message)
        input_set = set(query_rules.keys())
        req_set = set(self.queryRules.keys())
        if input_set - req_set:
            message = 'Query rules input may only have %s field key names.' % req_set
            raise ModelValidationError(message)
        elif req_set - input_set:
            message = 'Query rules input must have all %s field key names.' % req_set
            raise ModelValidationError(message)
        for key in req_set:
            if not isinstance(query_rules[key], dict):
                message = 'Value for query rules %s field must be a dictionary.' % key
                raise ModelValidationError(message)
            input_qualifier_set = set(query_rules[key].keys())
            req_qualifier_set = set(self.queryRules[key].keys())
            if input_qualifier_set - req_qualifier_set:
                message = 'Query rules field %s may only have qualifiers %s' % (key, req_qualifier_set)
                raise ModelValidationError(message)
            for k, v in query_rules[key].items():
                if v.__class__ != self.queryRules[key][k].__class__:
                    qualifier_index = self._datatype_classes.index(self.queryRules[key][k].__class__)
                    qualifier_type = self._datatype_names[qualifier_index]
                    message = 'Value for query rules field %s qualifier %s must be a "%s" datatype.' % (key, k, qualifier_type)
                    raise ModelValidationError(message)
        self.queryRules = query_rules
def _validate_fields(self, fields_dict, fields_rules, declared_value=True):
    '''
    a helper method for validating component field criteria against the
    qualifier rules appropriate to each field's datatype

    :param fields_dict: dictionary of qualifier criteria keyed by field dot path
    :param fields_rules: dictionary of allowed qualifiers per datatype
    :param declared_value: boolean to also test values declared in the schema itself
    :return: fields_dict (or ModelValidationError)
    '''
    # validate key names in fields
    for key, value in fields_dict.items():
        # convert javascript dot_path to class dot_path
        if not key:
            key = '.'
        else:
            if key[0] != '.':
                key = '.%s' % key
        if key not in self.keyName:
            raise ModelValidationError('Field %s is not a field declared in model schema.' % key)
        elif not isinstance(value, dict):
            raise ModelValidationError('Value for field %s must be a dictionary.' % key)
        # validate field criteria are appropriate to field datatype
        value_type = self.keyCriteria[self.keyName.index(key)]['value_datatype']
        type_dict = {}
        if value_type == 'string':
            type_dict = fields_rules['.string_fields']
        elif value_type == 'number':
            type_dict = fields_rules['.number_fields']
        elif value_type == 'boolean':
            type_dict = fields_rules['.boolean_fields']
        elif value_type == 'list':
            type_dict = fields_rules['.list_fields']
        elif value_type == 'map':
            type_dict = fields_rules['.map_fields']
        elif value_type == 'null':
            type_dict = fields_rules['.null_fields']
        if set(value.keys()) - set(type_dict.keys()):
            raise ModelValidationError('Field %s may only have datatype %s qualifiers %s.' % (key, value_type, set(type_dict.keys())))
        # validate criteria qualifier values are appropriate datatype
        for k, v in value.items():
            v_index = self._datatype_classes.index(v.__class__)
            v_type = self._datatype_names[v_index]
            qualifier_index = self._datatype_classes.index(type_dict[k].__class__)
            qualifier_type = self._datatype_names[qualifier_index]
            if v_type != qualifier_type:
                message = 'Value for field %s qualifier %s must be a %s datatype.' % (key, k, qualifier_type)
                raise ModelValidationError(message)
            # an integer exemplar in the rules requires an integer value
            if qualifier_type == 'number':
                if isinstance(type_dict[k], int):
                    if not isinstance(v, int):
                        message = 'Value for field %s qualifier %s must be an integer.' % (key, k)
                        raise ModelValidationError(message)
            # validate internal logic of each qualifier value declaration
            if k in ('must_not_contain', 'must_contain', 'contains_either'):
                for item in v:
                    if not isinstance(item, str):
                        message = 'Each item in list field %s qualifier %s must be a string.' % (key, k)
                        raise ModelValidationError(message)
            if k in ('min_length', 'max_length', 'min_size', 'max_size'):
                if v < 0:
                    message = 'Value for field %s qualifier %s cannot be negative.' % (key, k)
                    raise ModelValidationError(message)
            if k in ('discrete_values', 'excluded_values', 'example_values'):
                for item in v:
                    if value_type == 'number':
                        if not isinstance(item, int) and not isinstance(item, float):
                            message = 'Each item in field %s qualifier %s list must be a number.' % (key, k)
                            raise ModelValidationError(message)
                    elif not isinstance(item, str):
                        message = 'Each item in list for field %s qualifier %s must be a string.' % (key, k)
                        raise ModelValidationError(message)
            if k == 'identical_to':
                if not v in self.keyName:
                    message = 'Value "%s" for field %s qualifier %s not found in components keys.' % (v, key, k)
                    raise ModelValidationError(message)
            if k == 'unique_values':
                if v:
                    # unique item tracking only works for hashable scalar items
                    item_name = key + '[0]'
                    item_type = self.keyCriteria[self.keyName.index(item_name)]['value_datatype']
                    if not item_type in ('number', 'string'):
                        message = 'Field %s[0] must be either a string or number if qualifier "unique_values": true' % key
                        raise ModelValidationError(message)
        # validate lack of other qualifiers if value exist is false
        if 'value_exists' in value.keys():
            if not value['value_exists']:
                if set(value.keys()) - {'value_exists'}:
                    message = 'If field %s qualifier value_exists: false, field may not have other qualifiers.' % key
                    raise ModelValidationError(message)
        # validate size qualifiers against each other
        size_qualifiers = ['min_size', 'max_size']
        for qualifier in size_qualifiers:
            if qualifier in value.keys():
                test_value = value[qualifier]
                value_path = 'field %s qualifier %s' % (key, qualifier)
                header = 'Value %s for %s' % (test_value, value_path)
                if 'min_size' in value.keys():
                    if test_value < value['min_size']:
                        message = '%s must not be less than "min_size": %s' % (header, value['min_size'])
                        raise ModelValidationError(message)
                if 'max_size' in value.keys():
                    if test_value > value['max_size']:
                        message = '%s must not be greater than "max_size": %s' % (header, value['max_size'])
                        raise ModelValidationError(message)
        # validate length qualifiers against each other
        length_qualifiers = ['min_length', 'max_length']
        for qualifier in length_qualifiers:
            if qualifier in value.keys():
                test_value = value[qualifier]
                value_path = 'field %s qualifier %s' % (key, qualifier)
                header = 'Value %s for %s' % (test_value, value_path)
                if 'min_length' in value.keys():
                    if test_value < value['min_length']:
                        message = '%s must be at least "min_length": %s' % (header, value['min_length'])
                        raise ModelValidationError(message)
                if 'max_length' in value.keys():
                    if test_value > value['max_length']:
                        message = '%s cannot be more than "max_length": %s' % (header, value['max_length'])
                        raise ModelValidationError(message)
        # validate range qualifiers against each other & length qualifiers
        range_qualifiers = ['min_value', 'max_value', 'greater_than', 'less_than', 'equal_to']
        for qualifier in range_qualifiers:
            if qualifier in value.keys():
                test_value = value[qualifier]
                value_path = 'field %s qualifier %s' % (key, qualifier)
                quote_text = ''
                if isinstance(test_value, str):
                    quote_text = '"'
                header = 'Value %s%s%s for %s' % (quote_text, test_value, quote_text, value_path)
                if 'min_value' in value.keys():
                    if test_value < value['min_value']:
                        message = '%s must not be less than "min_value": %s' % (header, value['min_value'])
                        raise ModelValidationError(message)
                if 'max_value' in value.keys():
                    if test_value > value['max_value']:
                        message = '%s must not be greater than "max_value": %s' % (header, value['max_value'])
                        raise ModelValidationError(message)
                if 'greater_than' in value.keys():
                    # a qualifier is not tested against itself
                    if test_value <= value['greater_than'] and not qualifier == 'greater_than':
                        message = '%s must be "greater_than": %s' % (header, value['greater_than'])
                        raise ModelValidationError(message)
                if 'less_than' in value.keys():
                    if test_value >= value['less_than'] and not qualifier == 'less_than':
                        message = '%s must be "less_than": %s' % (header, value['less_than'])
                        raise ModelValidationError(message)
                if 'min_length' in value.keys():
                    if len(test_value) < value['min_length']:
                        message = '%s must be at least "min_length": %s' % (header, value['min_length'])
                        raise ModelValidationError(message)
                if 'max_length' in value.keys():
                    if len(test_value) > value['max_length']:
                        message = '%s cannot be more than "max_length": %s' % (header, value['max_length'])
                        raise ModelValidationError(message)
                if 'integer_data' in value.keys():
                    if value['integer_data']:
                        if not isinstance(test_value, int):
                            message = '%s must be an "integer_data".' % header
                            raise ModelValidationError(message)
                if 'must_not_contain' in value.keys():
                    for regex in value['must_not_contain']:
                        regex_pattern = re.compile(regex)
                        if regex_pattern.findall(test_value):
                            message = '%s matches regex pattern in "must_not_contain": ["%s"]' % (header, regex)
                            raise ModelValidationError(message)
                if 'must_contain' in value.keys():
                    for regex in value['must_contain']:
                        regex_pattern = re.compile(regex)
                        if not regex_pattern.findall(test_value):
                            message = '%s does not match regex pattern in "must_contain": ["%s"].' % (header, regex)
                            raise ModelValidationError(message)
                if 'contains_either' in value.keys():
                    regex_match = False
                    regex_patterns = []
                    for regex in value['contains_either']:
                        regex_patterns.append(regex)
                        regex_pattern = re.compile(regex)
                        if regex_pattern.findall(test_value):
                            regex_match = True
                    if not regex_match:
                        message = '%s does not match any regex patterns in "contains_either": %s' % (header, regex_patterns)
                        raise ModelValidationError(message)
                if 'byte_data' in value.keys():
                    # range qualifiers (other than equal_to) are meaningless
                    # for base64 encoded byte data
                    if value['byte_data']:
                        if qualifier != 'equal_to':
                            message = '%s cannot be used with base64 encoded "byte_data".' % header
                            raise ModelValidationError(message)
        # validate discrete value qualifiers against other criteria
        schema_field = self.keyCriteria[self.keyName.index(key)]
        discrete_qualifiers = ['declared_value', 'default_value', 'excluded_values', 'discrete_values', 'example_values']
        for qualifier in discrete_qualifiers:
            test_qualifier = False
            if qualifier in schema_field:
                test_qualifier = True
                if qualifier == 'declared_value' and not schema_field[qualifier]:
                    test_qualifier = False
            if qualifier in value.keys() or (test_qualifier and declared_value):
                # normalize the tested qualifier into a list of values
                multiple_values = False
                if qualifier in value.keys():
                    if isinstance(value[qualifier], list):
                        test_list = value[qualifier]
                        multiple_values = True
                    else:
                        test_list = [value[qualifier]]
                else:
                    test_list = [schema_field[qualifier]]
                value_path = 'field %s qualifier %s' % (key, qualifier)
                for i in range(len(test_list)):
                    test_value = test_list[i]
                    quote_text = ''
                    if isinstance(test_value, str):
                        quote_text = '"'
                    item_text = ''
                    if multiple_values:
                        item_text = '[%s]' % i
                    header = 'Value %s%s%s for %s%s' % (quote_text, test_value, quote_text, value_path, item_text)
                    if 'min_value' in value.keys():
                        if test_value < value['min_value']:
                            message = '%s must not be less than "min_value": %s' % (header, value['min_value'])
                            raise ModelValidationError(message)
                    if 'max_value' in value.keys():
                        if test_value > value['max_value']:
                            message = '%s must not be greater than "max_value": %s' % (header, value['max_value'])
                            raise ModelValidationError(message)
                    if 'equal_to' in value.keys():
                        if test_value != value['equal_to']:
                            # NOTE(review): only boolean values are rejected
                            # here; confirm whether other datatypes should be
                            if qualifier != 'declared_value' and isinstance(test_value, bool):
                                message = '%s must be "equal_to": %s' % (header, value['equal_to'])
                                raise ModelValidationError(message)
                    if 'greater_than' in value.keys():
                        if test_value <= value['greater_than']:
                            message = '%s must be "greater_than": %s' % (header, value['greater_than'])
                            raise ModelValidationError(message)
                    if 'less_than' in value.keys():
                        if test_value >= value['less_than']:
                            message = '%s must be "less_than": %s' % (header, value['less_than'])
                            raise ModelValidationError(message)
                    if 'integer_data' in value.keys():
                        if value['integer_data']:
                            if not isinstance(test_value, int):
                                message = '%s must be an "integer_data".' % header
                                raise ModelValidationError(message)
                    if 'min_length' in value.keys():
                        if len(test_value) < value['min_length']:
                            message = '%s must be at least "min_length": %s' % (header, value['min_length'])
                            raise ModelValidationError(message)
                    if 'max_length' in value.keys():
                        if len(test_value) > value['max_length']:
                            message = '%s cannot be more than "max_length": %s' % (header, value['max_length'])
                            raise ModelValidationError(message)
                    if 'must_not_contain' in value.keys():
                        for regex in value['must_not_contain']:
                            regex_pattern = re.compile(regex)
                            if regex_pattern.findall(test_value):
                                message = '%s matches regex pattern in "must_not_contain": ["%s"]' % (header, regex)
                                raise ModelValidationError(message)
                    if 'must_contain' in value.keys():
                        for regex in value['must_contain']:
                            regex_pattern = re.compile(regex)
                            if not regex_pattern.findall(test_value):
                                message = '%s does not match regex pattern in "must_contain": ["%s"]' % (header, regex)
                                raise ModelValidationError(message)
                    if 'contains_either' in value.keys():
                        regex_match = False
                        regex_patterns = []
                        for regex in value['contains_either']:
                            regex_patterns.append(regex)
                            regex_pattern = re.compile(regex)
                            if regex_pattern.findall(test_value):
                                regex_match = True
                        if not regex_match:
                            message = '%s does not match any regex patterns in "contains_either": %s' % (header, regex_patterns)
                            raise ModelValidationError(message)
                    if 'byte_data' in value.keys():
                        # discrete values must be valid base64 byte data
                        message = '%s cannot be base64 decoded to "byte_data".' % header
                        try:
                            decoded_bytes = b64decode(test_value)
                        except:
                            raise ModelValidationError(message)
                        if not isinstance(decoded_bytes, bytes):
                            raise ModelValidationError(message)
        # validate discrete value qualifiers against each other
        for qualifier in discrete_qualifiers:
            test_qualifier = False
            if qualifier in schema_field:
                test_qualifier = True
                if qualifier == 'declared_value' and not schema_field[qualifier]:
                    test_qualifier = False
            if qualifier in value.keys() or (test_qualifier and declared_value):
                multiple_values = False
                if qualifier in value.keys():
                    if isinstance(value[qualifier], list):
                        test_list = value[qualifier]
                        multiple_values = True
                    else:
                        test_list = [value[qualifier]]
                else:
                    test_list = [schema_field[qualifier]]
                value_path = 'field %s qualifier %s' % (key, qualifier)
                for i in range(len(test_list)):
                    test_value = test_list[i]
                    quote_text = ''
                    if isinstance(test_value, str):
                        quote_text = '"'
                    item_text = ''
                    if multiple_values:
                        item_text = '[%s]' % i
                    header = 'Value %s%s%s for %s%s' % (quote_text, test_value, quote_text, value_path, item_text)
                    if 'excluded_values' in value.keys():
                        if not qualifier == 'excluded_values':
                            if test_value in value['excluded_values']:
                                message = '%s cannot be one of "excluded_values": %s.' % (header, value['excluded_values'])
                                raise ModelValidationError(message)
                    if 'discrete_values' in value.keys():
                        # NOTE(review): this guard re-checks 'excluded_values';
                        # possibly intended to be 'discrete_values' — confirm
                        if not qualifier == 'excluded_values':
                            if test_value not in value['discrete_values']:
                                message = '%s must be one of "discrete_values": %s' % (header, value['discrete_values'])
                                raise ModelValidationError(message)
    return fields_dict
def _validate_defaults(self, fields_dict):
    '''
    a helper method for validating default values declared in model fields

    for each field with a 'default_value' qualifier whose field datatype
    is a list, each declared default item is validated against the rules
    for the list's item key ('<key>[0]'); scalar defaults are not
    re-validated here (presumably covered elsewhere — TODO confirm)

    :param fields_dict: dictionary with flattened model field criteria
    :return: fields_dict (or raises ModelValidationError)
    '''
    # validate key names in fields
    for key, value in fields_dict.items():
        # retrieve value type and type dict
        value_type = self.keyCriteria[self.keyName.index(key)]['value_datatype']
        # validate discrete value qualifiers against other criteria
        qualifier = 'default_value'
        if qualifier in value.keys():
            # a list default may declare one item or a list of items
            multiple_values = False
            if isinstance(value[qualifier], list):
                test_list = value[qualifier]
                multiple_values = True
            else:
                test_list = [value[qualifier]]
            value_path = 'field %s qualifier %s' % (key, qualifier)
            for i in range(len(test_list)):
                test_value = test_list[i]
                # quote string values in error headers for readability
                quote_text = ''
                if isinstance(test_value, str):
                    quote_text = '"'
                item_text = ''
                if multiple_values:
                    item_text = '[%s]' % i
                qualifier_text = value_path + item_text
                header = 'Value %s%s%s for %s' % (quote_text, test_value, quote_text, qualifier_text)
                if value_type == 'list':
                    # each default item must satisfy the item rules at '<key>[0]'
                    default_item_key = '%s[0]' % key
                    try:
                        self.validate(test_value, default_item_key, object_title=header)
                    except Exception as err:
                        # rewrite the error to point at the default qualifier, not the item key
                        raise ModelValidationError(str(err).strip().replace('field %s' % default_item_key, qualifier_text))
    return fields_dict
def _evaluate_field(self, record_dict, field_name, field_criteria):
    ''' a helper method for evaluating record values based upon query criteria

    a field may resolve to multiple values (when the dot path crosses a
    list); for most qualifiers the criterion passes if AT LEAST ONE
    resolved value satisfies it, while 'excluded_values' and
    'must_not_contain' fail if ANY resolved value matches

    :param record_dict: dictionary with model valid data to evaluate
    :param field_name: string with path to root of query field
    :param field_criteria: dictionary with query operators and qualifiers
    :return: boolean (True if no field_criteria evaluate to false)
    '''
    # determine value existence criteria
    value_exists = True
    if 'value_exists' in field_criteria.keys():
        if not field_criteria['value_exists']:
            value_exists = False
    # validate existence of field
    field_exists = True
    try:
        record_values = self._walk(field_name, record_dict)
    except:
        field_exists = False
    # evaluate existence query criteria
    if value_exists != field_exists:
        return False
    elif not value_exists:
        # field is absent and absence was requested: no further tests apply
        return True
    # convert javascript dot_path to class dot_path
    field_key = field_name
    if not field_name:
        field_key = '.'
    else:
        if field_name[0] != '.':
            field_key = '.%s' % field_name
    # evaluate other query criteria
    for key, value in field_criteria.items():
        if key in ('min_size', 'min_length'):
            found = False
            if self.keyMap[field_key]['value_datatype'] == 'map':
                for record_value in record_values:
                    # approximate byte size of serialized map minus fixed overhead
                    # (the constant 51 presumably measures an empty wrapper — TODO confirm)
                    record_size = sys.getsizeof(json.dumps(str(record_value)).replace(' ','')) - 51
                    if record_size >= value:
                        found = True
                        break
            else:
                for record_value in record_values:
                    if len(record_value) >= value:
                        found = True
                        break
            if not found:
                return False
        elif key in ('max_size', 'max_length'):
            found = False
            if self.keyMap[field_key]['value_datatype'] == 'map':
                for record_value in record_values:
                    record_size = sys.getsizeof(json.dumps(str(record_value)).replace(' ','')) - 51
                    if record_size <= value:
                        found = True
                        break
            else:
                for record_value in record_values:
                    if len(record_value) <= value:
                        found = True
                        break
            if not found:
                return False
        elif key == 'min_value':
            found = False
            for record_value in record_values:
                if record_value >= value:
                    found = True
                    break
            if not found:
                return False
        elif key == 'max_value':
            found = False
            for record_value in record_values:
                if record_value <= value:
                    found = True
                    break
            if not found:
                return False
        elif key == 'equal_to':
            found = False
            for record_value in record_values:
                if record_value == value:
                    found = True
                    break
            if not found:
                return False
        elif key == 'greater_than':
            found = False
            for record_value in record_values:
                if record_value > value:
                    found = True
                    break
            if not found:
                return False
        elif key == 'less_than':
            found = False
            for record_value in record_values:
                if record_value < value:
                    found = True
                    break
            if not found:
                return False
        elif key == 'excluded_values':
            # ANY match against an excluded value fails the field
            for record_value in record_values:
                if record_value in value:
                    return False
        elif key == 'discrete_values':
            found = False
            for record_value in record_values:
                if record_value in value:
                    found = True
                    break
            if not found:
                return False
        elif key == 'integer_data':
            found = False
            # exact class comparison (not isinstance) so bool does not count as int
            dummy_int = 1
            for record_value in record_values:
                if record_value.__class__ == dummy_int.__class__:
                    found = True
                    break
            if value != found:
                return False
        elif key == 'byte_data':
            # a value counts as byte data if it base64-decodes to bytes
            found = False
            for record_value in record_values:
                try:
                    decoded_bytes = b64decode(record_value)
                except:
                    decoded_bytes = ''
                if isinstance(decoded_bytes, bytes):
                    found = True
                    break
            if value != found:
                return False
        elif key == 'must_contain':
            # every regex must match at least one value
            for regex in value:
                regex_pattern = re.compile(regex)
                found = False
                for record_value in record_values:
                    if regex_pattern.findall(record_value):
                        found = True
                        break
                if not found:
                    return False
        elif key == 'must_not_contain':
            # no regex may match any value
            for regex in value:
                regex_pattern = re.compile(regex)
                found = False
                for record_value in record_values:
                    if regex_pattern.findall(record_value):
                        found = True
                        break
                if found:
                    return False
        elif key == 'contains_either':
            # at least one regex must match at least one value
            found = False
            for regex in value:
                regex_pattern = re.compile(regex)
                for record_value in record_values:
                    if regex_pattern.findall(record_value):
                        found = True
                        break
                if found:
                    break
            if not found:
                return False
        elif key == 'unique_values':
            # compares required uniqueness against actual uniqueness of each list value
            for record_value in record_values:
                unique_values = True
                if len(record_value) != len(set(record_value)):
                    unique_values = False
                if value != unique_values:
                    return False
    return True
def _validate_dict(self, input_dict, schema_dict, path_to_root, object_title=''):
    ''' a helper method for recursively validating keys in dictionaries

    validates map size, key datatypes, required/extra fields and each
    value's datatype, recursing into nested maps and lists; fills in
    declared defaults for absent optional fields

    :param input_dict: dictionary of input to validate
    :param schema_dict: dictionary with schema declaration at this path
    :param path_to_root: string with dot path to this map in the model
    :param object_title: [optional] string with name of input for error reporting
    :return input_dict
    '''
    # reconstruct key path to current dictionary in model
    # (list indices are normalized to [0] since rules are declared for item 0)
    rules_top_level_key = re.sub('\[\d+\]', '[0]', path_to_root)
    map_rules = self.keyMap[rules_top_level_key]
    # construct list error report template
    map_error = {
        'object_title': object_title,
        'model_schema': self.schema,
        'input_criteria': map_rules,
        'failed_test': 'value_datatype',
        'input_path': path_to_root,
        'error_value': 0,
        'error_code': 4001
    }
    # validate map size
    # (approximate serialized byte size minus fixed overhead of 51 —
    #  presumably the size of an empty serialized wrapper; TODO confirm)
    if 'min_size' in map_rules.keys():
        input_size = sys.getsizeof(json.dumps(str(input_dict)).replace(' ','')) - 51
        if input_size < map_rules['min_size']:
            map_error['failed_test'] = 'min_size'
            map_error['error_value'] = input_size
            map_error['error_code'] = 4031
            raise InputValidationError(map_error)
    if 'max_size' in map_rules.keys():
        input_size = sys.getsizeof(json.dumps(str(input_dict)).replace(' ','')) - 51
        if input_size > map_rules['max_size']:
            map_error['failed_test'] = 'max_size'
            map_error['error_value'] = input_size
            map_error['error_code'] = 4032
            raise InputValidationError(map_error)
    # construct lists of keys in input dictionary
    # (every key must be a string; non-string keys raise 4004)
    input_keys = []
    input_key_list = []
    for key in input_dict.keys():
        error_dict = {
            'object_title': object_title,
            'model_schema': self.schema,
            'input_criteria': self.keyMap[rules_top_level_key],
            'failed_test': 'key_datatype',
            'input_path': path_to_root,
            'error_value': key,
            'error_code': 4004
        }
        error_dict['input_criteria']['key_datatype'] = 'string'
        if path_to_root == '.':
            if not isinstance(key, str):
                input_key_name = path_to_root + str(key)
                error_dict['input_path'] = input_key_name
                raise InputValidationError(error_dict)
            input_key_name = path_to_root + key
        else:
            if not isinstance(key, str):
                input_key_name = path_to_root + '.' + str(key)
                error_dict['input_path'] = input_key_name
                raise InputValidationError(error_dict)
            input_key_name = path_to_root + '.' + key
        input_keys.append(input_key_name)
        input_key_list.append(key)
    # TODO: validate top-level key and values against identical to reference
    # TODO: run lambda function and call validation
    # construct lists of keys in schema dictionary
    max_keys = []
    max_key_list = []
    req_keys = []
    req_key_list = []
    for key in schema_dict.keys():
        if path_to_root == '.':
            schema_key_name = path_to_root + key
        else:
            schema_key_name = path_to_root + '.' + key
        max_keys.append(schema_key_name)
        max_key_list.append(key)
        rules_schema_key_name = re.sub('\[\d+\]', '[0]', schema_key_name)
        if self.keyMap[rules_schema_key_name]['required_field']:
            req_keys.append(schema_key_name)
            req_key_list.append(key)
    # validate existence of required fields
    missing_keys = set(req_keys) - set(input_keys)
    if missing_keys:
        error_dict = {
            'object_title': object_title,
            'model_schema': self.schema,
            'input_criteria': self.keyMap[rules_top_level_key],
            'failed_test': 'required_field',
            'input_path': path_to_root,
            'error_value': list(missing_keys),
            'error_code': 4002
        }
        error_dict['input_criteria']['required_keys'] = req_keys
        raise InputValidationError(error_dict)
    # validate existence of extra fields
    # (only an error when 'extra_fields' is falsy for this map)
    extra_keys = set(input_keys) - set(max_keys)
    if extra_keys and not self.keyMap[rules_top_level_key]['extra_fields']:
        extra_key_list = []
        for key in extra_keys:
            # strip the path prefix so the report shows relative key names
            pathless_key = re.sub(rules_top_level_key, '', key, count=1)
            extra_key_list.append(pathless_key)
        error_dict = {
            'object_title': object_title,
            'model_schema': self.schema,
            'input_criteria': self.keyMap[rules_top_level_key],
            'failed_test': 'extra_fields',
            'input_path': path_to_root,
            'error_value': extra_key_list,
            'error_code': 4003
        }
        error_dict['input_criteria']['maximum_scope'] = max_key_list
        raise InputValidationError(error_dict)
    # validate datatype of value
    for key, value in input_dict.items():
        if path_to_root == '.':
            input_key_name = path_to_root + key
        else:
            input_key_name = path_to_root + '.' + key
        rules_input_key_name = re.sub('\[\d+\]', '[0]', input_key_name)
        # extra fields (not declared in schema) are passed through unvalidated
        if input_key_name in max_keys:
            input_criteria = self.keyMap[rules_input_key_name]
            error_dict = {
                'object_title': object_title,
                'model_schema': self.schema,
                'input_criteria': input_criteria,
                'failed_test': 'value_datatype',
                'input_path': input_key_name,
                'error_value': value,
                'error_code': 4001
            }
            try:
                value_index = self._datatype_classes.index(value.__class__)
            except:
                # class not in the supported datatype list
                error_dict['error_value'] = value.__class__.__name__
                raise InputValidationError(error_dict)
            value_type = self._datatype_names[value_index]
            if input_criteria['value_datatype'] == 'null':
                # a null declaration accepts any datatype
                pass
            else:
                if value_type != input_criteria['value_datatype']:
                    raise InputValidationError(error_dict)
            # call appropriate validation sub-routine for datatype of value
            if value_type == 'boolean':
                input_dict[key] = self._validate_boolean(value, input_key_name, object_title)
            elif value_type == 'number':
                input_dict[key] = self._validate_number(value, input_key_name, object_title)
            elif value_type == 'string':
                input_dict[key] = self._validate_string(value, input_key_name, object_title)
            elif value_type == 'map':
                input_dict[key] = self._validate_dict(value, schema_dict[key], input_key_name, object_title)
            elif value_type == 'list':
                input_dict[key] = self._validate_list(value, schema_dict[key], input_key_name, object_title)
    # set default values for empty optional fields
    for key in max_key_list:
        if key not in input_key_list:
            indexed_key = max_keys[max_key_list.index(key)]
            if indexed_key in self.components.keys():
                if 'default_value' in self.components[indexed_key]:
                    input_dict[key] = self.components[indexed_key]['default_value']
    return input_dict
def _validate_list(self, input_list, schema_list, path_to_root, object_title=''):
    '''
    a helper method for recursively validating items in a list

    validates list size, each item's datatype (recursing into nested
    maps and lists) and, when declared, item uniqueness

    :param input_list: list of input to validate
    :param schema_list: list with schema declaration at this path
    :param path_to_root: string with dot path to this list in the model
    :param object_title: [optional] string with name of input for error reporting
    :return: input_list
    '''
    # construct rules for list and items
    # (item rules are always declared at index [0] of the list path)
    rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
    list_rules = self.keyMap[rules_path_to_root]
    initial_key = rules_path_to_root + '[0]'
    item_rules = self.keyMap[initial_key]
    # construct list error report template
    list_error = {
        'object_title': object_title,
        'model_schema': self.schema,
        'input_criteria': list_rules,
        'failed_test': 'value_datatype',
        'input_path': path_to_root,
        'error_value': 0,
        'error_code': 4001
    }
    # validate list rules
    if 'min_size' in list_rules.keys():
        if len(input_list) < list_rules['min_size']:
            list_error['failed_test'] = 'min_size'
            list_error['error_value'] = len(input_list)
            list_error['error_code'] = 4031
            raise InputValidationError(list_error)
    if 'max_size' in list_rules.keys():
        if len(input_list) > list_rules['max_size']:
            list_error['failed_test'] = 'max_size'
            list_error['error_value'] = len(input_list)
            list_error['error_code'] = 4032
            raise InputValidationError(list_error)
    # construct item error report template
    item_error = {
        'object_title': object_title,
        'model_schema': self.schema,
        'input_criteria': item_rules,
        'failed_test': 'value_datatype',
        'input_path': initial_key,
        'error_value': None,
        'error_code': 4001
    }
    # validate datatype of items
    for i in range(len(input_list)):
        input_path = path_to_root + '[%s]' % i
        item = input_list[i]
        item_error['input_path'] = input_path
        try:
            item_index = self._datatype_classes.index(item.__class__)
        except:
            # class not in the supported datatype list
            item_error['error_value'] = item.__class__.__name__
            raise InputValidationError(item_error)
        item_type = self._datatype_names[item_index]
        item_error['error_value'] = item
        if item_rules['value_datatype'] == 'null':
            # a null declaration accepts any datatype
            pass
        else:
            if item_type != item_rules['value_datatype']:
                raise InputValidationError(item_error)
        # call appropriate validation sub-routine for datatype of item
        if item_type == 'boolean':
            input_list[i] = self._validate_boolean(item, input_path, object_title)
        elif item_type == 'number':
            input_list[i] = self._validate_number(item, input_path, object_title)
        elif item_type == 'string':
            input_list[i] = self._validate_string(item, input_path, object_title)
        elif item_type == 'map':
            input_list[i] = self._validate_dict(item, schema_list[0], input_path, object_title)
        elif item_type == 'list':
            input_list[i] = self._validate_list(item, schema_list[0], input_path, object_title)
    # validate unique values in list
    if 'unique_values' in list_rules.keys():
        if len(set(input_list)) < len(input_list):
            list_error['failed_test'] = 'unique_values'
            list_error['error_value'] = input_list
            list_error['error_code'] = 4033
            raise InputValidationError(list_error)
    # TODO: validate top-level item values against identical to reference
    # TODO: run lambda function and call validation url
    return input_list
def _validate_number(self, input_number, path_to_root, object_title=''):
'''
a helper method for validating properties of a number
:return: input_number
'''
rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
input_criteria = self.keyMap[rules_path_to_root]
error_dict = {
'object_title': object_title,
'model_schema': self.schema,
'input_criteria': input_criteria,
'failed_test': 'value_datatype',
'input_path': path_to_root,
'error_value': input_number,
'error_code': 4001
}
if 'integer_data' in input_criteria.keys():
if input_criteria['integer_data'] and not isinstance(input_number, int):
error_dict['failed_test'] = 'integer_data'
error_dict['error_code'] = 4021
raise InputValidationError(error_dict)
if 'min_value' in input_criteria.keys():
if input_number < input_criteria['min_value']:
error_dict['failed_test'] = 'min_value'
error_dict['error_code'] = 4022
raise InputValidationError(error_dict)
if 'max_value' in input_criteria.keys():
if input_number > input_criteria['max_value']:
error_dict['failed_test'] = 'max_value'
error_dict['error_code'] = 4023
raise InputValidationError(error_dict)
if 'greater_than' in input_criteria.keys():
if input_number <= input_criteria['greater_than']:
error_dict['failed_test'] = 'greater_than'
error_dict['error_code'] = 4024
raise InputValidationError(error_dict)
if 'less_than' in input_criteria.keys():
if input_number >= input_criteria['less_than']:
error_dict['failed_test'] = 'less_than'
error_dict['error_code'] = 4025
raise InputValidationError(error_dict)
if 'equal_to' in input_criteria.keys():
if input_number != input_criteria['equal_to']:
error_dict['failed_test'] = 'equal_to'
error_dict['error_code'] = 4026
raise InputValidationError(error_dict)
if 'discrete_values' in input_criteria.keys():
if input_number not in input_criteria['discrete_values']:
error_dict['failed_test'] = 'discrete_values'
error_dict['error_code'] = 4041
raise InputValidationError(error_dict)
if 'excluded_values' in input_criteria.keys():
if input_number in input_criteria['excluded_values']:
error_dict['failed_test'] = 'excluded_values'
error_dict['error_code'] = 4042
raise InputValidationError(error_dict)
# TODO: validate number against identical to reference
# TODO: run lambda function and call validation url
return input_number
def _validate_string(self, input_string, path_to_root, object_title=''):
    '''
    a helper method for validating properties of a string

    checks each qualifier declared for the field in model order and
    raises an InputValidationError report on the first failed test

    :param input_string: string to validate against field criteria
    :param path_to_root: string with dot path to field in model
    :param object_title: [optional] string with name of input for error reporting
    :return: input_string
    '''
    # look up the declared criteria (list indices normalize to [0])
    rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
    input_criteria = self.keyMap[rules_path_to_root]
    # reusable error report, updated with the specific failed test
    error_dict = {
        'object_title': object_title,
        'model_schema': self.schema,
        'input_criteria': input_criteria,
        'failed_test': 'value_datatype',
        'input_path': path_to_root,
        'error_value': input_string,
        'error_code': 4001
    }
    if 'byte_data' in input_criteria.keys():
        if input_criteria['byte_data']:
            # string must be base64 decodable into bytes
            error_dict['failed_test'] = 'byte_data'
            error_dict['error_code'] = 4011
            try:
                decoded_bytes = b64decode(input_string)
            except:
                raise InputValidationError(error_dict)
            if not isinstance(decoded_bytes, bytes):
                raise InputValidationError(error_dict)
    # NOTE: min/max/greater/less comparisons on strings are lexicographic
    if 'min_value' in input_criteria.keys():
        if input_string < input_criteria['min_value']:
            error_dict['failed_test'] = 'min_value'
            error_dict['error_code'] = 4022
            raise InputValidationError(error_dict)
    if 'max_value' in input_criteria.keys():
        if input_string > input_criteria['max_value']:
            error_dict['failed_test'] = 'max_value'
            error_dict['error_code'] = 4023
            raise InputValidationError(error_dict)
    if 'greater_than' in input_criteria.keys():
        if input_string <= input_criteria['greater_than']:
            error_dict['failed_test'] = 'greater_than'
            error_dict['error_code'] = 4024
            raise InputValidationError(error_dict)
    if 'less_than' in input_criteria.keys():
        if input_string >= input_criteria['less_than']:
            error_dict['failed_test'] = 'less_than'
            error_dict['error_code'] = 4025
            raise InputValidationError(error_dict)
    if 'equal_to' in input_criteria.keys():
        if input_string != input_criteria['equal_to']:
            error_dict['failed_test'] = 'equal_to'
            error_dict['error_code'] = 4026
            raise InputValidationError(error_dict)
    if 'min_length' in input_criteria.keys():
        if len(input_string) < input_criteria['min_length']:
            error_dict['failed_test'] = 'min_length'
            error_dict['error_code'] = 4012
            raise InputValidationError(error_dict)
    if 'max_length' in input_criteria.keys():
        if len(input_string) > input_criteria['max_length']:
            error_dict['failed_test'] = 'max_length'
            error_dict['error_code'] = 4013
            raise InputValidationError(error_dict)
    if 'must_not_contain' in input_criteria.keys():
        # no declared regex may match the string
        for regex in input_criteria['must_not_contain']:
            regex_pattern = re.compile(regex)
            if regex_pattern.findall(input_string):
                error_dict['failed_test'] = 'must_not_contain'
                error_dict['error_code'] = 4014
                raise InputValidationError(error_dict)
    if 'must_contain' in input_criteria.keys():
        # every declared regex must match the string
        for regex in input_criteria['must_contain']:
            regex_pattern = re.compile(regex)
            if not regex_pattern.findall(input_string):
                error_dict['failed_test'] = 'must_contain'
                error_dict['error_code'] = 4015
                raise InputValidationError(error_dict)
    if 'contains_either' in input_criteria.keys():
        # at least one declared regex must match the string
        regex_match = False
        for regex in input_criteria['contains_either']:
            regex_pattern = re.compile(regex)
            if regex_pattern.findall(input_string):
                regex_match = True
        if not regex_match:
            error_dict['failed_test'] = 'contains_either'
            error_dict['error_code'] = 4016
            raise InputValidationError(error_dict)
    if 'discrete_values' in input_criteria.keys():
        if input_string not in input_criteria['discrete_values']:
            error_dict['failed_test'] = 'discrete_values'
            error_dict['error_code'] = 4041
            raise InputValidationError(error_dict)
    if 'excluded_values' in input_criteria.keys():
        if input_string in input_criteria['excluded_values']:
            error_dict['failed_test'] = 'excluded_values'
            error_dict['error_code'] = 4042
            raise InputValidationError(error_dict)
    # TODO: validate string against identical to reference
    # TODO: run lambda function and call validation url
    return input_string
def _ingest_dict(self, input_dict, schema_dict, path_to_root):
    '''
    a helper method for ingesting keys, value pairs in a dictionary

    for each key declared in the schema, the input value is kept (after
    ingestion) if its datatype matches the declaration; otherwise the
    declared default (or an empty value of the declared datatype) is
    substituted; undeclared input keys are passed through only when the
    map allows 'extra_fields'

    :param input_dict: dictionary of input to ingest
    :param schema_dict: dictionary with schema declaration at this path
    :param path_to_root: string with dot path to this map in the model
    :return: valid_dict
    '''
    valid_dict = {}
    # construct path to root for rules
    rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
    # iterate over keys in schema dict
    for key, value in schema_dict.items():
        key_path = path_to_root
        if not key_path == '.':
            key_path += '.'
        key_path += key
        rules_key_path = re.sub('\[\d+\]', '[0]', key_path)
        # determine whether the input value's datatype matches the schema
        value_match = False
        if key in input_dict.keys():
            value_index = self._datatype_classes.index(value.__class__)
            value_type = self._datatype_names[value_index]
            try:
                v_index = self._datatype_classes.index(input_dict[key].__class__)
                v_type = self._datatype_names[v_index]
                if v_type == value_type:
                    value_match = True
            except:
                # unsupported datatype: treat as mismatch and fall back
                value_match = False
        if value_match:
            # ingest the matching input value by datatype
            if value_type == 'null':
                valid_dict[key] = input_dict[key]
            elif value_type == 'boolean':
                valid_dict[key] = self._ingest_boolean(input_dict[key], key_path)
            elif value_type == 'number':
                valid_dict[key] = self._ingest_number(input_dict[key], key_path)
            elif value_type == 'string':
                valid_dict[key] = self._ingest_string(input_dict[key], key_path)
            elif value_type == 'map':
                valid_dict[key] = self._ingest_dict(input_dict[key], schema_dict[key], key_path)
            elif value_type == 'list':
                valid_dict[key] = self._ingest_list(input_dict[key], schema_dict[key], key_path)
        else:
            # substitute declared default, else an empty value of the declared datatype
            value_type = self.keyMap[rules_key_path]['value_datatype']
            if 'default_value' in self.keyMap[rules_key_path]:
                valid_dict[key] = self.keyMap[rules_key_path]['default_value']
            elif value_type == 'null':
                valid_dict[key] = None
            elif value_type == 'boolean':
                valid_dict[key] = False
            elif value_type == 'number':
                valid_dict[key] = 0.0
                if 'integer_data' in self.keyMap[rules_key_path].keys():
                    if self.keyMap[rules_key_path]['integer_data']:
                        valid_dict[key] = 0
            elif value_type == 'string':
                valid_dict[key] = ''
            elif value_type == 'list':
                valid_dict[key] = []
            elif value_type == 'map':
                # recurse so nested maps are filled with their own defaults
                valid_dict[key] = self._ingest_dict({}, schema_dict[key], key_path)
    # add extra fields if set to True
    if self.keyMap[rules_path_to_root]['extra_fields']:
        for key, value in input_dict.items():
            if key not in valid_dict.keys():
                valid_dict[key] = value
    return valid_dict
def _ingest_list(self, input_list, schema_list, path_to_root):
    '''
    a helper method for ingesting items in a list

    keeps only items whose datatype matches the item declaration and
    which pass (or can be ingested under) the item rules; invalid items
    are silently dropped; ingestion stops early once a declared
    'max_size' is reached

    :param input_list: list of input to ingest
    :param schema_list: list with schema declaration at this path
    :param path_to_root: string with dot path to this list in the model
    :return: valid_list
    '''
    valid_list = []
    # construct max list size
    max_size = None
    rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
    if 'max_size' in self.keyMap[rules_path_to_root].keys():
        if not self.keyMap[rules_path_to_root]['max_size']:
            # a falsy max_size (e.g. 0) means no items are accepted
            return valid_list
        else:
            max_size = self.keyMap[rules_path_to_root]['max_size']
    # iterate over items in input list
    if input_list:
        # the declared item datatype comes from index 0 of the schema list
        rules_index = self._datatype_classes.index(schema_list[0].__class__)
        rules_type = self._datatype_names[rules_index]
        for i in range(len(input_list)):
            item_path = '%s[%s]' % (path_to_root, i)
            value_match = False
            try:
                item_index = self._datatype_classes.index(input_list[i].__class__)
                item_type = self._datatype_names[item_index]
                if item_type == rules_type:
                    value_match = True
            except:
                # unsupported datatype: drop the item
                value_match = False
            if value_match:
                # scalars are validated strictly; containers are ingested recursively
                # items failing validation are silently dropped
                try:
                    if item_type == 'boolean':
                        valid_list.append(self._validate_boolean(input_list[i], item_path))
                    elif item_type == 'number':
                        valid_list.append(self._validate_number(input_list[i], item_path))
                    elif item_type == 'string':
                        valid_list.append(self._validate_string(input_list[i], item_path))
                    elif item_type == 'map':
                        valid_list.append(self._ingest_dict(input_list[i], schema_list[0], item_path))
                    elif item_type == 'list':
                        valid_list.append(self._ingest_list(input_list[i], schema_list[0], item_path))
                except:
                    pass
            if isinstance(max_size, int):
                if len(valid_list) == max_size:
                    return valid_list
    return valid_list
def _ingest_number(self, input_number, path_to_root):
'''
a helper method for ingesting a number
:return: valid_number
'''
valid_number = 0.0
try:
valid_number = self._validate_number(input_number, path_to_root)
except:
rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
if 'default_value' in self.keyMap[rules_path_to_root]:
valid_number = self.keyMap[rules_path_to_root]['default_value']
elif 'integer_data' in self.keyMap[rules_path_to_root].keys():
if self.keyMap[rules_path_to_root]['integer_data']:
valid_number = 0
return valid_number
def _ingest_string(self, input_string, path_to_root):
'''
a helper method for ingesting a string
:return: valid_string
'''
valid_string = ''
try:
valid_string = self._validate_string(input_string, path_to_root)
except:
rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
if 'default_value' in self.keyMap[rules_path_to_root]:
valid_string = self.keyMap[rules_path_to_root]['default_value']
return valid_string
def _ingest_boolean(self, input_boolean, path_to_root):
'''
a helper method for ingesting a boolean
:return: valid_boolean
'''
valid_boolean = False
try:
valid_boolean = self._validate_boolean(input_boolean, path_to_root)
except:
rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
if 'default_value' in self.keyMap[rules_path_to_root]:
valid_boolean = self.keyMap[rules_path_to_root]['default_value']
return valid_boolean
def _reconstruct(self, path_to_root):
'''
a helper method for finding the schema endpoint from a path to root
:param path_to_root: string with dot path to root from
:return: list, dict, string, number, or boolean at path to root
'''
# split path to root into segments
item_pattern = re.compile('\d+\\]')
dot_pattern = re.compile('\\.|\\[')
path_segments = dot_pattern.split(path_to_root)
# construct base schema endpoint
schema_endpoint = self.schema
# reconstruct schema endpoint from segments
if path_segments[1]:
for i in range(1,len(path_segments)):
if item_pattern.match(path_segments[i]):
schema_endpoint = schema_endpoint[0]
else:
schema_endpoint = schema_endpoint[path_segments[i]]
return schema_endpoint
def _walk(self, path_to_root, record_dict):
    '''
    a helper method for finding the record endpoint from a path to root

    unlike _reconstruct, a record may contain lists with many items, so
    a path that crosses a list index segment fans out to every item and
    the method returns a list of ALL matching endpoints

    :param path_to_root: string with dot path to root from
    :param record_dict: dictionary with record to walk
    :return: list of list, dict, string, number, or boolean endpoints at path to root
    '''
    # split path to root into segments
    item_pattern = re.compile('\d+\\]')
    dot_pattern = re.compile('\\.|\\[')
    path_segments = dot_pattern.split(path_to_root)
    # construct empty fields
    record_endpoints = []
    # determine starting position
    # (a leading '.' produces an empty first segment which is discarded)
    if not path_segments[0]:
        path_segments.pop(0)
    # define internal recursive function
    def _walk_int(path_segments, record_dict):
        record_endpoint = record_dict
        for i in range(0, len(path_segments)):
            if item_pattern.match(path_segments[i]):
                # index segment: fan out across every item of the current list
                for j in range(0, len(record_endpoint)):
                    if len(path_segments) == 2:
                        # the index is the final step: collect each item
                        record_endpoints.append(record_endpoint[j])
                    else:
                        # only recurse from the FIRST index segment in the chain;
                        # later segments are handled by the recursive call
                        stop_chain = False
                        for x in range(0, i):
                            if item_pattern.match(path_segments[x]):
                                stop_chain = True
                        if not stop_chain:
                            shortened_segments = []
                            for z in range(i + 1, len(path_segments)):
                                shortened_segments.append(path_segments[z])
                            _walk_int(shortened_segments, record_endpoint[j])
            else:
                # key segment: descend only while no earlier index segment
                # has already delegated the walk to a recursive call
                stop_chain = False
                for y in range(0, i):
                    if item_pattern.match(path_segments[y]):
                        stop_chain = True
                if not stop_chain:
                    if len(path_segments) == i + 1:
                        # final segment: collect the endpoint value
                        record_endpoints.append(record_endpoint[path_segments[i]])
                    else:
                        record_endpoint = record_endpoint[path_segments[i]]
    # conduct recursive walk
    _walk_int(path_segments, record_dict)
    return record_endpoints
def validate(self, input_data, path_to_root='', object_title=''):
    '''
    a core method for validating input against the model
    input_data is only returned if all data is valid

    :param input_data: list, dict, string, number, or boolean to validate
    :param path_to_root: [optional] string with dot-path of model component
    :param object_title: [optional] string with name of input to validate
    :return: input_data (or InputValidationError)
    :raises ModelValidationError: if path_to_root or object_title arguments are invalid
    '''
    __name__ = '%s.validate' % self.__class__.__name__
    _path_arg = '%s(path_to_root="...")' % __name__
    _title_arg = '%s(object_title="...")' % __name__
    # validate input
    # (normalize path to the internal leading-dot form and check it exists)
    copy_path = path_to_root
    if path_to_root:
        if not isinstance(path_to_root, str):
            raise ModelValidationError('%s must be a string.' % _path_arg)
        else:
            if path_to_root[0] != '.':
                copy_path = '.%s' % path_to_root
            if not copy_path in self.keyMap.keys():
                raise ModelValidationError('%s does not exist in components %s.' % (_path_arg.replace('...', path_to_root), self.keyMap.keys()))
    else:
        copy_path = '.'
    if object_title:
        if not isinstance(object_title, str):
            raise ModelValidationError('%s must be a string' % _title_arg)
    # construct generic error dictionary
    error_dict = {
        'object_title': object_title,
        'model_schema': self.schema,
        'input_criteria': self.keyMap[copy_path],
        'failed_test': 'value_datatype',
        'input_path': copy_path,
        'error_value': input_data,
        'error_code': 4001
    }
    # determine value type of input data
    try:
        input_index = self._datatype_classes.index(input_data.__class__)
    except:
        # class not in the supported datatype list
        error_dict['error_value'] = input_data.__class__.__name__
        raise InputValidationError(error_dict)
    input_type = self._datatype_names[input_index]
    # validate input data type
    if input_type != self.keyMap[copy_path]['value_datatype']:
        raise InputValidationError(error_dict)
    # run helper method appropriate to data type
    if input_type == 'boolean':
        input_data = self._validate_boolean(input_data, copy_path, object_title)
    elif input_type == 'number':
        input_data = self._validate_number(input_data, copy_path, object_title)
    elif input_type == 'string':
        input_data = self._validate_string(input_data, copy_path, object_title)
    elif input_type == 'list':
        # containers need the schema declaration at this path for recursion
        schema_list = self._reconstruct(copy_path)
        input_data = self._validate_list(input_data, schema_list, copy_path, object_title)
    elif input_type == 'map':
        schema_dict = self._reconstruct(copy_path)
        input_data = self._validate_dict(input_data, schema_dict, copy_path, object_title)
    return input_data
def ingest(self, **kwargs):
    '''
    a core method to ingest and validate arbitrary keyword data

    **NOTE: data is always returned with this method**

    for each key in the model, a value is returned according
    to the following priority:

        1. value in kwargs if field passes validation test
        2. default value declared for the key in the model
        3. empty value appropriate to datatype of key in the model

    **NOTE: as long as a default value is provided for each key-
    value, returned data will be model valid

    **NOTE: if 'extra_fields' is True for a dictionary, the key-
    value pair of all fields in kwargs which are not declared in
    the model will also be added to the corresponding dictionary
    data

    **NOTE: if 'max_size' is declared for a list, method will
    stop adding input to the list once it reaches max size

    :param kwargs: key, value pairs
    :return: dictionary with keys and value
    '''
    __name__ = '%s.ingest' % self.__class__.__name__
    # delegate to the recursive dictionary ingestion helper at the model root
    return self._ingest_dict(kwargs, self.schema, '.')
def query(self, query_criteria, valid_record=None):
    '''
    a core method for querying model valid data with criteria

    **NOTE: input is only returned if all fields & qualifiers are valid for model

    :param query_criteria: dictionary with model field names and query qualifiers
    :param valid_record: dictionary with model valid record
    :return: boolean (or QueryValidationError)

    an example of how to construct the query_criteria argument:

    query_criteria = {
        '.path.to.number': {
            'min_value': 4.5
        },
        '.path.to.string': {
            'must_contain': [ '\\regex' ]
        }
    }

    **NOTE: for a full list of operators for query_criteria based upon field
    datatype, see either the query-rules.json file or REFERENCE file
    '''
    __name__ = '%s.query' % self.__class__.__name__
    _query_arg = '%s(query_criteria={...})' % __name__
    _record_arg = '%s(valid_record={...})' % __name__
    # validate input
    if not isinstance(query_criteria, dict):
        raise ModelValidationError('%s must be a dictionary.' % _query_arg)
    # convert javascript dot_path to class dot_path
    # (a bare scalar criterion is shorthand for {'equal_to': value})
    criteria_copy = {}
    equal_fields = []
    dot_fields = []
    for key, value in query_criteria.items():
        copy_key = key
        if not key:
            copy_key = '.'
        else:
            if key[0] != '.':
                copy_key = '.%s' % key
                # remember fields given without a leading dot so error
                # messages can be converted back to the caller's form
                dot_fields.append(copy_key)
        criteria_copy[copy_key] = value
        if value.__class__ in self._datatype_classes[0:4]:
            criteria_copy[copy_key] = {
                'equal_to': value
            }
            equal_fields.append(copy_key)
    # validate query criteria against query rules
    query_kwargs = {
        'fields_dict': criteria_copy,
        'fields_rules': self.queryRules,
        'declared_value': False
    }
    try:
        self._validate_fields(**query_kwargs)
    except ModelValidationError as err:
        # rewrite internal error text to match the caller's criteria shape
        message = err.error['message']
        for field in equal_fields:
            # hide the injected 'equal_to' qualifier for shorthand criteria
            equal_error = 'field %s qualifier equal_to' % field
            if message.find(equal_error) > -1:
                message = message.replace(equal_error, 'field %s' % field)
                break
        field_pattern = re.compile('ield\s(\..*?)\s')
        field_name = field_pattern.findall(message)
        if field_name:
            if field_name[0] in dot_fields:
                # strip the leading dot added during normalization
                def _replace_field(x):
                    return 'ield %s ' % x.group(1)[1:]
                message = field_pattern.sub(_replace_field, message)
        raise QueryValidationError(message)
    # query test record
    # (record matches only if every field criterion evaluates true)
    if valid_record:
        if not isinstance(valid_record, dict):
            raise ModelValidationError('%s must be a dictionary.' % _record_arg)
        for key, value in criteria_copy.items():
            eval_outcome = self._evaluate_field(valid_record, key, value)
            if not eval_outcome:
                return False
    return True
|
collectiveacuity/jsonModel
|
jsonmodel/validators.py
|
jsonModel._ingest_dict
|
python
|
def _ingest_dict(self, input_dict, schema_dict, path_to_root):
'''
a helper method for ingesting keys, value pairs in a dictionary
:return: valid_dict
'''
valid_dict = {}
# construct path to root for rules
rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
# iterate over keys in schema dict
for key, value in schema_dict.items():
key_path = path_to_root
if not key_path == '.':
key_path += '.'
key_path += key
rules_key_path = re.sub('\[\d+\]', '[0]', key_path)
value_match = False
if key in input_dict.keys():
value_index = self._datatype_classes.index(value.__class__)
value_type = self._datatype_names[value_index]
try:
v_index = self._datatype_classes.index(input_dict[key].__class__)
v_type = self._datatype_names[v_index]
if v_type == value_type:
value_match = True
except:
value_match = False
if value_match:
if value_type == 'null':
valid_dict[key] = input_dict[key]
elif value_type == 'boolean':
valid_dict[key] = self._ingest_boolean(input_dict[key], key_path)
elif value_type == 'number':
valid_dict[key] = self._ingest_number(input_dict[key], key_path)
elif value_type == 'string':
valid_dict[key] = self._ingest_string(input_dict[key], key_path)
elif value_type == 'map':
valid_dict[key] = self._ingest_dict(input_dict[key], schema_dict[key], key_path)
elif value_type == 'list':
valid_dict[key] = self._ingest_list(input_dict[key], schema_dict[key], key_path)
else:
value_type = self.keyMap[rules_key_path]['value_datatype']
if 'default_value' in self.keyMap[rules_key_path]:
valid_dict[key] = self.keyMap[rules_key_path]['default_value']
elif value_type == 'null':
valid_dict[key] = None
elif value_type == 'boolean':
valid_dict[key] = False
elif value_type == 'number':
valid_dict[key] = 0.0
if 'integer_data' in self.keyMap[rules_key_path].keys():
if self.keyMap[rules_key_path]['integer_data']:
valid_dict[key] = 0
elif value_type == 'string':
valid_dict[key] = ''
elif value_type == 'list':
valid_dict[key] = []
elif value_type == 'map':
valid_dict[key] = self._ingest_dict({}, schema_dict[key], key_path)
# add extra fields if set to True
if self.keyMap[rules_path_to_root]['extra_fields']:
for key, value in input_dict.items():
if key not in valid_dict.keys():
valid_dict[key] = value
return valid_dict
|
a helper method for ingesting keys, value pairs in a dictionary
:return: valid_dict
|
train
|
https://github.com/collectiveacuity/jsonModel/blob/1ea64c36d78add3faa7b85ff82c5ec685458c940/jsonmodel/validators.py#L1202-L1272
|
[
"def _ingest_dict(self, input_dict, schema_dict, path_to_root):\n\n '''\n a helper method for ingesting keys, value pairs in a dictionary\n\n :return: valid_dict\n '''\n\n valid_dict = {}\n\n# construct path to root for rules\n rules_path_to_root = re.sub('\\[\\d+\\]', '[0]', path_to_root)\n\n# iterate over keys in schema dict\n for key, value in schema_dict.items():\n key_path = path_to_root\n if not key_path == '.':\n key_path += '.'\n key_path += key\n rules_key_path = re.sub('\\[\\d+\\]', '[0]', key_path)\n value_match = False\n if key in input_dict.keys():\n value_index = self._datatype_classes.index(value.__class__)\n value_type = self._datatype_names[value_index]\n try:\n v_index = self._datatype_classes.index(input_dict[key].__class__)\n v_type = self._datatype_names[v_index]\n if v_type == value_type:\n value_match = True\n except:\n value_match = False\n if value_match:\n if value_type == 'null':\n valid_dict[key] = input_dict[key]\n elif value_type == 'boolean':\n valid_dict[key] = self._ingest_boolean(input_dict[key], key_path)\n elif value_type == 'number':\n valid_dict[key] = self._ingest_number(input_dict[key], key_path)\n elif value_type == 'string':\n valid_dict[key] = self._ingest_string(input_dict[key], key_path)\n elif value_type == 'map':\n valid_dict[key] = self._ingest_dict(input_dict[key], schema_dict[key], key_path)\n elif value_type == 'list':\n valid_dict[key] = self._ingest_list(input_dict[key], schema_dict[key], key_path)\n else:\n value_type = self.keyMap[rules_key_path]['value_datatype']\n if 'default_value' in self.keyMap[rules_key_path]:\n valid_dict[key] = self.keyMap[rules_key_path]['default_value']\n elif value_type == 'null':\n valid_dict[key] = None\n elif value_type == 'boolean':\n valid_dict[key] = False\n elif value_type == 'number':\n valid_dict[key] = 0.0\n if 'integer_data' in self.keyMap[rules_key_path].keys():\n if self.keyMap[rules_key_path]['integer_data']:\n valid_dict[key] = 0\n elif value_type == 'string':\n 
valid_dict[key] = ''\n elif value_type == 'list':\n valid_dict[key] = []\n elif value_type == 'map':\n valid_dict[key] = self._ingest_dict({}, schema_dict[key], key_path)\n\n# add extra fields if set to True\n if self.keyMap[rules_path_to_root]['extra_fields']:\n for key, value in input_dict.items():\n if key not in valid_dict.keys():\n valid_dict[key] = value\n\n return valid_dict\n",
"def _ingest_list(self, input_list, schema_list, path_to_root):\n\n '''\n a helper method for ingesting items in a list\n\n :return: valid_list\n '''\n\n valid_list = []\n\n# construct max list size\n max_size = None\n rules_path_to_root = re.sub('\\[\\d+\\]', '[0]', path_to_root)\n if 'max_size' in self.keyMap[rules_path_to_root].keys():\n if not self.keyMap[rules_path_to_root]['max_size']:\n return valid_list\n else:\n max_size = self.keyMap[rules_path_to_root]['max_size']\n\n# iterate over items in input list\n if input_list:\n rules_index = self._datatype_classes.index(schema_list[0].__class__)\n rules_type = self._datatype_names[rules_index]\n for i in range(len(input_list)):\n item_path = '%s[%s]' % (path_to_root, i)\n value_match = False\n try:\n item_index = self._datatype_classes.index(input_list[i].__class__)\n item_type = self._datatype_names[item_index]\n if item_type == rules_type:\n value_match = True\n except:\n value_match = False\n if value_match:\n try:\n if item_type == 'boolean':\n valid_list.append(self._validate_boolean(input_list[i], item_path))\n elif item_type == 'number':\n valid_list.append(self._validate_number(input_list[i], item_path))\n elif item_type == 'string':\n valid_list.append(self._validate_string(input_list[i], item_path))\n elif item_type == 'map':\n valid_list.append(self._ingest_dict(input_list[i], schema_list[0], item_path))\n elif item_type == 'list':\n valid_list.append(self._ingest_list(input_list[i], schema_list[0], item_path))\n except:\n pass\n if isinstance(max_size, int):\n if len(valid_list) == max_size:\n return valid_list\n\n return valid_list\n",
"def _ingest_number(self, input_number, path_to_root):\n\n '''\n a helper method for ingesting a number\n\n :return: valid_number\n '''\n\n valid_number = 0.0\n\n try:\n valid_number = self._validate_number(input_number, path_to_root)\n except:\n rules_path_to_root = re.sub('\\[\\d+\\]', '[0]', path_to_root)\n if 'default_value' in self.keyMap[rules_path_to_root]:\n valid_number = self.keyMap[rules_path_to_root]['default_value']\n elif 'integer_data' in self.keyMap[rules_path_to_root].keys():\n if self.keyMap[rules_path_to_root]['integer_data']:\n valid_number = 0\n\n return valid_number\n",
"def _ingest_string(self, input_string, path_to_root):\n\n '''\n a helper method for ingesting a string\n\n :return: valid_string\n '''\n\n valid_string = ''\n\n try:\n valid_string = self._validate_string(input_string, path_to_root)\n except:\n rules_path_to_root = re.sub('\\[\\d+\\]', '[0]', path_to_root)\n if 'default_value' in self.keyMap[rules_path_to_root]:\n valid_string = self.keyMap[rules_path_to_root]['default_value']\n\n return valid_string\n",
"def _ingest_boolean(self, input_boolean, path_to_root):\n\n '''\n a helper method for ingesting a boolean\n\n :return: valid_boolean\n '''\n\n valid_boolean = False\n\n try:\n valid_boolean = self._validate_boolean(input_boolean, path_to_root)\n except:\n rules_path_to_root = re.sub('\\[\\d+\\]', '[0]', path_to_root)\n if 'default_value' in self.keyMap[rules_path_to_root]:\n valid_boolean = self.keyMap[rules_path_to_root]['default_value']\n\n return valid_boolean\n"
] |
class jsonModel(object):
__rules__ = jsonLoader('jsonmodel', 'models/model-rules.json')
def __init__(self, data_model, query_rules=None):
'''
a method for testing data model declaration & initializing the class
:param data_model: dictionary with json model architecture
:param query_rules: [optional] dictionary with valid field type qualifiers
:return: object with jsonModel methods
'''
# validate schema input
if not isinstance(data_model, dict):
raise ModelValidationError('Data model must be a dictionary.')
elif 'schema' not in data_model.keys():
raise ModelValidationError('Data model must have a schema key.')
elif not isinstance(data_model['schema'], dict):
raise ModelValidationError('Value for the data model "schema" field must be a dictionary.')
elif not data_model['schema']:
raise ModelValidationError('Data model "schema" field must not be empty.')
# construct base methods
from copy import deepcopy
data_model = deepcopy(data_model)
self.schema = data_model['schema']
model_map = mapModel(self.schema)
self.keyName = model_map.keyName
self.keyCriteria = model_map.keyCriteria
# construct protected type classes
self._datatype_names = mapModel._datatype_names
self._datatype_classes = mapModel._datatype_classes
# validate absence of item designators in keys
item_pattern = re.compile('\[\d+\]')
for i in range(len(self.keyName)):
patterns_found = item_pattern.findall(self.keyName[i])
if patterns_found:
for designator in patterns_found:
if designator != '[0]':
message = 'Key name for schema field %s must not contain the item designator pattern %s' % (self.keyName[i], designator)
raise ModelValidationError(message)
# validate existence of first item in list declarations
key_set = set(self.keyName)
for i in range(len(self.keyName)):
if self.keyCriteria[i]['value_datatype'] == 'list':
item_key = self.keyName[i] + '[0]'
if not item_key in key_set:
message = 'Schema field %s must declare an initial item for the list.' % self.keyName[i]
raise ModelValidationError(message)
# alter list requirement if first item is empty
else:
item_index = self.keyName.index(item_key)
if not self.keyCriteria[item_index]['required_field']:
self.keyCriteria[i]['required_field'] = False
# validate title input & construct title method
self.title = ''
if 'title' in data_model.keys():
if not isinstance(data_model['title'], str):
raise ModelValidationError('Value for model title must be a string.')
self.title = data_model['title']
# validate description input & construct description method
self.description = ''
if 'description' in data_model.keys():
if not isinstance(data_model['description'], str):
raise ModelValidationError('Value for model description must be a string.')
self.description = data_model['description']
# validate url input & construct title method
self.url = ''
if 'url' in data_model.keys():
if not isinstance(data_model['url'], str):
raise ModelValidationError('Value for model url must be a string.')
self.title = data_model['url']
# validate metadata input & construct metadata method
self.metadata = {}
if 'metadata' in data_model.keys():
if not isinstance(data_model['metadata'], dict):
raise ModelValidationError('Value for model metadata must be a dictionary.')
self.metadata = data_model['metadata']
# validate max size input & construct maxSize property
# self.maxSize = None
# if 'max_size' in data_model.keys():
# if not isinstance(data_model['max_size'], int):
# raise ModelValidationError('Value for model max_size must be a positive integer.')
# elif data_model['max_size'] < 0:
# raise ModelValidationError('Value for model max_size must be a positive integer.')
# elif data_model['max_size']:
# self.maxSize = data_model['max_size']
# validate components input & construct component property
self.components = {}
if 'components' in data_model.keys():
if not isinstance(data_model['components'], dict):
raise ModelValidationError('Value for model components must be a dictionary.')
self.components = self._validate_fields(data_model['components'], self.__rules__['components'])
# construct keyMap fields from key names and key criteria
self.keyMap = {}
for i in range(len(self.keyName)):
self.keyMap[self.keyName[i]] = self.keyCriteria[i]
for key, value in self.components.items():
# convert javascript dot_path to class dot_path
dot_key = ''
if not key:
dot_key = '.'
else:
if key[0] != '.':
dot_key = '.%s' % key
# add component declarations to keyMap
if key in self.keyMap.keys():
for k, v in self.components[key].items():
self.keyMap[key][k] = v
elif dot_key and dot_key in self.keyMap.keys():
for k, v in self.components[key].items():
self.keyMap[dot_key][k] = v
# validate default values in lists
self._validate_defaults(self.keyMap)
# construct queryRules property from class model rules
self.queryRules = {}
for key, value in self.__rules__['components'].items():
remove_from_query = [ 'required_field', 'default_value', 'example_values', 'field_title', 'field_position', 'field_description', 'field_metadata', 'extra_fields' ]
field_qualifiers = {
'value_exists': False
}
for k, v in value.items():
if k not in remove_from_query:
field_qualifiers[k] = v
self.queryRules[key] = field_qualifiers
# validate query rules input and replace queryRules property
if query_rules:
if not isinstance(query_rules, dict):
message = 'Value for query rules input must be a dictionary.'
raise ModelValidationError(message)
input_set = set(query_rules.keys())
req_set = set(self.queryRules.keys())
if input_set - req_set:
message = 'Query rules input may only have %s field key names.' % req_set
raise ModelValidationError(message)
elif req_set - input_set:
message = 'Query rules input must have all %s field key names.' % req_set
raise ModelValidationError(message)
for key in req_set:
if not isinstance(query_rules[key], dict):
message = 'Value for query rules %s field must be a dictionary.' % key
raise ModelValidationError(message)
input_qualifier_set = set(query_rules[key].keys())
req_qualifier_set = set(self.queryRules[key].keys())
if input_qualifier_set - req_qualifier_set:
message = 'Query rules field %s may only have qualifiers %s' % (key, req_qualifier_set)
raise ModelValidationError(message)
for k, v in query_rules[key].items():
if v.__class__ != self.queryRules[key][k].__class__:
qualifier_index = self._datatype_classes.index(self.queryRules[key][k].__class__)
qualifier_type = self._datatype_names[qualifier_index]
message = 'Value for query rules field %s qualifier %s must be a "%s" datatype.' % (key, k, qualifier_type)
raise ModelValidationError(message)
self.queryRules = query_rules
def _validate_fields(self, fields_dict, fields_rules, declared_value=True):
# validate key names in fields
for key, value in fields_dict.items():
# convert javascript dot_path to class dot_path
if not key:
key = '.'
else:
if key[0] != '.':
key = '.%s' % key
if key not in self.keyName:
raise ModelValidationError('Field %s is not a field declared in model schema.' % key)
elif not isinstance(value, dict):
raise ModelValidationError('Value for field %s must be a dictionary.' % key)
# validate field criteria are appropriate to field datatype
value_type = self.keyCriteria[self.keyName.index(key)]['value_datatype']
type_dict = {}
if value_type == 'string':
type_dict = fields_rules['.string_fields']
elif value_type == 'number':
type_dict = fields_rules['.number_fields']
elif value_type == 'boolean':
type_dict = fields_rules['.boolean_fields']
elif value_type == 'list':
type_dict = fields_rules['.list_fields']
elif value_type == 'map':
type_dict = fields_rules['.map_fields']
elif value_type == 'null':
type_dict = fields_rules['.null_fields']
if set(value.keys()) - set(type_dict.keys()):
raise ModelValidationError('Field %s may only have datatype %s qualifiers %s.' % (key, value_type, set(type_dict.keys())))
# validate criteria qualifier values are appropriate datatype
for k, v in value.items():
v_index = self._datatype_classes.index(v.__class__)
v_type = self._datatype_names[v_index]
qualifier_index = self._datatype_classes.index(type_dict[k].__class__)
qualifier_type = self._datatype_names[qualifier_index]
if v_type != qualifier_type:
message = 'Value for field %s qualifier %s must be a %s datatype.' % (key, k, qualifier_type)
raise ModelValidationError(message)
if qualifier_type == 'number':
if isinstance(type_dict[k], int):
if not isinstance(v, int):
message = 'Value for field %s qualifier %s must be an integer.' % (key, k)
raise ModelValidationError(message)
# validate internal logic of each qualifier value declaration
if k in ('must_not_contain', 'must_contain', 'contains_either'):
for item in v:
if not isinstance(item, str):
message = 'Each item in list field %s qualifier %s must be a string.' % (key, k)
raise ModelValidationError(message)
if k in ('min_length', 'max_length', 'min_size', 'max_size'):
if v < 0:
message = 'Value for field %s qualifier %s cannot be negative.' % (key, k)
raise ModelValidationError(message)
if k in ('discrete_values', 'excluded_values', 'example_values'):
for item in v:
if value_type == 'number':
if not isinstance(item, int) and not isinstance(item, float):
message = 'Each item in field %s qualifier %s list must be a number.' % (key, k)
raise ModelValidationError(message)
elif not isinstance(item, str):
message = 'Each item in list for field %s qualifier %s must be a string.' % (key, k)
raise ModelValidationError(message)
if k == 'identical_to':
if not v in self.keyName:
message = 'Value "%s" for field %s qualifier %s not found in components keys.' % (v, key, k)
raise ModelValidationError(message)
if k == 'unique_values':
if v:
item_name = key + '[0]'
item_type = self.keyCriteria[self.keyName.index(item_name)]['value_datatype']
if not item_type in ('number', 'string'):
message = 'Field %s[0] must be either a string or number if qualifier "unique_values": true' % key
raise ModelValidationError(message)
# validate lack of other qualifiers if value exist is false
if 'value_exists' in value.keys():
if not value['value_exists']:
if set(value.keys()) - {'value_exists'}:
message = 'If field %s qualifier value_exists: false, field may not have other qualifiers.' % key
raise ModelValidationError(message)
# validate size qualifiers against each other
size_qualifiers = ['min_size', 'max_size']
for qualifier in size_qualifiers:
if qualifier in value.keys():
test_value = value[qualifier]
value_path = 'field %s qualifier %s' % (key, qualifier)
header = 'Value %s for %s' % (test_value, value_path)
if 'min_size' in value.keys():
if test_value < value['min_size']:
message = '%s must not be less than "min_size": %s' % (header, value['min_size'])
raise ModelValidationError(message)
if 'max_size' in value.keys():
if test_value > value['max_size']:
message = '%s must not be greater than "max_size": %s' % (header, value['max_size'])
raise ModelValidationError(message)
# validate length qualifiers against each other
length_qualifiers = ['min_length', 'max_length']
for qualifier in length_qualifiers:
if qualifier in value.keys():
test_value = value[qualifier]
value_path = 'field %s qualifier %s' % (key, qualifier)
header = 'Value %s for %s' % (test_value, value_path)
if 'min_length' in value.keys():
if test_value < value['min_length']:
message = '%s must be at least "min_length": %s' % (header, value['min_length'])
raise ModelValidationError(message)
if 'max_length' in value.keys():
if test_value > value['max_length']:
message = '%s cannot be more than "max_length": %s' % (header, value['max_length'])
raise ModelValidationError(message)
# validate range qualifiers against each other & length qualifiers
range_qualifiers = ['min_value', 'max_value', 'greater_than', 'less_than', 'equal_to']
for qualifier in range_qualifiers:
if qualifier in value.keys():
test_value = value[qualifier]
value_path = 'field %s qualifier %s' % (key, qualifier)
quote_text = ''
if isinstance(test_value, str):
quote_text = '"'
header = 'Value %s%s%s for %s' % (quote_text, test_value, quote_text, value_path)
if 'min_value' in value.keys():
if test_value < value['min_value']:
message = '%s must not be less than "min_value": %s' % (header, value['min_value'])
raise ModelValidationError(message)
if 'max_value' in value.keys():
if test_value > value['max_value']:
message = '%s must not be greater than "max_value": %s' % (header, value['max_value'])
raise ModelValidationError(message)
if 'greater_than' in value.keys():
if test_value <= value['greater_than'] and not qualifier == 'greater_than':
message = '%s must be "greater_than": %s' % (header, value['greater_than'])
raise ModelValidationError(message)
if 'less_than' in value.keys():
if test_value >= value['less_than'] and not qualifier == 'less_than':
message = '%s must be "less_than": %s' % (header, value['less_than'])
raise ModelValidationError(message)
if 'min_length' in value.keys():
if len(test_value) < value['min_length']:
message = '%s must be at least "min_length": %s' % (header, value['min_length'])
raise ModelValidationError(message)
if 'max_length' in value.keys():
if len(test_value) > value['max_length']:
message = '%s cannot be more than "max_length": %s' % (header, value['max_length'])
raise ModelValidationError(message)
if 'integer_data' in value.keys():
if value['integer_data']:
if not isinstance(test_value, int):
message = '%s must be an "integer_data".' % header
raise ModelValidationError(message)
if 'must_not_contain' in value.keys():
for regex in value['must_not_contain']:
regex_pattern = re.compile(regex)
if regex_pattern.findall(test_value):
message = '%s matches regex pattern in "must_not_contain": ["%s"]' % (header, regex)
raise ModelValidationError(message)
if 'must_contain' in value.keys():
for regex in value['must_contain']:
regex_pattern = re.compile(regex)
if not regex_pattern.findall(test_value):
message = '%s does not match regex pattern in "must_contain": ["%s"].' % (header, regex)
raise ModelValidationError(message)
if 'contains_either' in value.keys():
regex_match = False
regex_patterns = []
for regex in value['contains_either']:
regex_patterns.append(regex)
regex_pattern = re.compile(regex)
if regex_pattern.findall(test_value):
regex_match = True
if not regex_match:
message = '%s does not match any regex patterns in "contains_either": %s' % (header, regex_patterns)
raise ModelValidationError(message)
if 'byte_data' in value.keys():
if value['byte_data']:
if qualifier != 'equal_to':
message = '%s cannot be used with base64 encoded "byte_data".' % header
raise ModelValidationError(message)
# validate discrete value qualifiers against other criteria
schema_field = self.keyCriteria[self.keyName.index(key)]
discrete_qualifiers = ['declared_value', 'default_value', 'excluded_values', 'discrete_values', 'example_values']
for qualifier in discrete_qualifiers:
test_qualifier = False
if qualifier in schema_field:
test_qualifier = True
if qualifier == 'declared_value' and not schema_field[qualifier]:
test_qualifier = False
if qualifier in value.keys() or (test_qualifier and declared_value):
multiple_values = False
if qualifier in value.keys():
if isinstance(value[qualifier], list):
test_list = value[qualifier]
multiple_values = True
else:
test_list = [value[qualifier]]
else:
test_list = [schema_field[qualifier]]
value_path = 'field %s qualifier %s' % (key, qualifier)
for i in range(len(test_list)):
test_value = test_list[i]
quote_text = ''
if isinstance(test_value, str):
quote_text = '"'
item_text = ''
if multiple_values:
item_text = '[%s]' % i
header = 'Value %s%s%s for %s%s' % (quote_text, test_value, quote_text, value_path, item_text)
if 'min_value' in value.keys():
if test_value < value['min_value']:
message = '%s must not be less than "min_value": %s' % (header, value['min_value'])
raise ModelValidationError(message)
if 'max_value' in value.keys():
if test_value > value['max_value']:
message = '%s must not be greater than "max_value": %s' % (header, value['max_value'])
raise ModelValidationError(message)
if 'equal_to' in value.keys():
if test_value != value['equal_to']:
if qualifier != 'declared_value' and isinstance(test_value, bool):
message = '%s must be "equal_to": %s' % (header, value['equal_to'])
raise ModelValidationError(message)
if 'greater_than' in value.keys():
if test_value <= value['greater_than']:
message = '%s must be "greater_than": %s' % (header, value['greater_than'])
raise ModelValidationError(message)
if 'less_than' in value.keys():
if test_value >= value['less_than']:
message = '%s must be "less_than": %s' % (header, value['less_than'])
raise ModelValidationError(message)
if 'integer_data' in value.keys():
if value['integer_data']:
if not isinstance(test_value, int):
message = '%s must be an "integer_data".' % header
raise ModelValidationError(message)
if 'min_length' in value.keys():
if len(test_value) < value['min_length']:
message = '%s must be at least "min_length": %s' % (header, value['min_length'])
raise ModelValidationError(message)
if 'max_length' in value.keys():
if len(test_value) > value['max_length']:
message = '%s cannot be more than "max_length": %s' % (header, value['max_length'])
raise ModelValidationError(message)
if 'must_not_contain' in value.keys():
for regex in value['must_not_contain']:
regex_pattern = re.compile(regex)
if regex_pattern.findall(test_value):
message = '%s matches regex pattern in "must_not_contain": ["%s"]' % (header, regex)
raise ModelValidationError(message)
if 'must_contain' in value.keys():
for regex in value['must_contain']:
regex_pattern = re.compile(regex)
if not regex_pattern.findall(test_value):
message = '%s does not match regex pattern in "must_contain": ["%s"]' % (header, regex)
raise ModelValidationError(message)
if 'contains_either' in value.keys():
regex_match = False
regex_patterns = []
for regex in value['contains_either']:
regex_patterns.append(regex)
regex_pattern = re.compile(regex)
if regex_pattern.findall(test_value):
regex_match = True
if not regex_match:
message = '%s does not match any regex patterns in "contains_either": %s' % (header, regex_patterns)
raise ModelValidationError(message)
if 'byte_data' in value.keys():
message = '%s cannot be base64 decoded to "byte_data".' % header
try:
decoded_bytes = b64decode(test_value)
except:
raise ModelValidationError(message)
if not isinstance(decoded_bytes, bytes):
raise ModelValidationError(message)
# validate discrete value qualifiers against each other
for qualifier in discrete_qualifiers:
test_qualifier = False
if qualifier in schema_field:
test_qualifier = True
if qualifier == 'declared_value' and not schema_field[qualifier]:
test_qualifier = False
if qualifier in value.keys() or (test_qualifier and declared_value):
multiple_values = False
if qualifier in value.keys():
if isinstance(value[qualifier], list):
test_list = value[qualifier]
multiple_values = True
else:
test_list = [value[qualifier]]
else:
test_list = [schema_field[qualifier]]
value_path = 'field %s qualifier %s' % (key, qualifier)
for i in range(len(test_list)):
test_value = test_list[i]
quote_text = ''
if isinstance(test_value, str):
quote_text = '"'
item_text = ''
if multiple_values:
item_text = '[%s]' % i
header = 'Value %s%s%s for %s%s' % (quote_text, test_value, quote_text, value_path, item_text)
if 'excluded_values' in value.keys():
if not qualifier == 'excluded_values':
if test_value in value['excluded_values']:
message = '%s cannot be one of "excluded_values": %s.' % (header, value['excluded_values'])
raise ModelValidationError(message)
if 'discrete_values' in value.keys():
if not qualifier == 'excluded_values':
if test_value not in value['discrete_values']:
message = '%s must be one of "discrete_values": %s' % (header, value['discrete_values'])
raise ModelValidationError(message)
return fields_dict
def _validate_defaults(self, fields_dict):
# validate key names in fields
for key, value in fields_dict.items():
# retrieve value type and type dict
value_type = self.keyCriteria[self.keyName.index(key)]['value_datatype']
# validate discrete value qualifiers against other criteria
qualifier = 'default_value'
if qualifier in value.keys():
multiple_values = False
if isinstance(value[qualifier], list):
test_list = value[qualifier]
multiple_values = True
else:
test_list = [value[qualifier]]
value_path = 'field %s qualifier %s' % (key, qualifier)
for i in range(len(test_list)):
test_value = test_list[i]
quote_text = ''
if isinstance(test_value, str):
quote_text = '"'
item_text = ''
if multiple_values:
item_text = '[%s]' % i
qualifier_text = value_path + item_text
header = 'Value %s%s%s for %s' % (quote_text, test_value, quote_text, qualifier_text)
if value_type == 'list':
default_item_key = '%s[0]' % key
try:
self.validate(test_value, default_item_key, object_title=header)
except Exception as err:
raise ModelValidationError(str(err).strip().replace('field %s' % default_item_key, qualifier_text))
return fields_dict
def _evaluate_field(self, record_dict, field_name, field_criteria):
''' a helper method for evaluating record values based upon query criteria
:param record_dict: dictionary with model valid data to evaluate
:param field_name: string with path to root of query field
:param field_criteria: dictionary with query operators and qualifiers
:return: boolean (True if no field_criteria evaluate to false)
'''
# determine value existence criteria
value_exists = True
if 'value_exists' in field_criteria.keys():
if not field_criteria['value_exists']:
value_exists = False
# validate existence of field
field_exists = True
try:
record_values = self._walk(field_name, record_dict)
except:
field_exists = False
# evaluate existence query criteria
if value_exists != field_exists:
return False
elif not value_exists:
return True
# convert javascript dot_path to class dot_path
field_key = field_name
if not field_name:
field_key = '.'
else:
if field_name[0] != '.':
field_key = '.%s' % field_name
# evaluate other query criteria
for key, value in field_criteria.items():
if key in ('min_size', 'min_length'):
found = False
if self.keyMap[field_key]['value_datatype'] == 'map':
for record_value in record_values:
record_size = sys.getsizeof(json.dumps(str(record_value)).replace(' ','')) - 51
if record_size >= value:
found = True
break
else:
for record_value in record_values:
if len(record_value) >= value:
found = True
break
if not found:
return False
elif key in ('max_size', 'max_length'):
found = False
if self.keyMap[field_key]['value_datatype'] == 'map':
for record_value in record_values:
record_size = sys.getsizeof(json.dumps(str(record_value)).replace(' ','')) - 51
if record_size <= value:
found = True
break
else:
for record_value in record_values:
if len(record_value) <= value:
found = True
break
if not found:
return False
elif key == 'min_value':
found = False
for record_value in record_values:
if record_value >= value:
found = True
break
if not found:
return False
elif key == 'max_value':
found = False
for record_value in record_values:
if record_value <= value:
found = True
break
if not found:
return False
elif key == 'equal_to':
found = False
for record_value in record_values:
if record_value == value:
found = True
break
if not found:
return False
elif key == 'greater_than':
found = False
for record_value in record_values:
if record_value > value:
found = True
break
if not found:
return False
elif key == 'less_than':
found = False
for record_value in record_values:
if record_value < value:
found = True
break
if not found:
return False
elif key == 'excluded_values':
for record_value in record_values:
if record_value in value:
return False
elif key == 'discrete_values':
found = False
for record_value in record_values:
if record_value in value:
found = True
break
if not found:
return False
elif key == 'integer_data':
found = False
dummy_int = 1
for record_value in record_values:
if record_value.__class__ == dummy_int.__class__:
found = True
break
if value != found:
return False
elif key == 'byte_data':
found = False
for record_value in record_values:
try:
decoded_bytes = b64decode(record_value)
except:
decoded_bytes = ''
if isinstance(decoded_bytes, bytes):
found = True
break
if value != found:
return False
elif key == 'must_contain':
for regex in value:
regex_pattern = re.compile(regex)
found = False
for record_value in record_values:
if regex_pattern.findall(record_value):
found = True
break
if not found:
return False
elif key == 'must_not_contain':
for regex in value:
regex_pattern = re.compile(regex)
found = False
for record_value in record_values:
if regex_pattern.findall(record_value):
found = True
break
if found:
return False
elif key == 'contains_either':
found = False
for regex in value:
regex_pattern = re.compile(regex)
for record_value in record_values:
if regex_pattern.findall(record_value):
found = True
break
if found:
break
if not found:
return False
elif key == 'unique_values':
for record_value in record_values:
unique_values = True
if len(record_value) != len(set(record_value)):
unique_values = False
if value != unique_values:
return False
return True
def _validate_dict(self, input_dict, schema_dict, path_to_root, object_title=''):
    ''' a helper method for recursively validating keys in dictionaries

    :param input_dict: dictionary of input data to validate
    :param schema_dict: dictionary with the corresponding branch of the model schema
    :param path_to_root: string with dot path from model root to this dictionary
    :param object_title: [optional] string with name of input used in error reports
    :return input_dict
    '''

    # reconstruct key path to current dictionary in model
    # (item designators like [3] are normalized to [0], the declared item)
    rules_top_level_key = re.sub('\[\d+\]', '[0]', path_to_root)
    map_rules = self.keyMap[rules_top_level_key]

    # construct list error report template
    map_error = {
        'object_title': object_title,
        'model_schema': self.schema,
        'input_criteria': map_rules,
        'failed_test': 'value_datatype',
        'input_path': path_to_root,
        'error_value': 0,
        'error_code': 4001
    }

    # validate map size
    # NOTE(review): size heuristic = getsizeof of the whitespace-stripped,
    # json-dumped str() of the dict minus 51 bytes of fixed overhead —
    # presumably an approximation of serialized payload size; confirm
    if 'min_size' in map_rules.keys():
        input_size = sys.getsizeof(json.dumps(str(input_dict)).replace(' ','')) - 51
        if input_size < map_rules['min_size']:
            map_error['failed_test'] = 'min_size'
            map_error['error_value'] = input_size
            map_error['error_code'] = 4031
            raise InputValidationError(map_error)
    if 'max_size' in map_rules.keys():
        input_size = sys.getsizeof(json.dumps(str(input_dict)).replace(' ','')) - 51
        if input_size > map_rules['max_size']:
            map_error['failed_test'] = 'max_size'
            map_error['error_value'] = input_size
            map_error['error_code'] = 4032
            raise InputValidationError(map_error)

    # construct lists of keys in input dictionary
    # input_keys holds dot paths; input_key_list holds the bare key names
    input_keys = []
    input_key_list = []
    for key in input_dict.keys():
        error_dict = {
            'object_title': object_title,
            'model_schema': self.schema,
            'input_criteria': self.keyMap[rules_top_level_key],
            'failed_test': 'key_datatype',
            'input_path': path_to_root,
            'error_value': key,
            'error_code': 4004
        }
        # NOTE(review): this assignment mutates the shared keyMap criteria
        # entry (input_criteria is a reference, not a copy)
        error_dict['input_criteria']['key_datatype'] = 'string'
        if path_to_root == '.':
            # keys at the root are not prefixed with an extra dot
            if not isinstance(key, str):
                input_key_name = path_to_root + str(key)
                error_dict['input_path'] = input_key_name
                raise InputValidationError(error_dict)
            input_key_name = path_to_root + key
        else:
            if not isinstance(key, str):
                input_key_name = path_to_root + '.' + str(key)
                error_dict['input_path'] = input_key_name
                raise InputValidationError(error_dict)
            input_key_name = path_to_root + '.' + key
        input_keys.append(input_key_name)
        input_key_list.append(key)

    # TODO: validate top-level key and values against identical to reference
    # TODO: run lambda function and call validation

    # construct lists of keys in schema dictionary
    # max_keys: every key the schema permits; req_keys: keys it requires
    max_keys = []
    max_key_list = []
    req_keys = []
    req_key_list = []
    for key in schema_dict.keys():
        if path_to_root == '.':
            schema_key_name = path_to_root + key
        else:
            schema_key_name = path_to_root + '.' + key
        max_keys.append(schema_key_name)
        max_key_list.append(key)
        rules_schema_key_name = re.sub('\[\d+\]', '[0]', schema_key_name)
        if self.keyMap[rules_schema_key_name]['required_field']:
            req_keys.append(schema_key_name)
            req_key_list.append(key)

    # validate existence of required fields
    missing_keys = set(req_keys) - set(input_keys)
    if missing_keys:
        error_dict = {
            'object_title': object_title,
            'model_schema': self.schema,
            'input_criteria': self.keyMap[rules_top_level_key],
            'failed_test': 'required_field',
            'input_path': path_to_root,
            'error_value': list(missing_keys),
            'error_code': 4002
        }
        # NOTE(review): mutates the shared keyMap criteria entry
        error_dict['input_criteria']['required_keys'] = req_keys
        raise InputValidationError(error_dict)

    # validate existence of extra fields
    # extra keys only fail when extra_fields is falsy for this map
    extra_keys = set(input_keys) - set(max_keys)
    if extra_keys and not self.keyMap[rules_top_level_key]['extra_fields']:
        extra_key_list = []
        for key in extra_keys:
            # strip the path prefix so the report shows the bare key name
            pathless_key = re.sub(rules_top_level_key, '', key, count=1)
            extra_key_list.append(pathless_key)
        error_dict = {
            'object_title': object_title,
            'model_schema': self.schema,
            'input_criteria': self.keyMap[rules_top_level_key],
            'failed_test': 'extra_fields',
            'input_path': path_to_root,
            'error_value': extra_key_list,
            'error_code': 4003
        }
        # NOTE(review): mutates the shared keyMap criteria entry
        error_dict['input_criteria']['maximum_scope'] = max_key_list
        raise InputValidationError(error_dict)

    # validate datatype of value
    # keys outside max_keys are permitted extras and skipped entirely
    for key, value in input_dict.items():
        if path_to_root == '.':
            input_key_name = path_to_root + key
        else:
            input_key_name = path_to_root + '.' + key
        rules_input_key_name = re.sub('\[\d+\]', '[0]', input_key_name)
        if input_key_name in max_keys:
            input_criteria = self.keyMap[rules_input_key_name]
            error_dict = {
                'object_title': object_title,
                'model_schema': self.schema,
                'input_criteria': input_criteria,
                'failed_test': 'value_datatype',
                'input_path': input_key_name,
                'error_value': value,
                'error_code': 4001
            }
            try:
                # a class not in the supported datatype list raises ValueError
                value_index = self._datatype_classes.index(value.__class__)
            except:
                error_dict['error_value'] = value.__class__.__name__
                raise InputValidationError(error_dict)
            value_type = self._datatype_names[value_index]
            if input_criteria['value_datatype'] == 'null':
                # a null declaration accepts any datatype
                pass
            else:
                if value_type != input_criteria['value_datatype']:
                    raise InputValidationError(error_dict)
            # call appropriate validation sub-routine for datatype of value
            # (values are replaced in place so sub-routines may normalize them)
            if value_type == 'boolean':
                input_dict[key] = self._validate_boolean(value, input_key_name, object_title)
            elif value_type == 'number':
                input_dict[key] = self._validate_number(value, input_key_name, object_title)
            elif value_type == 'string':
                input_dict[key] = self._validate_string(value, input_key_name, object_title)
            elif value_type == 'map':
                input_dict[key] = self._validate_dict(value, schema_dict[key], input_key_name, object_title)
            elif value_type == 'list':
                input_dict[key] = self._validate_list(value, schema_dict[key], input_key_name, object_title)

    # set default values for empty optional fields
    for key in max_key_list:
        if key not in input_key_list:
            indexed_key = max_keys[max_key_list.index(key)]
            if indexed_key in self.components.keys():
                if 'default_value' in self.components[indexed_key]:
                    input_dict[key] = self.components[indexed_key]['default_value']

    return input_dict
def _validate_list(self, input_list, schema_list, path_to_root, object_title=''):
    '''
    a helper method for recursively validating items in a list

    :param input_list: list of input data to validate
    :param schema_list: list with the corresponding branch of the model schema
    :param path_to_root: string with dot path from model root to this list
    :param object_title: [optional] string with name of input used in error reports
    :return: input_list
    '''

    # construct rules for list and items
    # every item in the list is validated against the rules of the first
    # declared item ([0]) in the model
    rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
    list_rules = self.keyMap[rules_path_to_root]
    initial_key = rules_path_to_root + '[0]'
    item_rules = self.keyMap[initial_key]

    # construct list error report template
    list_error = {
        'object_title': object_title,
        'model_schema': self.schema,
        'input_criteria': list_rules,
        'failed_test': 'value_datatype',
        'input_path': path_to_root,
        'error_value': 0,
        'error_code': 4001
    }

    # validate list rules
    if 'min_size' in list_rules.keys():
        if len(input_list) < list_rules['min_size']:
            list_error['failed_test'] = 'min_size'
            list_error['error_value'] = len(input_list)
            list_error['error_code'] = 4031
            raise InputValidationError(list_error)
    if 'max_size' in list_rules.keys():
        if len(input_list) > list_rules['max_size']:
            list_error['failed_test'] = 'max_size'
            list_error['error_value'] = len(input_list)
            list_error['error_code'] = 4032
            raise InputValidationError(list_error)

    # construct item error report template
    # (mutated in place per item before any raise)
    item_error = {
        'object_title': object_title,
        'model_schema': self.schema,
        'input_criteria': item_rules,
        'failed_test': 'value_datatype',
        'input_path': initial_key,
        'error_value': None,
        'error_code': 4001
    }

    # validate datatype of items
    for i in range(len(input_list)):
        input_path = path_to_root + '[%s]' % i
        item = input_list[i]
        item_error['input_path'] = input_path
        try:
            # a class not in the supported datatype list raises ValueError
            item_index = self._datatype_classes.index(item.__class__)
        except:
            item_error['error_value'] = item.__class__.__name__
            raise InputValidationError(item_error)
        item_type = self._datatype_names[item_index]
        item_error['error_value'] = item
        if item_rules['value_datatype'] == 'null':
            # a null declaration accepts any datatype
            pass
        else:
            if item_type != item_rules['value_datatype']:
                raise InputValidationError(item_error)
        # call appropriate validation sub-routine for datatype of item
        # (items are replaced in place so sub-routines may normalize them)
        if item_type == 'boolean':
            input_list[i] = self._validate_boolean(item, input_path, object_title)
        elif item_type == 'number':
            input_list[i] = self._validate_number(item, input_path, object_title)
        elif item_type == 'string':
            input_list[i] = self._validate_string(item, input_path, object_title)
        elif item_type == 'map':
            input_list[i] = self._validate_dict(item, schema_list[0], input_path, object_title)
        elif item_type == 'list':
            input_list[i] = self._validate_list(item, schema_list[0], input_path, object_title)

    # validate unique values in list
    # NOTE(review): set() requires hashable items, so this qualifier
    # presumably only applies to lists of scalars — confirm against docs
    if 'unique_values' in list_rules.keys():
        if len(set(input_list)) < len(input_list):
            list_error['failed_test'] = 'unique_values'
            list_error['error_value'] = input_list
            list_error['error_code'] = 4033
            raise InputValidationError(list_error)

    # TODO: validate top-level item values against identical to reference
    # TODO: run lambda function and call validation url

    return input_list
def _validate_number(self, input_number, path_to_root, object_title=''):
'''
a helper method for validating properties of a number
:return: input_number
'''
rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
input_criteria = self.keyMap[rules_path_to_root]
error_dict = {
'object_title': object_title,
'model_schema': self.schema,
'input_criteria': input_criteria,
'failed_test': 'value_datatype',
'input_path': path_to_root,
'error_value': input_number,
'error_code': 4001
}
if 'integer_data' in input_criteria.keys():
if input_criteria['integer_data'] and not isinstance(input_number, int):
error_dict['failed_test'] = 'integer_data'
error_dict['error_code'] = 4021
raise InputValidationError(error_dict)
if 'min_value' in input_criteria.keys():
if input_number < input_criteria['min_value']:
error_dict['failed_test'] = 'min_value'
error_dict['error_code'] = 4022
raise InputValidationError(error_dict)
if 'max_value' in input_criteria.keys():
if input_number > input_criteria['max_value']:
error_dict['failed_test'] = 'max_value'
error_dict['error_code'] = 4023
raise InputValidationError(error_dict)
if 'greater_than' in input_criteria.keys():
if input_number <= input_criteria['greater_than']:
error_dict['failed_test'] = 'greater_than'
error_dict['error_code'] = 4024
raise InputValidationError(error_dict)
if 'less_than' in input_criteria.keys():
if input_number >= input_criteria['less_than']:
error_dict['failed_test'] = 'less_than'
error_dict['error_code'] = 4025
raise InputValidationError(error_dict)
if 'equal_to' in input_criteria.keys():
if input_number != input_criteria['equal_to']:
error_dict['failed_test'] = 'equal_to'
error_dict['error_code'] = 4026
raise InputValidationError(error_dict)
if 'discrete_values' in input_criteria.keys():
if input_number not in input_criteria['discrete_values']:
error_dict['failed_test'] = 'discrete_values'
error_dict['error_code'] = 4041
raise InputValidationError(error_dict)
if 'excluded_values' in input_criteria.keys():
if input_number in input_criteria['excluded_values']:
error_dict['failed_test'] = 'excluded_values'
error_dict['error_code'] = 4042
raise InputValidationError(error_dict)
# TODO: validate number against identical to reference
# TODO: run lambda function and call validation url
return input_number
def _validate_string(self, input_string, path_to_root, object_title=''):
    '''
    a helper method for validating properties of a string

    :param input_string: string to validate
    :param path_to_root: string with dot path to field in model
    :param object_title: [optional] string with name of input used in error reports
    :return: input_string
    '''

    rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
    input_criteria = self.keyMap[rules_path_to_root]

    # error report template, mutated in place before each raise
    error_dict = {
        'object_title': object_title,
        'model_schema': self.schema,
        'input_criteria': input_criteria,
        'failed_test': 'value_datatype',
        'input_path': path_to_root,
        'error_value': input_string,
        'error_code': 4001
    }

    # validate base64 byte data
    # NOTE(review): b64decode without validate=True silently discards
    # non-alphabet characters and always returns bytes, so this test
    # presumably only rejects strings with bad padding — confirm intent
    if 'byte_data' in input_criteria.keys():
        if input_criteria['byte_data']:
            error_dict['failed_test'] = 'byte_data'
            error_dict['error_code'] = 4011
            try:
                decoded_bytes = b64decode(input_string)
            except:
                raise InputValidationError(error_dict)
            if not isinstance(decoded_bytes, bytes):
                raise InputValidationError(error_dict)

    # validate value range (lexicographic comparison for strings)
    if 'min_value' in input_criteria.keys():
        if input_string < input_criteria['min_value']:
            error_dict['failed_test'] = 'min_value'
            error_dict['error_code'] = 4022
            raise InputValidationError(error_dict)
    if 'max_value' in input_criteria.keys():
        if input_string > input_criteria['max_value']:
            error_dict['failed_test'] = 'max_value'
            error_dict['error_code'] = 4023
            raise InputValidationError(error_dict)
    if 'greater_than' in input_criteria.keys():
        if input_string <= input_criteria['greater_than']:
            error_dict['failed_test'] = 'greater_than'
            error_dict['error_code'] = 4024
            raise InputValidationError(error_dict)
    if 'less_than' in input_criteria.keys():
        if input_string >= input_criteria['less_than']:
            error_dict['failed_test'] = 'less_than'
            error_dict['error_code'] = 4025
            raise InputValidationError(error_dict)
    if 'equal_to' in input_criteria.keys():
        if input_string != input_criteria['equal_to']:
            error_dict['failed_test'] = 'equal_to'
            error_dict['error_code'] = 4026
            raise InputValidationError(error_dict)

    # validate string length
    if 'min_length' in input_criteria.keys():
        if len(input_string) < input_criteria['min_length']:
            error_dict['failed_test'] = 'min_length'
            error_dict['error_code'] = 4012
            raise InputValidationError(error_dict)
    if 'max_length' in input_criteria.keys():
        if len(input_string) > input_criteria['max_length']:
            error_dict['failed_test'] = 'max_length'
            error_dict['error_code'] = 4013
            raise InputValidationError(error_dict)

    # validate regex qualifiers
    # must_not_contain: no declared pattern may match anywhere
    if 'must_not_contain' in input_criteria.keys():
        for regex in input_criteria['must_not_contain']:
            regex_pattern = re.compile(regex)
            if regex_pattern.findall(input_string):
                error_dict['failed_test'] = 'must_not_contain'
                error_dict['error_code'] = 4014
                raise InputValidationError(error_dict)
    # must_contain: every declared pattern must match at least once
    if 'must_contain' in input_criteria.keys():
        for regex in input_criteria['must_contain']:
            regex_pattern = re.compile(regex)
            if not regex_pattern.findall(input_string):
                error_dict['failed_test'] = 'must_contain'
                error_dict['error_code'] = 4015
                raise InputValidationError(error_dict)
    # contains_either: at least one declared pattern must match
    if 'contains_either' in input_criteria.keys():
        regex_match = False
        for regex in input_criteria['contains_either']:
            regex_pattern = re.compile(regex)
            if regex_pattern.findall(input_string):
                regex_match = True
        if not regex_match:
            error_dict['failed_test'] = 'contains_either'
            error_dict['error_code'] = 4016
            raise InputValidationError(error_dict)

    # validate membership qualifiers
    if 'discrete_values' in input_criteria.keys():
        if input_string not in input_criteria['discrete_values']:
            error_dict['failed_test'] = 'discrete_values'
            error_dict['error_code'] = 4041
            raise InputValidationError(error_dict)
    if 'excluded_values' in input_criteria.keys():
        if input_string in input_criteria['excluded_values']:
            error_dict['failed_test'] = 'excluded_values'
            error_dict['error_code'] = 4042
            raise InputValidationError(error_dict)

    # TODO: validate string against identical to reference
    # TODO: run lambda function and call validation url

    return input_string
def _validate_boolean(self, input_boolean, path_to_root, object_title=''):
'''
a helper method for validating properties of a boolean
:return: input_boolean
'''
rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
input_criteria = self.keyMap[rules_path_to_root]
error_dict = {
'object_title': object_title,
'model_schema': self.schema,
'input_criteria': input_criteria,
'failed_test': 'value_datatype',
'input_path': path_to_root,
'error_value': input_boolean,
'error_code': 4001
}
if 'equal_to' in input_criteria.keys():
if input_boolean != input_criteria['equal_to']:
error_dict['failed_test'] = 'equal_to'
error_dict['error_code'] = 4026
raise InputValidationError(error_dict)
# TODO: validate boolean against identical to reference
# TODO: run lambda function and call validation url
return input_boolean
def _ingest_list(self, input_list, schema_list, path_to_root):
    '''
    a helper method for ingesting items in a list

    items which fail validation are silently dropped rather than raising,
    so the returned list contains only the valid subset of the input

    :param input_list: list of input data to ingest
    :param schema_list: list with the corresponding branch of the model schema
    :param path_to_root: string with dot path from model root to this list
    :return: valid_list
    '''

    valid_list = []

    # construct max list size
    # a declared max_size of 0 (falsy) short-circuits to an empty list
    max_size = None
    rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
    if 'max_size' in self.keyMap[rules_path_to_root].keys():
        if not self.keyMap[rules_path_to_root]['max_size']:
            return valid_list
        else:
            max_size = self.keyMap[rules_path_to_root]['max_size']

    # iterate over items in input list
    if input_list:
        # datatype declared by the first item in the schema list
        rules_index = self._datatype_classes.index(schema_list[0].__class__)
        rules_type = self._datatype_names[rules_index]
        for i in range(len(input_list)):
            item_path = '%s[%s]' % (path_to_root, i)
            # items whose datatype does not match the declaration are skipped
            value_match = False
            try:
                item_index = self._datatype_classes.index(input_list[i].__class__)
                item_type = self._datatype_names[item_index]
                if item_type == rules_type:
                    value_match = True
            except:
                value_match = False
            if value_match:
                # best-effort: any validation failure silently drops the item
                try:
                    if item_type == 'boolean':
                        valid_list.append(self._validate_boolean(input_list[i], item_path))
                    elif item_type == 'number':
                        valid_list.append(self._validate_number(input_list[i], item_path))
                    elif item_type == 'string':
                        valid_list.append(self._validate_string(input_list[i], item_path))
                    elif item_type == 'map':
                        valid_list.append(self._ingest_dict(input_list[i], schema_list[0], item_path))
                    elif item_type == 'list':
                        valid_list.append(self._ingest_list(input_list[i], schema_list[0], item_path))
                except:
                    pass
            # stop ingesting once the declared max size is reached
            if isinstance(max_size, int):
                if len(valid_list) == max_size:
                    return valid_list

    return valid_list
def _ingest_number(self, input_number, path_to_root):
'''
a helper method for ingesting a number
:return: valid_number
'''
valid_number = 0.0
try:
valid_number = self._validate_number(input_number, path_to_root)
except:
rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
if 'default_value' in self.keyMap[rules_path_to_root]:
valid_number = self.keyMap[rules_path_to_root]['default_value']
elif 'integer_data' in self.keyMap[rules_path_to_root].keys():
if self.keyMap[rules_path_to_root]['integer_data']:
valid_number = 0
return valid_number
def _ingest_string(self, input_string, path_to_root):
'''
a helper method for ingesting a string
:return: valid_string
'''
valid_string = ''
try:
valid_string = self._validate_string(input_string, path_to_root)
except:
rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
if 'default_value' in self.keyMap[rules_path_to_root]:
valid_string = self.keyMap[rules_path_to_root]['default_value']
return valid_string
def _ingest_boolean(self, input_boolean, path_to_root):
'''
a helper method for ingesting a boolean
:return: valid_boolean
'''
valid_boolean = False
try:
valid_boolean = self._validate_boolean(input_boolean, path_to_root)
except:
rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
if 'default_value' in self.keyMap[rules_path_to_root]:
valid_boolean = self.keyMap[rules_path_to_root]['default_value']
return valid_boolean
def _reconstruct(self, path_to_root):
'''
a helper method for finding the schema endpoint from a path to root
:param path_to_root: string with dot path to root from
:return: list, dict, string, number, or boolean at path to root
'''
# split path to root into segments
item_pattern = re.compile('\d+\\]')
dot_pattern = re.compile('\\.|\\[')
path_segments = dot_pattern.split(path_to_root)
# construct base schema endpoint
schema_endpoint = self.schema
# reconstruct schema endpoint from segments
if path_segments[1]:
for i in range(1,len(path_segments)):
if item_pattern.match(path_segments[i]):
schema_endpoint = schema_endpoint[0]
else:
schema_endpoint = schema_endpoint[path_segments[i]]
return schema_endpoint
def _walk(self, path_to_root, record_dict):
    '''
    a helper method for finding the record endpoint from a path to root

    unlike _reconstruct, an item designator here fans out over EVERY item
    of the corresponding list in the record, so the result is a list of
    all endpoints matching the path

    :param path_to_root: string with dot path to root from
    :param record_dict: dictionary with record data to walk
    :return: list, dict, string, number, or boolean at path to root
    '''

    # split path to root into segments on '.' and '['
    item_pattern = re.compile('\d+\\]')
    dot_pattern = re.compile('\\.|\\[')
    path_segments = dot_pattern.split(path_to_root)

    # construct empty fields
    record_endpoints = []

    # determine starting position
    # (a leading '.' produces an empty first segment which is discarded)
    if not path_segments[0]:
        path_segments.pop(0)

    # define internal recursive function
    # accumulates matches into record_endpoints via closure
    def _walk_int(path_segments, record_dict):
        record_endpoint = record_dict
        for i in range(0, len(path_segments)):
            if item_pattern.match(path_segments[i]):
                # item designator: fan out across every item in the list
                for j in range(0, len(record_endpoint)):
                    if len(path_segments) == 2:
                        # designator is the terminal segment: collect items
                        record_endpoints.append(record_endpoint[j])
                    else:
                        # only recurse from the FIRST designator in the
                        # segment chain; later ones are handled by recursion
                        stop_chain = False
                        for x in range(0, i):
                            if item_pattern.match(path_segments[x]):
                                stop_chain = True
                        if not stop_chain:
                            shortened_segments = []
                            for z in range(i + 1, len(path_segments)):
                                shortened_segments.append(path_segments[z])
                            _walk_int(shortened_segments, record_endpoint[j])
            else:
                # key segment: descend (or collect when it is the last one),
                # again only before the first item designator in the chain
                stop_chain = False
                for y in range(0, i):
                    if item_pattern.match(path_segments[y]):
                        stop_chain = True
                if not stop_chain:
                    if len(path_segments) == i + 1:
                        record_endpoints.append(record_endpoint[path_segments[i]])
                    else:
                        record_endpoint = record_endpoint[path_segments[i]]

    # conduct recursive walk
    _walk_int(path_segments, record_dict)

    return record_endpoints
def validate(self, input_data, path_to_root='', object_title=''):
    '''
    a core method for validating input against the model

    input_data is only returned if all data is valid

    :param input_data: list, dict, string, number, or boolean to validate
    :param path_to_root: [optional] string with dot-path of model component
    :param object_title: [optional] string with name of input to validate
    :return: input_data (or InputValidationError)
    '''

    # shadow __name__ locally to brand argument-error messages
    __name__ = '%s.validate' % self.__class__.__name__
    _path_arg = '%s(path_to_root="...")' % __name__
    _title_arg = '%s(object_title="...")' % __name__

    # validate input
    # normalize path_to_root to the internal leading-dot form
    copy_path = path_to_root
    if path_to_root:
        if not isinstance(path_to_root, str):
            raise ModelValidationError('%s must be a string.' % _path_arg)
        else:
            if path_to_root[0] != '.':
                copy_path = '.%s' % path_to_root
            if not copy_path in self.keyMap.keys():
                raise ModelValidationError('%s does not exist in components %s.' % (_path_arg.replace('...', path_to_root), self.keyMap.keys()))
    else:
        copy_path = '.'
    if object_title:
        if not isinstance(object_title, str):
            raise ModelValidationError('%s must be a string' % _title_arg)

    # construct generic error dictionary
    error_dict = {
        'object_title': object_title,
        'model_schema': self.schema,
        'input_criteria': self.keyMap[copy_path],
        'failed_test': 'value_datatype',
        'input_path': copy_path,
        'error_value': input_data,
        'error_code': 4001
    }

    # determine value type of input data
    try:
        # a class not in the supported datatype list raises ValueError
        input_index = self._datatype_classes.index(input_data.__class__)
    except:
        error_dict['error_value'] = input_data.__class__.__name__
        raise InputValidationError(error_dict)
    input_type = self._datatype_names[input_index]

    # validate input data type
    if input_type != self.keyMap[copy_path]['value_datatype']:
        raise InputValidationError(error_dict)

    # run helper method appropriate to data type
    # container types need the matching schema branch rebuilt from the path
    if input_type == 'boolean':
        input_data = self._validate_boolean(input_data, copy_path, object_title)
    elif input_type == 'number':
        input_data = self._validate_number(input_data, copy_path, object_title)
    elif input_type == 'string':
        input_data = self._validate_string(input_data, copy_path, object_title)
    elif input_type == 'list':
        schema_list = self._reconstruct(copy_path)
        input_data = self._validate_list(input_data, schema_list, copy_path, object_title)
    elif input_type == 'map':
        schema_dict = self._reconstruct(copy_path)
        input_data = self._validate_dict(input_data, schema_dict, copy_path, object_title)

    return input_data
def ingest(self, **kwargs):
    '''
    a core method to ingest and validate arbitrary keyword data

    **NOTE: data is always returned with this method**

    each key declared in the model resolves to a value by priority:

    1. the value in kwargs, when it passes validation
    2. the default value declared for the key in the model
    3. an empty value appropriate to the datatype of the key

    **NOTE: as long as a default value is provided for each key-
    value, returned data will be model valid

    **NOTE: if 'extra_fields' is True for a dictionary, the key-
    value pair of all fields in kwargs which are not declared in
    the model will also be added to the corresponding dictionary
    data

    **NOTE: if 'max_size' is declared for a list, method will
    stop adding input to the list once it reaches max size

    :param kwargs: key, value pairs
    :return: dictionary with keys and value
    '''

    # shadow __name__ locally to brand any error messages
    __name__ = '%s.ingest' % self.__class__.__name__

    # delegate to the recursive dictionary ingestion starting at the root
    return self._ingest_dict(kwargs, self.schema, '.')
def query(self, query_criteria, valid_record=None):
    '''
    a core method for querying model valid data with criteria

    **NOTE: input is only returned if all fields & qualifiers are valid for model

    :param query_criteria: dictionary with model field names and query qualifiers
    :param valid_record: dictionary with model valid record
    :return: boolean (or QueryValidationError)

    an example of how to construct the query_criteria argument:

    query_criteria = {
        '.path.to.number': {
            'min_value': 4.5
        },
        '.path.to.string': {
            'must_contain': [ '\\regex' ]
        }
    }

    **NOTE: for a full list of operators for query_criteria based upon field
    datatype, see either the query-rules.json file or REFERENCE file
    '''

    # shadow __name__ locally to brand argument-error messages
    __name__ = '%s.query' % self.__class__.__name__
    _query_arg = '%s(query_criteria={...})' % __name__
    _record_arg = '%s(valid_record={...})' % __name__

    # validate input
    if not isinstance(query_criteria, dict):
        raise ModelValidationError('%s must be a dictionary.' % _query_arg)

    # convert javascript dot_path to class dot_path
    # dot_fields remembers which keys were rewritten, so error messages
    # can be converted back to the caller's original (dotless) spelling
    criteria_copy = {}
    equal_fields = []
    dot_fields = []
    for key, value in query_criteria.items():
        copy_key = key
        if not key:
            copy_key = '.'
        else:
            if key[0] != '.':
                copy_key = '.%s' % key
                dot_fields.append(copy_key)
        criteria_copy[copy_key] = value
        # a bare scalar value is shorthand for an equal_to qualifier
        # NOTE(review): _datatype_classes[0:4] presumably covers the scalar
        # classes (bool/int/float/str) — confirm against mapModel ordering
        if value.__class__ in self._datatype_classes[0:4]:
            criteria_copy[copy_key] = {
                'equal_to': value
            }
            equal_fields.append(copy_key)

    # validate query criteria against query rules
    query_kwargs = {
        'fields_dict': criteria_copy,
        'fields_rules': self.queryRules,
        'declared_value': False
    }
    try:
        self._validate_fields(**query_kwargs)
    except ModelValidationError as err:
        # rewrite the internal error message so it refers to the caller's
        # original field spelling and shorthand form before re-raising
        message = err.error['message']
        for field in equal_fields:
            equal_error = 'field %s qualifier equal_to' % field
            if message.find(equal_error) > -1:
                message = message.replace(equal_error, 'field %s' % field)
                break
        field_pattern = re.compile('ield\s(\..*?)\s')
        field_name = field_pattern.findall(message)
        if field_name:
            if field_name[0] in dot_fields:
                def _replace_field(x):
                    # strip the leading dot that was added during conversion
                    return 'ield %s ' % x.group(1)[1:]
                message = field_pattern.sub(_replace_field, message)
        raise QueryValidationError(message)

    # query test record
    # with no record supplied the method only validates the criteria
    if valid_record:
        if not isinstance(valid_record, dict):
            raise ModelValidationError('%s must be a dictionary.' % _record_arg)
        for key, value in criteria_copy.items():
            eval_outcome = self._evaluate_field(valid_record, key, value)
            if not eval_outcome:
                return False

    return True
|
collectiveacuity/jsonModel
|
jsonmodel/validators.py
|
jsonModel._ingest_list
|
python
|
def _ingest_list(self, input_list, schema_list, path_to_root):
'''
a helper method for ingesting items in a list
:return: valid_list
'''
valid_list = []
# construct max list size
max_size = None
rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
if 'max_size' in self.keyMap[rules_path_to_root].keys():
if not self.keyMap[rules_path_to_root]['max_size']:
return valid_list
else:
max_size = self.keyMap[rules_path_to_root]['max_size']
# iterate over items in input list
if input_list:
rules_index = self._datatype_classes.index(schema_list[0].__class__)
rules_type = self._datatype_names[rules_index]
for i in range(len(input_list)):
item_path = '%s[%s]' % (path_to_root, i)
value_match = False
try:
item_index = self._datatype_classes.index(input_list[i].__class__)
item_type = self._datatype_names[item_index]
if item_type == rules_type:
value_match = True
except:
value_match = False
if value_match:
try:
if item_type == 'boolean':
valid_list.append(self._validate_boolean(input_list[i], item_path))
elif item_type == 'number':
valid_list.append(self._validate_number(input_list[i], item_path))
elif item_type == 'string':
valid_list.append(self._validate_string(input_list[i], item_path))
elif item_type == 'map':
valid_list.append(self._ingest_dict(input_list[i], schema_list[0], item_path))
elif item_type == 'list':
valid_list.append(self._ingest_list(input_list[i], schema_list[0], item_path))
except:
pass
if isinstance(max_size, int):
if len(valid_list) == max_size:
return valid_list
return valid_list
|
a helper method for ingesting items in a list
:return: valid_list
|
train
|
https://github.com/collectiveacuity/jsonModel/blob/1ea64c36d78add3faa7b85ff82c5ec685458c940/jsonmodel/validators.py#L1274-L1325
| null |
class jsonModel(object):
__rules__ = jsonLoader('jsonmodel', 'models/model-rules.json')
def __init__(self, data_model, query_rules=None):
    '''
    a method for testing data model declaration & initializing the class

    :param data_model: dictionary with json model architecture
    :param query_rules: [optional] dictionary with valid field type qualifiers
    :return: object with jsonModel methods
    '''

    # validate schema input
    if not isinstance(data_model, dict):
        raise ModelValidationError('Data model must be a dictionary.')
    elif 'schema' not in data_model.keys():
        raise ModelValidationError('Data model must have a schema key.')
    elif not isinstance(data_model['schema'], dict):
        raise ModelValidationError('Value for the data model "schema" field must be a dictionary.')
    elif not data_model['schema']:
        raise ModelValidationError('Data model "schema" field must not be empty.')

    # construct base methods
    # deepcopy prevents later mutation of the caller's data_model dictionary
    from copy import deepcopy
    data_model = deepcopy(data_model)
    self.schema = data_model['schema']
    model_map = mapModel(self.schema)
    self.keyName = model_map.keyName
    self.keyCriteria = model_map.keyCriteria

    # construct protected type classes
    self._datatype_names = mapModel._datatype_names
    self._datatype_classes = mapModel._datatype_classes

    # validate absence of item designators in keys
    # only the declared-item designator [0] is legal in schema key names
    item_pattern = re.compile('\[\d+\]')
    for i in range(len(self.keyName)):
        patterns_found = item_pattern.findall(self.keyName[i])
        if patterns_found:
            for designator in patterns_found:
                if designator != '[0]':
                    message = 'Key name for schema field %s must not contain the item designator pattern %s' % (self.keyName[i], designator)
                    raise ModelValidationError(message)

    # validate existence of first item in list declarations
    key_set = set(self.keyName)
    for i in range(len(self.keyName)):
        if self.keyCriteria[i]['value_datatype'] == 'list':
            item_key = self.keyName[i] + '[0]'
            if not item_key in key_set:
                message = 'Schema field %s must declare an initial item for the list.' % self.keyName[i]
                raise ModelValidationError(message)
            # alter list requirement if first item is empty
            else:
                item_index = self.keyName.index(item_key)
                if not self.keyCriteria[item_index]['required_field']:
                    self.keyCriteria[i]['required_field'] = False

    # validate title input & construct title property
    self.title = ''
    if 'title' in data_model.keys():
        if not isinstance(data_model['title'], str):
            raise ModelValidationError('Value for model title must be a string.')
        self.title = data_model['title']

    # validate description input & construct description property
    self.description = ''
    if 'description' in data_model.keys():
        if not isinstance(data_model['description'], str):
            raise ModelValidationError('Value for model description must be a string.')
        self.description = data_model['description']

    # validate url input & construct url property
    self.url = ''
    if 'url' in data_model.keys():
        if not isinstance(data_model['url'], str):
            raise ModelValidationError('Value for model url must be a string.')
        # BUG FIX: previously assigned data_model['url'] to self.title,
        # clobbering the declared title and leaving self.url always empty
        self.url = data_model['url']

    # validate metadata input & construct metadata property
    self.metadata = {}
    if 'metadata' in data_model.keys():
        if not isinstance(data_model['metadata'], dict):
            raise ModelValidationError('Value for model metadata must be a dictionary.')
        self.metadata = data_model['metadata']

    # validate components input & construct component property
    self.components = {}
    if 'components' in data_model.keys():
        if not isinstance(data_model['components'], dict):
            raise ModelValidationError('Value for model components must be a dictionary.')
        self.components = self._validate_fields(data_model['components'], self.__rules__['components'])

    # construct keyMap fields from key names and key criteria
    self.keyMap = {}
    for i in range(len(self.keyName)):
        self.keyMap[self.keyName[i]] = self.keyCriteria[i]
    for key, value in self.components.items():
        # convert javascript dot_path to class dot_path
        dot_key = ''
        if not key:
            dot_key = '.'
        else:
            if key[0] != '.':
                dot_key = '.%s' % key
        # add component declarations to keyMap
        if key in self.keyMap.keys():
            for k, v in self.components[key].items():
                self.keyMap[key][k] = v
        elif dot_key and dot_key in self.keyMap.keys():
            for k, v in self.components[key].items():
                self.keyMap[dot_key][k] = v

    # validate default values in lists
    self._validate_defaults(self.keyMap)

    # construct queryRules property from class model rules
    # strips declaration-only qualifiers so only query operators remain
    self.queryRules = {}
    for key, value in self.__rules__['components'].items():
        remove_from_query = [ 'required_field', 'default_value', 'example_values', 'field_title', 'field_position', 'field_description', 'field_metadata', 'extra_fields' ]
        field_qualifiers = {
            'value_exists': False
        }
        for k, v in value.items():
            if k not in remove_from_query:
                field_qualifiers[k] = v
        self.queryRules[key] = field_qualifiers

    # validate query rules input and replace queryRules property
    if query_rules:
        if not isinstance(query_rules, dict):
            message = 'Value for query rules input must be a dictionary.'
            raise ModelValidationError(message)
        input_set = set(query_rules.keys())
        req_set = set(self.queryRules.keys())
        if input_set - req_set:
            message = 'Query rules input may only have %s field key names.' % req_set
            raise ModelValidationError(message)
        elif req_set - input_set:
            message = 'Query rules input must have all %s field key names.' % req_set
            raise ModelValidationError(message)
        for key in req_set:
            if not isinstance(query_rules[key], dict):
                message = 'Value for query rules %s field must be a dictionary.' % key
                raise ModelValidationError(message)
            input_qualifier_set = set(query_rules[key].keys())
            req_qualifier_set = set(self.queryRules[key].keys())
            if input_qualifier_set - req_qualifier_set:
                message = 'Query rules field %s may only have qualifiers %s' % (key, req_qualifier_set)
                raise ModelValidationError(message)
            # each declared qualifier must match the datatype of the default
            for k, v in query_rules[key].items():
                if v.__class__ != self.queryRules[key][k].__class__:
                    qualifier_index = self._datatype_classes.index(self.queryRules[key][k].__class__)
                    qualifier_type = self._datatype_names[qualifier_index]
                    message = 'Value for query rules field %s qualifier %s must be a "%s" datatype.' % (key, k, qualifier_type)
                    raise ModelValidationError(message)
        self.queryRules = query_rules
    def _validate_fields(self, fields_dict, fields_rules, declared_value=True):
        '''
            a helper method for validating the qualifier criteria declared for each field

        :param fields_dict: dictionary with field dot-paths mapped to qualifier criteria
        :param fields_rules: dictionary with the qualifiers permitted for each datatype
            (keyed by '.string_fields', '.number_fields', etc.)
        :param declared_value: [optional] boolean to also test values declared in the schema
        :return: fields_dict (unchanged; raises ModelValidationError on any violation)
        '''
        # validate key names in fields
        for key, value in fields_dict.items():
            # convert javascript dot_path to class dot_path
            if not key:
                key = '.'
            else:
                if key[0] != '.':
                    key = '.%s' % key
            if key not in self.keyName:
                raise ModelValidationError('Field %s is not a field declared in model schema.' % key)
            elif not isinstance(value, dict):
                raise ModelValidationError('Value for field %s must be a dictionary.' % key)
            # validate field criteria are appropriate to field datatype
            # (type_dict holds the full set of qualifiers permitted for this datatype)
            value_type = self.keyCriteria[self.keyName.index(key)]['value_datatype']
            type_dict = {}
            if value_type == 'string':
                type_dict = fields_rules['.string_fields']
            elif value_type == 'number':
                type_dict = fields_rules['.number_fields']
            elif value_type == 'boolean':
                type_dict = fields_rules['.boolean_fields']
            elif value_type == 'list':
                type_dict = fields_rules['.list_fields']
            elif value_type == 'map':
                type_dict = fields_rules['.map_fields']
            elif value_type == 'null':
                type_dict = fields_rules['.null_fields']
            if set(value.keys()) - set(type_dict.keys()):
                raise ModelValidationError('Field %s may only have datatype %s qualifiers %s.' % (key, value_type, set(type_dict.keys())))
            # validate criteria qualifier values are appropriate datatype
            for k, v in value.items():
                v_index = self._datatype_classes.index(v.__class__)
                v_type = self._datatype_names[v_index]
                qualifier_index = self._datatype_classes.index(type_dict[k].__class__)
                qualifier_type = self._datatype_names[qualifier_index]
                if v_type != qualifier_type:
                    message = 'Value for field %s qualifier %s must be a %s datatype.' % (key, k, qualifier_type)
                    raise ModelValidationError(message)
                # an integer exemplar in the rules means the qualifier must itself be an integer
                if qualifier_type == 'number':
                    if isinstance(type_dict[k], int):
                        if not isinstance(v, int):
                            message = 'Value for field %s qualifier %s must be an integer.' % (key, k)
                            raise ModelValidationError(message)
                # validate internal logic of each qualifier value declaration
                if k in ('must_not_contain', 'must_contain', 'contains_either'):
                    for item in v:
                        if not isinstance(item, str):
                            message = 'Each item in list field %s qualifier %s must be a string.' % (key, k)
                            raise ModelValidationError(message)
                if k in ('min_length', 'max_length', 'min_size', 'max_size'):
                    if v < 0:
                        message = 'Value for field %s qualifier %s cannot be negative.' % (key, k)
                        raise ModelValidationError(message)
                if k in ('discrete_values', 'excluded_values', 'example_values'):
                    for item in v:
                        if value_type == 'number':
                            if not isinstance(item, int) and not isinstance(item, float):
                                message = 'Each item in field %s qualifier %s list must be a number.' % (key, k)
                                raise ModelValidationError(message)
                        elif not isinstance(item, str):
                            message = 'Each item in list for field %s qualifier %s must be a string.' % (key, k)
                            raise ModelValidationError(message)
                if k == 'identical_to':
                    # cross-reference must point at another declared component path
                    if not v in self.keyName:
                        message = 'Value "%s" for field %s qualifier %s not found in components keys.' % (v, key, k)
                        raise ModelValidationError(message)
                if k == 'unique_values':
                    # uniqueness only testable on hashable item types (string/number)
                    if v:
                        item_name = key + '[0]'
                        item_type = self.keyCriteria[self.keyName.index(item_name)]['value_datatype']
                        if not item_type in ('number', 'string'):
                            message = 'Field %s[0] must be either a string or number if qualifier "unique_values": true' % key
                            raise ModelValidationError(message)
            # validate lack of other qualifiers if value exist is false
            if 'value_exists' in value.keys():
                if not value['value_exists']:
                    if set(value.keys()) - {'value_exists'}:
                        message = 'If field %s qualifier value_exists: false, field may not have other qualifiers.' % key
                        raise ModelValidationError(message)
            # validate size qualifiers against each other
            size_qualifiers = ['min_size', 'max_size']
            for qualifier in size_qualifiers:
                if qualifier in value.keys():
                    test_value = value[qualifier]
                    value_path = 'field %s qualifier %s' % (key, qualifier)
                    header = 'Value %s for %s' % (test_value, value_path)
                    if 'min_size' in value.keys():
                        if test_value < value['min_size']:
                            message = '%s must not be less than "min_size": %s' % (header, value['min_size'])
                            raise ModelValidationError(message)
                    if 'max_size' in value.keys():
                        if test_value > value['max_size']:
                            message = '%s must not be greater than "max_size": %s' % (header, value['max_size'])
                            raise ModelValidationError(message)
            # validate length qualifiers against each other
            length_qualifiers = ['min_length', 'max_length']
            for qualifier in length_qualifiers:
                if qualifier in value.keys():
                    test_value = value[qualifier]
                    value_path = 'field %s qualifier %s' % (key, qualifier)
                    header = 'Value %s for %s' % (test_value, value_path)
                    if 'min_length' in value.keys():
                        if test_value < value['min_length']:
                            message = '%s must be at least "min_length": %s' % (header, value['min_length'])
                            raise ModelValidationError(message)
                    if 'max_length' in value.keys():
                        if test_value > value['max_length']:
                            message = '%s cannot be more than "max_length": %s' % (header, value['max_length'])
                            raise ModelValidationError(message)
            # validate range qualifiers against each other & length qualifiers
            range_qualifiers = ['min_value', 'max_value', 'greater_than', 'less_than', 'equal_to']
            for qualifier in range_qualifiers:
                if qualifier in value.keys():
                    test_value = value[qualifier]
                    value_path = 'field %s qualifier %s' % (key, qualifier)
                    quote_text = ''
                    if isinstance(test_value, str):
                        quote_text = '"'
                    header = 'Value %s%s%s for %s' % (quote_text, test_value, quote_text, value_path)
                    if 'min_value' in value.keys():
                        if test_value < value['min_value']:
                            message = '%s must not be less than "min_value": %s' % (header, value['min_value'])
                            raise ModelValidationError(message)
                    if 'max_value' in value.keys():
                        if test_value > value['max_value']:
                            message = '%s must not be greater than "max_value": %s' % (header, value['max_value'])
                            raise ModelValidationError(message)
                    # a qualifier is not tested against itself (e.g. greater_than vs. greater_than)
                    if 'greater_than' in value.keys():
                        if test_value <= value['greater_than'] and not qualifier == 'greater_than':
                            message = '%s must be "greater_than": %s' % (header, value['greater_than'])
                            raise ModelValidationError(message)
                    if 'less_than' in value.keys():
                        if test_value >= value['less_than'] and not qualifier == 'less_than':
                            message = '%s must be "less_than": %s' % (header, value['less_than'])
                            raise ModelValidationError(message)
                    if 'min_length' in value.keys():
                        if len(test_value) < value['min_length']:
                            message = '%s must be at least "min_length": %s' % (header, value['min_length'])
                            raise ModelValidationError(message)
                    if 'max_length' in value.keys():
                        if len(test_value) > value['max_length']:
                            message = '%s cannot be more than "max_length": %s' % (header, value['max_length'])
                            raise ModelValidationError(message)
                    if 'integer_data' in value.keys():
                        if value['integer_data']:
                            if not isinstance(test_value, int):
                                message = '%s must be an "integer_data".' % header
                                raise ModelValidationError(message)
                    if 'must_not_contain' in value.keys():
                        for regex in value['must_not_contain']:
                            regex_pattern = re.compile(regex)
                            if regex_pattern.findall(test_value):
                                message = '%s matches regex pattern in "must_not_contain": ["%s"]' % (header, regex)
                                raise ModelValidationError(message)
                    if 'must_contain' in value.keys():
                        for regex in value['must_contain']:
                            regex_pattern = re.compile(regex)
                            if not regex_pattern.findall(test_value):
                                message = '%s does not match regex pattern in "must_contain": ["%s"].' % (header, regex)
                                raise ModelValidationError(message)
                    if 'contains_either' in value.keys():
                        regex_match = False
                        regex_patterns = []
                        for regex in value['contains_either']:
                            regex_patterns.append(regex)
                            regex_pattern = re.compile(regex)
                            if regex_pattern.findall(test_value):
                                regex_match = True
                        if not regex_match:
                            message = '%s does not match any regex patterns in "contains_either": %s' % (header, regex_patterns)
                            raise ModelValidationError(message)
                    if 'byte_data' in value.keys():
                        # base64 byte fields only support equality comparison
                        if value['byte_data']:
                            if qualifier != 'equal_to':
                                message = '%s cannot be used with base64 encoded "byte_data".' % header
                                raise ModelValidationError(message)
            # validate discrete value qualifiers against other criteria
            # (declared_value/default/example values must themselves satisfy the field's rules)
            schema_field = self.keyCriteria[self.keyName.index(key)]
            discrete_qualifiers = ['declared_value', 'default_value', 'excluded_values', 'discrete_values', 'example_values']
            for qualifier in discrete_qualifiers:
                test_qualifier = False
                if qualifier in schema_field:
                    test_qualifier = True
                    if qualifier == 'declared_value' and not schema_field[qualifier]:
                        test_qualifier = False
                if qualifier in value.keys() or (test_qualifier and declared_value):
                    multiple_values = False
                    if qualifier in value.keys():
                        if isinstance(value[qualifier], list):
                            test_list = value[qualifier]
                            multiple_values = True
                        else:
                            test_list = [value[qualifier]]
                    else:
                        test_list = [schema_field[qualifier]]
                    value_path = 'field %s qualifier %s' % (key, qualifier)
                    for i in range(len(test_list)):
                        test_value = test_list[i]
                        quote_text = ''
                        if isinstance(test_value, str):
                            quote_text = '"'
                        item_text = ''
                        if multiple_values:
                            item_text = '[%s]' % i
                        header = 'Value %s%s%s for %s%s' % (quote_text, test_value, quote_text, value_path, item_text)
                        if 'min_value' in value.keys():
                            if test_value < value['min_value']:
                                message = '%s must not be less than "min_value": %s' % (header, value['min_value'])
                                raise ModelValidationError(message)
                        if 'max_value' in value.keys():
                            if test_value > value['max_value']:
                                message = '%s must not be greater than "max_value": %s' % (header, value['max_value'])
                                raise ModelValidationError(message)
                        if 'equal_to' in value.keys():
                            if test_value != value['equal_to']:
                                # NOTE(review): error only raised for non-declared booleans here;
                                # confirm the isinstance(test_value, bool) guard is intentional
                                if qualifier != 'declared_value' and isinstance(test_value, bool):
                                    message = '%s must be "equal_to": %s' % (header, value['equal_to'])
                                    raise ModelValidationError(message)
                        if 'greater_than' in value.keys():
                            if test_value <= value['greater_than']:
                                message = '%s must be "greater_than": %s' % (header, value['greater_than'])
                                raise ModelValidationError(message)
                        if 'less_than' in value.keys():
                            if test_value >= value['less_than']:
                                message = '%s must be "less_than": %s' % (header, value['less_than'])
                                raise ModelValidationError(message)
                        if 'integer_data' in value.keys():
                            if value['integer_data']:
                                if not isinstance(test_value, int):
                                    message = '%s must be an "integer_data".' % header
                                    raise ModelValidationError(message)
                        if 'min_length' in value.keys():
                            if len(test_value) < value['min_length']:
                                message = '%s must be at least "min_length": %s' % (header, value['min_length'])
                                raise ModelValidationError(message)
                        if 'max_length' in value.keys():
                            if len(test_value) > value['max_length']:
                                message = '%s cannot be more than "max_length": %s' % (header, value['max_length'])
                                raise ModelValidationError(message)
                        if 'must_not_contain' in value.keys():
                            for regex in value['must_not_contain']:
                                regex_pattern = re.compile(regex)
                                if regex_pattern.findall(test_value):
                                    message = '%s matches regex pattern in "must_not_contain": ["%s"]' % (header, regex)
                                    raise ModelValidationError(message)
                        if 'must_contain' in value.keys():
                            for regex in value['must_contain']:
                                regex_pattern = re.compile(regex)
                                if not regex_pattern.findall(test_value):
                                    message = '%s does not match regex pattern in "must_contain": ["%s"]' % (header, regex)
                                    raise ModelValidationError(message)
                        if 'contains_either' in value.keys():
                            regex_match = False
                            regex_patterns = []
                            for regex in value['contains_either']:
                                regex_patterns.append(regex)
                                regex_pattern = re.compile(regex)
                                if regex_pattern.findall(test_value):
                                    regex_match = True
                            if not regex_match:
                                message = '%s does not match any regex patterns in "contains_either": %s' % (header, regex_patterns)
                                raise ModelValidationError(message)
                        if 'byte_data' in value.keys():
                            # NOTE(review): decode is attempted whenever the key is present,
                            # regardless of whether byte_data is true — confirm intended
                            message = '%s cannot be base64 decoded to "byte_data".' % header
                            try:
                                decoded_bytes = b64decode(test_value)
                            except:
                                raise ModelValidationError(message)
                            if not isinstance(decoded_bytes, bytes):
                                raise ModelValidationError(message)
            # validate discrete value qualifiers against each other
            for qualifier in discrete_qualifiers:
                test_qualifier = False
                if qualifier in schema_field:
                    test_qualifier = True
                    if qualifier == 'declared_value' and not schema_field[qualifier]:
                        test_qualifier = False
                if qualifier in value.keys() or (test_qualifier and declared_value):
                    multiple_values = False
                    if qualifier in value.keys():
                        if isinstance(value[qualifier], list):
                            test_list = value[qualifier]
                            multiple_values = True
                        else:
                            test_list = [value[qualifier]]
                    else:
                        test_list = [schema_field[qualifier]]
                    value_path = 'field %s qualifier %s' % (key, qualifier)
                    for i in range(len(test_list)):
                        test_value = test_list[i]
                        quote_text = ''
                        if isinstance(test_value, str):
                            quote_text = '"'
                        item_text = ''
                        if multiple_values:
                            item_text = '[%s]' % i
                        header = 'Value %s%s%s for %s%s' % (quote_text, test_value, quote_text, value_path, item_text)
                        # excluded_values entries themselves are exempt from both membership tests
                        if 'excluded_values' in value.keys():
                            if not qualifier == 'excluded_values':
                                if test_value in value['excluded_values']:
                                    message = '%s cannot be one of "excluded_values": %s.' % (header, value['excluded_values'])
                                    raise ModelValidationError(message)
                        if 'discrete_values' in value.keys():
                            if not qualifier == 'excluded_values':
                                if test_value not in value['discrete_values']:
                                    message = '%s must be one of "discrete_values": %s' % (header, value['discrete_values'])
                                    raise ModelValidationError(message)
        return fields_dict
def _validate_defaults(self, fields_dict):
# validate key names in fields
for key, value in fields_dict.items():
# retrieve value type and type dict
value_type = self.keyCriteria[self.keyName.index(key)]['value_datatype']
# validate discrete value qualifiers against other criteria
qualifier = 'default_value'
if qualifier in value.keys():
multiple_values = False
if isinstance(value[qualifier], list):
test_list = value[qualifier]
multiple_values = True
else:
test_list = [value[qualifier]]
value_path = 'field %s qualifier %s' % (key, qualifier)
for i in range(len(test_list)):
test_value = test_list[i]
quote_text = ''
if isinstance(test_value, str):
quote_text = '"'
item_text = ''
if multiple_values:
item_text = '[%s]' % i
qualifier_text = value_path + item_text
header = 'Value %s%s%s for %s' % (quote_text, test_value, quote_text, qualifier_text)
if value_type == 'list':
default_item_key = '%s[0]' % key
try:
self.validate(test_value, default_item_key, object_title=header)
except Exception as err:
raise ModelValidationError(str(err).strip().replace('field %s' % default_item_key, qualifier_text))
return fields_dict
    def _evaluate_field(self, record_dict, field_name, field_criteria):
        ''' a helper method for evaluating record values based upon query criteria
        :param record_dict: dictionary with model valid data to evaluate
        :param field_name: string with path to root of query field
        :param field_criteria: dictionary with query operators and qualifiers
        :return: boolean (True if no field_criteria evaluate to false)
        '''
        # determine value existence criteria
        value_exists = True
        if 'value_exists' in field_criteria.keys():
            if not field_criteria['value_exists']:
                value_exists = False
        # validate existence of field
        # (any failure to walk the dot-path is treated as "field absent")
        field_exists = True
        try:
            record_values = self._walk(field_name, record_dict)
        except:
            field_exists = False
        # evaluate existence query criteria
        if value_exists != field_exists:
            return False
        elif not value_exists:
            # field is required absent and is absent — no other qualifiers apply
            return True
        # convert javascript dot_path to class dot_path
        field_key = field_name
        if not field_name:
            field_key = '.'
        else:
            if field_name[0] != '.':
                field_key = '.%s' % field_name
        # evaluate other query criteria
        # each qualifier must be satisfied by at least one walked record value
        # (except must_not_contain / excluded_values, which no value may violate)
        for key, value in field_criteria.items():
            if key in ('min_size', 'min_length'):
                found = False
                if self.keyMap[field_key]['value_datatype'] == 'map':
                    # maps measure byte size of their compacted JSON serialization;
                    # 51 offsets the fixed overhead of an empty serialized object
                    for record_value in record_values:
                        record_size = sys.getsizeof(json.dumps(str(record_value)).replace(' ','')) - 51
                        if record_size >= value:
                            found = True
                            break
                else:
                    for record_value in record_values:
                        if len(record_value) >= value:
                            found = True
                            break
                if not found:
                    return False
            elif key in ('max_size', 'max_length'):
                found = False
                if self.keyMap[field_key]['value_datatype'] == 'map':
                    for record_value in record_values:
                        record_size = sys.getsizeof(json.dumps(str(record_value)).replace(' ','')) - 51
                        if record_size <= value:
                            found = True
                            break
                else:
                    for record_value in record_values:
                        if len(record_value) <= value:
                            found = True
                            break
                if not found:
                    return False
            elif key == 'min_value':
                found = False
                for record_value in record_values:
                    if record_value >= value:
                        found = True
                        break
                if not found:
                    return False
            elif key == 'max_value':
                found = False
                for record_value in record_values:
                    if record_value <= value:
                        found = True
                        break
                if not found:
                    return False
            elif key == 'equal_to':
                found = False
                for record_value in record_values:
                    if record_value == value:
                        found = True
                        break
                if not found:
                    return False
            elif key == 'greater_than':
                found = False
                for record_value in record_values:
                    if record_value > value:
                        found = True
                        break
                if not found:
                    return False
            elif key == 'less_than':
                found = False
                for record_value in record_values:
                    if record_value < value:
                        found = True
                        break
                if not found:
                    return False
            elif key == 'excluded_values':
                # any walked value matching an excluded value fails the query
                for record_value in record_values:
                    if record_value in value:
                        return False
            elif key == 'discrete_values':
                found = False
                for record_value in record_values:
                    if record_value in value:
                        found = True
                        break
                if not found:
                    return False
            elif key == 'integer_data':
                # exact class comparison (not isinstance) so bool does not count as int
                found = False
                dummy_int = 1
                for record_value in record_values:
                    if record_value.__class__ == dummy_int.__class__:
                        found = True
                        break
                if value != found:
                    return False
            elif key == 'byte_data':
                # a value counts as byte data if it base64-decodes without error
                found = False
                for record_value in record_values:
                    try:
                        decoded_bytes = b64decode(record_value)
                    except:
                        decoded_bytes = ''
                    if isinstance(decoded_bytes, bytes):
                        found = True
                        break
                if value != found:
                    return False
            elif key == 'must_contain':
                # every regex must match at least one walked value
                for regex in value:
                    regex_pattern = re.compile(regex)
                    found = False
                    for record_value in record_values:
                        if regex_pattern.findall(record_value):
                            found = True
                            break
                    if not found:
                        return False
            elif key == 'must_not_contain':
                # no regex may match any walked value
                for regex in value:
                    regex_pattern = re.compile(regex)
                    found = False
                    for record_value in record_values:
                        if regex_pattern.findall(record_value):
                            found = True
                            break
                    if found:
                        return False
            elif key == 'contains_either':
                # at least one regex must match at least one walked value
                found = False
                for regex in value:
                    regex_pattern = re.compile(regex)
                    for record_value in record_values:
                        if regex_pattern.findall(record_value):
                            found = True
                            break
                    if found:
                        break
                if not found:
                    return False
            elif key == 'unique_values':
                # each walked list must match the requested uniqueness state
                for record_value in record_values:
                    unique_values = True
                    if len(record_value) != len(set(record_value)):
                        unique_values = False
                    if value != unique_values:
                        return False
        return True
    def _validate_dict(self, input_dict, schema_dict, path_to_root, object_title=''):
        ''' a helper method for recursively validating keys in dictionaries

        :param input_dict: dictionary of input data at this level of the model
        :param schema_dict: dictionary of schema declarations at the same level
        :param path_to_root: string dot-path from model root to this dictionary
        :param object_title: [optional] string title included in error reports
        :return input_dict
        '''
        # reconstruct key path to current dictionary in model
        # (list indices are normalized to [0] since all items share one rule set)
        rules_top_level_key = re.sub('\[\d+\]', '[0]', path_to_root)
        map_rules = self.keyMap[rules_top_level_key]
        # construct list error report template
        map_error = {
            'object_title': object_title,
            'model_schema': self.schema,
            'input_criteria': map_rules,
            'failed_test': 'value_datatype',
            'input_path': path_to_root,
            'error_value': 0,
            'error_code': 4001
        }
        # validate map size
        # (byte size of compacted JSON serialization, offset by empty-object overhead)
        if 'min_size' in map_rules.keys():
            input_size = sys.getsizeof(json.dumps(str(input_dict)).replace(' ','')) - 51
            if input_size < map_rules['min_size']:
                map_error['failed_test'] = 'min_size'
                map_error['error_value'] = input_size
                map_error['error_code'] = 4031
                raise InputValidationError(map_error)
        if 'max_size' in map_rules.keys():
            input_size = sys.getsizeof(json.dumps(str(input_dict)).replace(' ','')) - 51
            if input_size > map_rules['max_size']:
                map_error['failed_test'] = 'max_size'
                map_error['error_value'] = input_size
                map_error['error_code'] = 4032
                raise InputValidationError(map_error)
        # construct lists of keys in input dictionary
        input_keys = []
        input_key_list = []
        for key in input_dict.keys():
            error_dict = {
                'object_title': object_title,
                'model_schema': self.schema,
                'input_criteria': self.keyMap[rules_top_level_key],
                'failed_test': 'key_datatype',
                'input_path': path_to_root,
                'error_value': key,
                'error_code': 4004
            }
            # NOTE(review): 'input_criteria' aliases self.keyMap[rules_top_level_key],
            # so this write (and 'required_keys'/'maximum_scope' below) mutates the
            # model's keyMap entry in place — confirm this pollution is intended
            error_dict['input_criteria']['key_datatype'] = 'string'
            if path_to_root == '.':
                if not isinstance(key, str):
                    input_key_name = path_to_root + str(key)
                    error_dict['input_path'] = input_key_name
                    raise InputValidationError(error_dict)
                input_key_name = path_to_root + key
            else:
                if not isinstance(key, str):
                    input_key_name = path_to_root + '.' + str(key)
                    error_dict['input_path'] = input_key_name
                    raise InputValidationError(error_dict)
                input_key_name = path_to_root + '.' + key
            input_keys.append(input_key_name)
            input_key_list.append(key)
        # TODO: validate top-level key and values against identical to reference
        # TODO: run lambda function and call validation
        # construct lists of keys in schema dictionary
        max_keys = []
        max_key_list = []
        req_keys = []
        req_key_list = []
        for key in schema_dict.keys():
            if path_to_root == '.':
                schema_key_name = path_to_root + key
            else:
                schema_key_name = path_to_root + '.' + key
            max_keys.append(schema_key_name)
            max_key_list.append(key)
            rules_schema_key_name = re.sub('\[\d+\]', '[0]', schema_key_name)
            if self.keyMap[rules_schema_key_name]['required_field']:
                req_keys.append(schema_key_name)
                req_key_list.append(key)
        # validate existence of required fields
        missing_keys = set(req_keys) - set(input_keys)
        if missing_keys:
            error_dict = {
                'object_title': object_title,
                'model_schema': self.schema,
                'input_criteria': self.keyMap[rules_top_level_key],
                'failed_test': 'required_field',
                'input_path': path_to_root,
                'error_value': list(missing_keys),
                'error_code': 4002
            }
            error_dict['input_criteria']['required_keys'] = req_keys
            raise InputValidationError(error_dict)
        # validate existence of extra fields
        # (permitted only when the map declares extra_fields: true)
        extra_keys = set(input_keys) - set(max_keys)
        if extra_keys and not self.keyMap[rules_top_level_key]['extra_fields']:
            extra_key_list = []
            for key in extra_keys:
                pathless_key = re.sub(rules_top_level_key, '', key, count=1)
                extra_key_list.append(pathless_key)
            error_dict = {
                'object_title': object_title,
                'model_schema': self.schema,
                'input_criteria': self.keyMap[rules_top_level_key],
                'failed_test': 'extra_fields',
                'input_path': path_to_root,
                'error_value': extra_key_list,
                'error_code': 4003
            }
            error_dict['input_criteria']['maximum_scope'] = max_key_list
            raise InputValidationError(error_dict)
        # validate datatype of value
        for key, value in input_dict.items():
            if path_to_root == '.':
                input_key_name = path_to_root + key
            else:
                input_key_name = path_to_root + '.' + key
            rules_input_key_name = re.sub('\[\d+\]', '[0]', input_key_name)
            # extra fields (when allowed) are passed through without validation
            if input_key_name in max_keys:
                input_criteria = self.keyMap[rules_input_key_name]
                error_dict = {
                    'object_title': object_title,
                    'model_schema': self.schema,
                    'input_criteria': input_criteria,
                    'failed_test': 'value_datatype',
                    'input_path': input_key_name,
                    'error_value': value,
                    'error_code': 4001
                }
                try:
                    value_index = self._datatype_classes.index(value.__class__)
                except:
                    # class not in the recognized datatype list
                    error_dict['error_value'] = value.__class__.__name__
                    raise InputValidationError(error_dict)
                value_type = self._datatype_names[value_index]
                if input_criteria['value_datatype'] == 'null':
                    # null-declared fields accept any datatype
                    pass
                else:
                    if value_type != input_criteria['value_datatype']:
                        raise InputValidationError(error_dict)
                # call appropriate validation sub-routine for datatype of value
                if value_type == 'boolean':
                    input_dict[key] = self._validate_boolean(value, input_key_name, object_title)
                elif value_type == 'number':
                    input_dict[key] = self._validate_number(value, input_key_name, object_title)
                elif value_type == 'string':
                    input_dict[key] = self._validate_string(value, input_key_name, object_title)
                elif value_type == 'map':
                    input_dict[key] = self._validate_dict(value, schema_dict[key], input_key_name, object_title)
                elif value_type == 'list':
                    input_dict[key] = self._validate_list(value, schema_dict[key], input_key_name, object_title)
        # set default values for empty optional fields
        for key in max_key_list:
            if key not in input_key_list:
                indexed_key = max_keys[max_key_list.index(key)]
                if indexed_key in self.components.keys():
                    if 'default_value' in self.components[indexed_key]:
                        input_dict[key] = self.components[indexed_key]['default_value']
        return input_dict
    def _validate_list(self, input_list, schema_list, path_to_root, object_title=''):
        '''
            a helper method for recursively validating items in a list

        :param input_list: list of input data at this level of the model
        :param schema_list: list from the schema whose first item declares the item rules
        :param path_to_root: string dot-path from model root to this list
        :param object_title: [optional] string title included in error reports
        :return: input_list (items replaced in place with their validated forms)
        '''
        # construct rules for list and items
        # (all items share the rules declared for index [0])
        rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
        list_rules = self.keyMap[rules_path_to_root]
        initial_key = rules_path_to_root + '[0]'
        item_rules = self.keyMap[initial_key]
        # construct list error report template
        list_error = {
            'object_title': object_title,
            'model_schema': self.schema,
            'input_criteria': list_rules,
            'failed_test': 'value_datatype',
            'input_path': path_to_root,
            'error_value': 0,
            'error_code': 4001
        }
        # validate list rules
        if 'min_size' in list_rules.keys():
            if len(input_list) < list_rules['min_size']:
                list_error['failed_test'] = 'min_size'
                list_error['error_value'] = len(input_list)
                list_error['error_code'] = 4031
                raise InputValidationError(list_error)
        if 'max_size' in list_rules.keys():
            if len(input_list) > list_rules['max_size']:
                list_error['failed_test'] = 'max_size'
                list_error['error_value'] = len(input_list)
                list_error['error_code'] = 4032
                raise InputValidationError(list_error)
        # construct item error report template
        item_error = {
            'object_title': object_title,
            'model_schema': self.schema,
            'input_criteria': item_rules,
            'failed_test': 'value_datatype',
            'input_path': initial_key,
            'error_value': None,
            'error_code': 4001
        }
        # validate datatype of items
        for i in range(len(input_list)):
            input_path = path_to_root + '[%s]' % i
            item = input_list[i]
            item_error['input_path'] = input_path
            try:
                item_index = self._datatype_classes.index(item.__class__)
            except:
                # class not in the recognized datatype list
                item_error['error_value'] = item.__class__.__name__
                raise InputValidationError(item_error)
            item_type = self._datatype_names[item_index]
            item_error['error_value'] = item
            if item_rules['value_datatype'] == 'null':
                # null-declared items accept any datatype
                pass
            else:
                if item_type != item_rules['value_datatype']:
                    raise InputValidationError(item_error)
            # call appropriate validation sub-routine for datatype of item
            if item_type == 'boolean':
                input_list[i] = self._validate_boolean(item, input_path, object_title)
            elif item_type == 'number':
                input_list[i] = self._validate_number(item, input_path, object_title)
            elif item_type == 'string':
                input_list[i] = self._validate_string(item, input_path, object_title)
            elif item_type == 'map':
                input_list[i] = self._validate_dict(item, schema_list[0], input_path, object_title)
            elif item_type == 'list':
                input_list[i] = self._validate_list(item, schema_list[0], input_path, object_title)
        # validate unique values in list
        # (model rules restrict unique_values to string/number items, so set() is safe)
        if 'unique_values' in list_rules.keys():
            if len(set(input_list)) < len(input_list):
                list_error['failed_test'] = 'unique_values'
                list_error['error_value'] = input_list
                list_error['error_code'] = 4033
                raise InputValidationError(list_error)
        # TODO: validate top-level item values against identical to reference
        # TODO: run lambda function and call validation url
        return input_list
def _validate_number(self, input_number, path_to_root, object_title=''):
'''
a helper method for validating properties of a number
:return: input_number
'''
rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
input_criteria = self.keyMap[rules_path_to_root]
error_dict = {
'object_title': object_title,
'model_schema': self.schema,
'input_criteria': input_criteria,
'failed_test': 'value_datatype',
'input_path': path_to_root,
'error_value': input_number,
'error_code': 4001
}
if 'integer_data' in input_criteria.keys():
if input_criteria['integer_data'] and not isinstance(input_number, int):
error_dict['failed_test'] = 'integer_data'
error_dict['error_code'] = 4021
raise InputValidationError(error_dict)
if 'min_value' in input_criteria.keys():
if input_number < input_criteria['min_value']:
error_dict['failed_test'] = 'min_value'
error_dict['error_code'] = 4022
raise InputValidationError(error_dict)
if 'max_value' in input_criteria.keys():
if input_number > input_criteria['max_value']:
error_dict['failed_test'] = 'max_value'
error_dict['error_code'] = 4023
raise InputValidationError(error_dict)
if 'greater_than' in input_criteria.keys():
if input_number <= input_criteria['greater_than']:
error_dict['failed_test'] = 'greater_than'
error_dict['error_code'] = 4024
raise InputValidationError(error_dict)
if 'less_than' in input_criteria.keys():
if input_number >= input_criteria['less_than']:
error_dict['failed_test'] = 'less_than'
error_dict['error_code'] = 4025
raise InputValidationError(error_dict)
if 'equal_to' in input_criteria.keys():
if input_number != input_criteria['equal_to']:
error_dict['failed_test'] = 'equal_to'
error_dict['error_code'] = 4026
raise InputValidationError(error_dict)
if 'discrete_values' in input_criteria.keys():
if input_number not in input_criteria['discrete_values']:
error_dict['failed_test'] = 'discrete_values'
error_dict['error_code'] = 4041
raise InputValidationError(error_dict)
if 'excluded_values' in input_criteria.keys():
if input_number in input_criteria['excluded_values']:
error_dict['failed_test'] = 'excluded_values'
error_dict['error_code'] = 4042
raise InputValidationError(error_dict)
# TODO: validate number against identical to reference
# TODO: run lambda function and call validation url
return input_number
def _validate_string(self, input_string, path_to_root, object_title=''):
'''
a helper method for validating properties of a string
:return: input_string
'''
rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
input_criteria = self.keyMap[rules_path_to_root]
error_dict = {
'object_title': object_title,
'model_schema': self.schema,
'input_criteria': input_criteria,
'failed_test': 'value_datatype',
'input_path': path_to_root,
'error_value': input_string,
'error_code': 4001
}
if 'byte_data' in input_criteria.keys():
if input_criteria['byte_data']:
error_dict['failed_test'] = 'byte_data'
error_dict['error_code'] = 4011
try:
decoded_bytes = b64decode(input_string)
except:
raise InputValidationError(error_dict)
if not isinstance(decoded_bytes, bytes):
raise InputValidationError(error_dict)
if 'min_value' in input_criteria.keys():
if input_string < input_criteria['min_value']:
error_dict['failed_test'] = 'min_value'
error_dict['error_code'] = 4022
raise InputValidationError(error_dict)
if 'max_value' in input_criteria.keys():
if input_string > input_criteria['max_value']:
error_dict['failed_test'] = 'max_value'
error_dict['error_code'] = 4023
raise InputValidationError(error_dict)
if 'greater_than' in input_criteria.keys():
if input_string <= input_criteria['greater_than']:
error_dict['failed_test'] = 'greater_than'
error_dict['error_code'] = 4024
raise InputValidationError(error_dict)
if 'less_than' in input_criteria.keys():
if input_string >= input_criteria['less_than']:
error_dict['failed_test'] = 'less_than'
error_dict['error_code'] = 4025
raise InputValidationError(error_dict)
if 'equal_to' in input_criteria.keys():
if input_string != input_criteria['equal_to']:
error_dict['failed_test'] = 'equal_to'
error_dict['error_code'] = 4026
raise InputValidationError(error_dict)
if 'min_length' in input_criteria.keys():
if len(input_string) < input_criteria['min_length']:
error_dict['failed_test'] = 'min_length'
error_dict['error_code'] = 4012
raise InputValidationError(error_dict)
if 'max_length' in input_criteria.keys():
if len(input_string) > input_criteria['max_length']:
error_dict['failed_test'] = 'max_length'
error_dict['error_code'] = 4013
raise InputValidationError(error_dict)
if 'must_not_contain' in input_criteria.keys():
for regex in input_criteria['must_not_contain']:
regex_pattern = re.compile(regex)
if regex_pattern.findall(input_string):
error_dict['failed_test'] = 'must_not_contain'
error_dict['error_code'] = 4014
raise InputValidationError(error_dict)
if 'must_contain' in input_criteria.keys():
for regex in input_criteria['must_contain']:
regex_pattern = re.compile(regex)
if not regex_pattern.findall(input_string):
error_dict['failed_test'] = 'must_contain'
error_dict['error_code'] = 4015
raise InputValidationError(error_dict)
if 'contains_either' in input_criteria.keys():
regex_match = False
for regex in input_criteria['contains_either']:
regex_pattern = re.compile(regex)
if regex_pattern.findall(input_string):
regex_match = True
if not regex_match:
error_dict['failed_test'] = 'contains_either'
error_dict['error_code'] = 4016
raise InputValidationError(error_dict)
if 'discrete_values' in input_criteria.keys():
if input_string not in input_criteria['discrete_values']:
error_dict['failed_test'] = 'discrete_values'
error_dict['error_code'] = 4041
raise InputValidationError(error_dict)
if 'excluded_values' in input_criteria.keys():
if input_string in input_criteria['excluded_values']:
error_dict['failed_test'] = 'excluded_values'
error_dict['error_code'] = 4042
raise InputValidationError(error_dict)
# TODO: validate string against identical to reference
# TODO: run lambda function and call validation url
return input_string
def _validate_boolean(self, input_boolean, path_to_root, object_title=''):
    '''
    a helper method for validating properties of a boolean

    :param input_boolean: boolean input to check against the model criteria
    :param path_to_root: string with dot-path of the field in the model
    :param object_title: [optional] string with a name for the input
    :return: input_boolean (or raises InputValidationError)
    '''
    # look up the criteria declared for this field (item indices collapse to [0])
    rules_path = re.sub(r'\[\d+\]', '[0]', path_to_root)
    criteria = self.keyMap[rules_path]
    # pre-build the report attached to any validation failure
    report = {
        'object_title': object_title,
        'model_schema': self.schema,
        'input_criteria': criteria,
        'failed_test': 'value_datatype',
        'input_path': path_to_root,
        'error_value': input_boolean,
        'error_code': 4001
    }
    # enforce the equal_to qualifier when one is declared
    if 'equal_to' in criteria and input_boolean != criteria['equal_to']:
        report['failed_test'] = 'equal_to'
        report['error_code'] = 4026
        raise InputValidationError(report)
    # TODO: validate boolean against identical to reference
    # TODO: run lambda function and call validation url
    return input_boolean
def _ingest_dict(self, input_dict, schema_dict, path_to_root):
    '''
    a helper method for ingesting key, value pairs in a dictionary

    each key declared in the schema is populated, in order of priority,
    from the input (when its datatype matches the model), from the
    default value declared in the model, or from an empty value
    appropriate to the declared datatype; undeclared input keys are
    kept only when 'extra_fields' is true for the dictionary

    :param input_dict: dictionary with input key, value pairs
    :param schema_dict: dictionary with model declarations at this path
    :param path_to_root: string with dot-path of the dict in the model
    :return: valid_dict
    '''
    valid_dict = {}
    # construct path to root for rules
    rules_path_to_root = re.sub(r'\[\d+\]', '[0]', path_to_root)
    # iterate over keys in schema dict
    for key, value in schema_dict.items():
        key_path = path_to_root
        if not key_path == '.':
            key_path += '.'
        key_path += key
        rules_key_path = re.sub(r'\[\d+\]', '[0]', key_path)
        # determine whether the input value matches the declared datatype
        value_match = False
        if key in input_dict.keys():
            value_index = self._datatype_classes.index(value.__class__)
            value_type = self._datatype_names[value_index]
            try:
                v_index = self._datatype_classes.index(input_dict[key].__class__)
                v_type = self._datatype_names[v_index]
                if v_type == value_type:
                    value_match = True
            # FIX: narrowed bare except; list.index raises ValueError for
            # classes not registered in _datatype_classes
            except Exception:
                value_match = False
        # ingest a matching input value with the helper for its datatype
        if value_match:
            if value_type == 'null':
                valid_dict[key] = input_dict[key]
            elif value_type == 'boolean':
                valid_dict[key] = self._ingest_boolean(input_dict[key], key_path)
            elif value_type == 'number':
                valid_dict[key] = self._ingest_number(input_dict[key], key_path)
            elif value_type == 'string':
                valid_dict[key] = self._ingest_string(input_dict[key], key_path)
            elif value_type == 'map':
                valid_dict[key] = self._ingest_dict(input_dict[key], schema_dict[key], key_path)
            elif value_type == 'list':
                valid_dict[key] = self._ingest_list(input_dict[key], schema_dict[key], key_path)
        # otherwise fall back to the default (or empty) value for the field
        else:
            value_type = self.keyMap[rules_key_path]['value_datatype']
            if 'default_value' in self.keyMap[rules_key_path]:
                valid_dict[key] = self.keyMap[rules_key_path]['default_value']
            elif value_type == 'null':
                valid_dict[key] = None
            elif value_type == 'boolean':
                valid_dict[key] = False
            elif value_type == 'number':
                valid_dict[key] = 0.0
                if 'integer_data' in self.keyMap[rules_key_path].keys():
                    if self.keyMap[rules_key_path]['integer_data']:
                        valid_dict[key] = 0
            elif value_type == 'string':
                valid_dict[key] = ''
            elif value_type == 'list':
                valid_dict[key] = []
            elif value_type == 'map':
                valid_dict[key] = self._ingest_dict({}, schema_dict[key], key_path)
    # add extra fields if set to True
    if self.keyMap[rules_path_to_root]['extra_fields']:
        for key, value in input_dict.items():
            if key not in valid_dict.keys():
                valid_dict[key] = value
    return valid_dict
def _ingest_number(self, input_number, path_to_root):
    '''
    a helper method for ingesting a number

    falls back to the default value declared for the field (or 0 / 0.0)
    whenever the input fails model validation

    :param input_number: number found in the input at path_to_root
    :param path_to_root: string with dot-path of the field in the model
    :return: valid_number
    '''
    valid_number = 0.0
    try:
        valid_number = self._validate_number(input_number, path_to_root)
    # FIX: narrowed bare except so system-exiting exceptions
    # (KeyboardInterrupt, SystemExit) are no longer swallowed
    except Exception:
        rules_path_to_root = re.sub(r'\[\d+\]', '[0]', path_to_root)
        if 'default_value' in self.keyMap[rules_path_to_root]:
            valid_number = self.keyMap[rules_path_to_root]['default_value']
        elif 'integer_data' in self.keyMap[rules_path_to_root].keys():
            if self.keyMap[rules_path_to_root]['integer_data']:
                valid_number = 0
    return valid_number
def _ingest_string(self, input_string, path_to_root):
    '''
    a helper method for ingesting a string

    falls back to the default value declared for the field (or '')
    whenever the input fails model validation

    :param input_string: string found in the input at path_to_root
    :param path_to_root: string with dot-path of the field in the model
    :return: valid_string
    '''
    valid_string = ''
    try:
        valid_string = self._validate_string(input_string, path_to_root)
    # FIX: narrowed bare except so system-exiting exceptions
    # (KeyboardInterrupt, SystemExit) are no longer swallowed
    except Exception:
        rules_path_to_root = re.sub(r'\[\d+\]', '[0]', path_to_root)
        if 'default_value' in self.keyMap[rules_path_to_root]:
            valid_string = self.keyMap[rules_path_to_root]['default_value']
    return valid_string
def _ingest_boolean(self, input_boolean, path_to_root):
    '''
    a helper method for ingesting a boolean

    falls back to the default value declared for the field (or False)
    whenever the input fails model validation

    :param input_boolean: boolean found in the input at path_to_root
    :param path_to_root: string with dot-path of the field in the model
    :return: valid_boolean
    '''
    valid_boolean = False
    try:
        valid_boolean = self._validate_boolean(input_boolean, path_to_root)
    # FIX: narrowed bare except so system-exiting exceptions
    # (KeyboardInterrupt, SystemExit) are no longer swallowed
    except Exception:
        rules_path_to_root = re.sub(r'\[\d+\]', '[0]', path_to_root)
        if 'default_value' in self.keyMap[rules_path_to_root]:
            valid_boolean = self.keyMap[rules_path_to_root]['default_value']
    return valid_boolean
def _reconstruct(self, path_to_root):
    '''
    a helper method for finding the schema endpoint from a path to root

    :param path_to_root: string with dot path to a model component
    :return: list, dict, string, number, or boolean declared at the path
    '''
    # break the dot path into key and item-index segments
    segments = re.split(r'\.|\[', path_to_root)
    # start from the top of the schema declaration
    endpoint = self.schema
    # an empty second segment means the path is the root itself
    if segments[1]:
        for segment in segments[1:]:
            if re.match(r'\d+\]', segment):
                # an item designator always resolves to the first list item
                endpoint = endpoint[0]
            else:
                # a key segment steps down into the nested dictionary
                endpoint = endpoint[segment]
    return endpoint
def _walk(self, path_to_root, record_dict):
    '''
    a helper method for finding the record endpoint from a path to root

    :param path_to_root: string with dot path to root from
    :param record_dict: dictionary with record data to search
    :return: list of values found in record_dict at path to root
    '''
    # split path to root into segments
    item_pattern = re.compile('\d+\\]')
    dot_pattern = re.compile('\\.|\\[')
    path_segments = dot_pattern.split(path_to_root)
    # construct empty fields
    record_endpoints = []
    # determine starting position
    if not path_segments[0]:
        path_segments.pop(0)
    # define internal recursive function
    def _walk_int(path_segments, record_dict):
        record_endpoint = record_dict
        for i in range(0, len(path_segments)):
            # an item designator (e.g. '0]') fans the walk out over every item
            # of the list found at the current endpoint
            if item_pattern.match(path_segments[i]):
                for j in range(0, len(record_endpoint)):
                    if len(path_segments) == 2:
                        record_endpoints.append(record_endpoint[j])
                    else:
                        # only recurse from the first item designator in the chain
                        stop_chain = False
                        for x in range(0, i):
                            if item_pattern.match(path_segments[x]):
                                stop_chain = True
                        if not stop_chain:
                            shortened_segments = []
                            for z in range(i + 1, len(path_segments)):
                                shortened_segments.append(path_segments[z])
                            _walk_int(shortened_segments, record_endpoint[j])
            else:
                # a key segment steps down into the dictionary endpoint
                stop_chain = False
                for y in range(0, i):
                    if item_pattern.match(path_segments[y]):
                        stop_chain = True
                if not stop_chain:
                    if len(path_segments) == i + 1:
                        record_endpoints.append(record_endpoint[path_segments[i]])
                    else:
                        record_endpoint = record_endpoint[path_segments[i]]
    # conduct recursive walk
    _walk_int(path_segments, record_dict)
    return record_endpoints
def validate(self, input_data, path_to_root='', object_title=''):
    '''
    a core method for validating input against the model

    input_data is only returned if all data is valid

    :param input_data: list, dict, string, number, or boolean to validate
    :param path_to_root: [optional] string with dot-path of model component
    :param object_title: [optional] string with name of input to validate
    :return: input_data (or raises InputValidationError)
    '''
    __name__ = '%s.validate' % self.__class__.__name__
    _path_arg = '%s(path_to_root="...")' % __name__
    _title_arg = '%s(object_title="...")' % __name__
    # validate input
    copy_path = path_to_root
    if path_to_root:
        if not isinstance(path_to_root, str):
            raise ModelValidationError('%s must be a string.' % _path_arg)
        else:
            # normalize javascript dot path to class dot path
            if path_to_root[0] != '.':
                copy_path = '.%s' % path_to_root
        if not copy_path in self.keyMap.keys():
            raise ModelValidationError('%s does not exist in components %s.' % (_path_arg.replace('...', path_to_root), self.keyMap.keys()))
    else:
        copy_path = '.'
    if object_title:
        if not isinstance(object_title, str):
            raise ModelValidationError('%s must be a string' % _title_arg)
    # construct generic error dictionary
    error_dict = {
        'object_title': object_title,
        'model_schema': self.schema,
        'input_criteria': self.keyMap[copy_path],
        'failed_test': 'value_datatype',
        'input_path': copy_path,
        'error_value': input_data,
        'error_code': 4001
    }
    # determine value type of input data
    try:
        input_index = self._datatype_classes.index(input_data.__class__)
    # FIX: narrowed bare except; list.index only raises ValueError when the
    # class of the input is not a registered datatype
    except ValueError:
        error_dict['error_value'] = input_data.__class__.__name__
        raise InputValidationError(error_dict)
    input_type = self._datatype_names[input_index]
    # validate input data type
    if input_type != self.keyMap[copy_path]['value_datatype']:
        raise InputValidationError(error_dict)
    # run helper method appropriate to data type
    if input_type == 'boolean':
        input_data = self._validate_boolean(input_data, copy_path, object_title)
    elif input_type == 'number':
        input_data = self._validate_number(input_data, copy_path, object_title)
    elif input_type == 'string':
        input_data = self._validate_string(input_data, copy_path, object_title)
    elif input_type == 'list':
        schema_list = self._reconstruct(copy_path)
        input_data = self._validate_list(input_data, schema_list, copy_path, object_title)
    elif input_type == 'map':
        schema_dict = self._reconstruct(copy_path)
        input_data = self._validate_dict(input_data, schema_dict, copy_path, object_title)
    return input_data
def ingest(self, **kwargs):
    '''
    a core method to ingest and validate arbitrary keyword data

    **NOTE: data is always returned by this method**

    each key declared in the model is returned with a value taken,
    in order of priority, from:

    1. the keyword arguments (when the field passes validation)
    2. the default value declared for the key in the model
    3. an empty value appropriate to the datatype of the key

    **NOTE: as long as a default value is provided for each key-
    value, returned data will be model valid

    **NOTE: if 'extra_fields' is true for a dictionary, undeclared
    keywords are also returned alongside the declared fields

    **NOTE: if 'max_size' is declared for a list, ingestion stops
    adding input to the list once it reaches max size

    :param kwargs: key, value pairs
    :return: dictionary with ingested keys and values
    '''
    __name__ = '{0}.ingest'.format(self.__class__.__name__)
    # delegate to the recursive dictionary ingestion helper at the root path
    return self._ingest_dict(kwargs, self.schema, '.')
def query(self, query_criteria, valid_record=None):
    '''
    a core method for querying model valid data with criteria

    **NOTE: input is only returned if all fields & qualifiers are valid for model

    :param query_criteria: dictionary with model field names and query qualifiers
    :param valid_record: dictionary with model valid record
    :return: boolean (or QueryValidationError)

    an example of how to construct the query_criteria argument:

    query_criteria = {
        '.path.to.number': {
            'min_value': 4.5
        },
        '.path.to.string': {
            'must_contain': [ '\\regex' ]
        }
    }

    **NOTE: for a full list of operators for query_criteria based upon field
    datatype, see either the query-rules.json file or REFERENCE file
    '''
    __name__ = '%s.query' % self.__class__.__name__
    _query_arg = '%s(query_criteria={...})' % __name__
    _record_arg = '%s(valid_record={...})' % __name__
    # validate input
    if not isinstance(query_criteria, dict):
        raise ModelValidationError('%s must be a dictionary.' % _query_arg)
    # convert javascript dot_path to class dot_path
    criteria_copy = {}
    equal_fields = []
    dot_fields = []
    for key, value in query_criteria.items():
        copy_key = key
        if not key:
            copy_key = '.'
        else:
            if key[0] != '.':
                copy_key = '.%s' % key
                # remember fields whose leading dot was added here so error
                # messages can be rewritten in the caller's own spelling
                dot_fields.append(copy_key)
        criteria_copy[copy_key] = value
        # a plain value (one of the first four datatype classes) is treated as
        # shorthand for the equal_to qualifier
        if value.__class__ in self._datatype_classes[0:4]:
            criteria_copy[copy_key] = {
                'equal_to': value
            }
            equal_fields.append(copy_key)
    # validate query criteria against query rules
    query_kwargs = {
        'fields_dict': criteria_copy,
        'fields_rules': self.queryRules,
        'declared_value': False
    }
    try:
        self._validate_fields(**query_kwargs)
    except ModelValidationError as err:
        # rewrite the validation error message in terms of the caller's input:
        # strip the implicit equal_to qualifier and any leading dot added above
        message = err.error['message']
        for field in equal_fields:
            equal_error = 'field %s qualifier equal_to' % field
            if message.find(equal_error) > -1:
                message = message.replace(equal_error, 'field %s' % field)
                break
        field_pattern = re.compile('ield\s(\..*?)\s')
        field_name = field_pattern.findall(message)
        if field_name:
            if field_name[0] in dot_fields:
                def _replace_field(x):
                    return 'ield %s ' % x.group(1)[1:]
                message = field_pattern.sub(_replace_field, message)
        raise QueryValidationError(message)
    # query test record
    if valid_record:
        if not isinstance(valid_record, dict):
            raise ModelValidationError('%s must be a dictionary.' % _record_arg)
        # every criterion must evaluate true for the record to match
        for key, value in criteria_copy.items():
            eval_outcome = self._evaluate_field(valid_record, key, value)
            if not eval_outcome:
                return False
    return True
|
collectiveacuity/jsonModel
|
jsonmodel/validators.py
|
jsonModel._ingest_number
|
python
|
def _ingest_number(self, input_number, path_to_root):
'''
a helper method for ingesting a number
:return: valid_number
'''
valid_number = 0.0
try:
valid_number = self._validate_number(input_number, path_to_root)
except:
rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
if 'default_value' in self.keyMap[rules_path_to_root]:
valid_number = self.keyMap[rules_path_to_root]['default_value']
elif 'integer_data' in self.keyMap[rules_path_to_root].keys():
if self.keyMap[rules_path_to_root]['integer_data']:
valid_number = 0
return valid_number
|
a helper method for ingesting a number
:return: valid_number
|
train
|
https://github.com/collectiveacuity/jsonModel/blob/1ea64c36d78add3faa7b85ff82c5ec685458c940/jsonmodel/validators.py#L1327-L1347
| null |
class jsonModel(object):
__rules__ = jsonLoader('jsonmodel', 'models/model-rules.json')
def __init__(self, data_model, query_rules=None):
    '''
    a method for testing data model declaration & initializing the class

    :param data_model: dictionary with json model architecture
    :param query_rules: [optional] dictionary with valid field type qualifiers
    :return: object with jsonModel methods
    '''
    # validate schema input
    if not isinstance(data_model, dict):
        raise ModelValidationError('Data model must be a dictionary.')
    elif 'schema' not in data_model.keys():
        raise ModelValidationError('Data model must have a schema key.')
    elif not isinstance(data_model['schema'], dict):
        raise ModelValidationError('Value for the data model "schema" field must be a dictionary.')
    elif not data_model['schema']:
        raise ModelValidationError('Data model "schema" field must not be empty.')
    # construct base methods (deep copy prevents mutating the caller's model)
    from copy import deepcopy
    data_model = deepcopy(data_model)
    self.schema = data_model['schema']
    model_map = mapModel(self.schema)
    self.keyName = model_map.keyName
    self.keyCriteria = model_map.keyCriteria
    # construct protected type classes
    self._datatype_names = mapModel._datatype_names
    self._datatype_classes = mapModel._datatype_classes
    # validate absence of item designators in keys
    item_pattern = re.compile('\[\d+\]')
    for i in range(len(self.keyName)):
        patterns_found = item_pattern.findall(self.keyName[i])
        if patterns_found:
            for designator in patterns_found:
                if designator != '[0]':
                    message = 'Key name for schema field %s must not contain the item designator pattern %s' % (self.keyName[i], designator)
                    raise ModelValidationError(message)
    # validate existence of first item in list declarations
    key_set = set(self.keyName)
    for i in range(len(self.keyName)):
        if self.keyCriteria[i]['value_datatype'] == 'list':
            item_key = self.keyName[i] + '[0]'
            if not item_key in key_set:
                message = 'Schema field %s must declare an initial item for the list.' % self.keyName[i]
                raise ModelValidationError(message)
            # alter list requirement if first item is empty
            else:
                item_index = self.keyName.index(item_key)
                if not self.keyCriteria[item_index]['required_field']:
                    self.keyCriteria[i]['required_field'] = False
    # validate title input & construct title method
    self.title = ''
    if 'title' in data_model.keys():
        if not isinstance(data_model['title'], str):
            raise ModelValidationError('Value for model title must be a string.')
        self.title = data_model['title']
    # validate description input & construct description method
    self.description = ''
    if 'description' in data_model.keys():
        if not isinstance(data_model['description'], str):
            raise ModelValidationError('Value for model description must be a string.')
        self.description = data_model['description']
    # validate url input & construct url method
    self.url = ''
    if 'url' in data_model.keys():
        if not isinstance(data_model['url'], str):
            raise ModelValidationError('Value for model url must be a string.')
        # FIX: previously assigned data_model['url'] to self.title, which
        # clobbered the model title and left self.url permanently empty
        self.url = data_model['url']
    # validate metadata input & construct metadata method
    self.metadata = {}
    if 'metadata' in data_model.keys():
        if not isinstance(data_model['metadata'], dict):
            raise ModelValidationError('Value for model metadata must be a dictionary.')
        self.metadata = data_model['metadata']
    # validate max size input & construct maxSize property
    # self.maxSize = None
    # if 'max_size' in data_model.keys():
    #     if not isinstance(data_model['max_size'], int):
    #         raise ModelValidationError('Value for model max_size must be a positive integer.')
    #     elif data_model['max_size'] < 0:
    #         raise ModelValidationError('Value for model max_size must be a positive integer.')
    #     elif data_model['max_size']:
    #         self.maxSize = data_model['max_size']
    # validate components input & construct component property
    self.components = {}
    if 'components' in data_model.keys():
        if not isinstance(data_model['components'], dict):
            raise ModelValidationError('Value for model components must be a dictionary.')
        self.components = self._validate_fields(data_model['components'], self.__rules__['components'])
    # construct keyMap fields from key names and key criteria
    self.keyMap = {}
    for i in range(len(self.keyName)):
        self.keyMap[self.keyName[i]] = self.keyCriteria[i]
    for key, value in self.components.items():
        # convert javascript dot_path to class dot_path
        dot_key = ''
        if not key:
            dot_key = '.'
        else:
            if key[0] != '.':
                dot_key = '.%s' % key
        # add component declarations to keyMap
        if key in self.keyMap.keys():
            for k, v in self.components[key].items():
                self.keyMap[key][k] = v
        elif dot_key and dot_key in self.keyMap.keys():
            for k, v in self.components[key].items():
                self.keyMap[dot_key][k] = v
    # validate default values in lists
    self._validate_defaults(self.keyMap)
    # construct queryRules property from class model rules
    self.queryRules = {}
    for key, value in self.__rules__['components'].items():
        remove_from_query = [ 'required_field', 'default_value', 'example_values', 'field_title', 'field_position', 'field_description', 'field_metadata', 'extra_fields' ]
        field_qualifiers = {
            'value_exists': False
        }
        for k, v in value.items():
            if k not in remove_from_query:
                field_qualifiers[k] = v
        self.queryRules[key] = field_qualifiers
    # validate query rules input and replace queryRules property
    if query_rules:
        if not isinstance(query_rules, dict):
            message = 'Value for query rules input must be a dictionary.'
            raise ModelValidationError(message)
        input_set = set(query_rules.keys())
        req_set = set(self.queryRules.keys())
        if input_set - req_set:
            message = 'Query rules input may only have %s field key names.' % req_set
            raise ModelValidationError(message)
        elif req_set - input_set:
            message = 'Query rules input must have all %s field key names.' % req_set
            raise ModelValidationError(message)
        for key in req_set:
            if not isinstance(query_rules[key], dict):
                message = 'Value for query rules %s field must be a dictionary.' % key
                raise ModelValidationError(message)
            input_qualifier_set = set(query_rules[key].keys())
            req_qualifier_set = set(self.queryRules[key].keys())
            if input_qualifier_set - req_qualifier_set:
                message = 'Query rules field %s may only have qualifiers %s' % (key, req_qualifier_set)
                raise ModelValidationError(message)
            for k, v in query_rules[key].items():
                if v.__class__ != self.queryRules[key][k].__class__:
                    qualifier_index = self._datatype_classes.index(self.queryRules[key][k].__class__)
                    qualifier_type = self._datatype_names[qualifier_index]
                    message = 'Value for query rules field %s qualifier %s must be a "%s" datatype.' % (key, k, qualifier_type)
                    raise ModelValidationError(message)
        self.queryRules = query_rules
def _validate_fields(self, fields_dict, fields_rules, declared_value=True):
    '''
    a helper method for validating the qualifier criteria declared for model fields

    :param fields_dict: dictionary with field dot-paths and qualifier criteria
    :param fields_rules: dictionary with the qualifiers allowed per field datatype
    :param declared_value: [optional] boolean to also test values declared in the schema
    :return: fields_dict (or raises ModelValidationError)
    '''
    # validate key names in fields
    for key, value in fields_dict.items():
        # convert javascript dot_path to class dot_path
        if not key:
            key = '.'
        else:
            if key[0] != '.':
                key = '.%s' % key
        if key not in self.keyName:
            raise ModelValidationError('Field %s is not a field declared in model schema.' % key)
        elif not isinstance(value, dict):
            raise ModelValidationError('Value for field %s must be a dictionary.' % key)
        # validate field criteria are appropriate to field datatype
        value_type = self.keyCriteria[self.keyName.index(key)]['value_datatype']
        type_dict = {}
        if value_type == 'string':
            type_dict = fields_rules['.string_fields']
        elif value_type == 'number':
            type_dict = fields_rules['.number_fields']
        elif value_type == 'boolean':
            type_dict = fields_rules['.boolean_fields']
        elif value_type == 'list':
            type_dict = fields_rules['.list_fields']
        elif value_type == 'map':
            type_dict = fields_rules['.map_fields']
        elif value_type == 'null':
            type_dict = fields_rules['.null_fields']
        if set(value.keys()) - set(type_dict.keys()):
            raise ModelValidationError('Field %s may only have datatype %s qualifiers %s.' % (key, value_type, set(type_dict.keys())))
        # validate criteria qualifier values are appropriate datatype
        for k, v in value.items():
            v_index = self._datatype_classes.index(v.__class__)
            v_type = self._datatype_names[v_index]
            qualifier_index = self._datatype_classes.index(type_dict[k].__class__)
            qualifier_type = self._datatype_names[qualifier_index]
            if v_type != qualifier_type:
                message = 'Value for field %s qualifier %s must be a %s datatype.' % (key, k, qualifier_type)
                raise ModelValidationError(message)
            if qualifier_type == 'number':
                # an integer example in the rules requires an integer value
                if isinstance(type_dict[k], int):
                    if not isinstance(v, int):
                        message = 'Value for field %s qualifier %s must be an integer.' % (key, k)
                        raise ModelValidationError(message)
            # validate internal logic of each qualifier value declaration
            if k in ('must_not_contain', 'must_contain', 'contains_either'):
                for item in v:
                    if not isinstance(item, str):
                        message = 'Each item in list field %s qualifier %s must be a string.' % (key, k)
                        raise ModelValidationError(message)
            if k in ('min_length', 'max_length', 'min_size', 'max_size'):
                if v < 0:
                    message = 'Value for field %s qualifier %s cannot be negative.' % (key, k)
                    raise ModelValidationError(message)
            if k in ('discrete_values', 'excluded_values', 'example_values'):
                for item in v:
                    if value_type == 'number':
                        if not isinstance(item, int) and not isinstance(item, float):
                            message = 'Each item in field %s qualifier %s list must be a number.' % (key, k)
                            raise ModelValidationError(message)
                    elif not isinstance(item, str):
                        message = 'Each item in list for field %s qualifier %s must be a string.' % (key, k)
                        raise ModelValidationError(message)
            if k == 'identical_to':
                if not v in self.keyName:
                    message = 'Value "%s" for field %s qualifier %s not found in components keys.' % (v, key, k)
                    raise ModelValidationError(message)
            if k == 'unique_values':
                if v:
                    # unique_values only works on hashable scalar list items
                    item_name = key + '[0]'
                    item_type = self.keyCriteria[self.keyName.index(item_name)]['value_datatype']
                    if not item_type in ('number', 'string'):
                        message = 'Field %s[0] must be either a string or number if qualifier "unique_values": true' % key
                        raise ModelValidationError(message)
        # validate lack of other qualifiers if value exist is false
        if 'value_exists' in value.keys():
            if not value['value_exists']:
                if set(value.keys()) - {'value_exists'}:
                    message = 'If field %s qualifier value_exists: false, field may not have other qualifiers.' % key
                    raise ModelValidationError(message)
        # validate size qualifiers against each other
        size_qualifiers = ['min_size', 'max_size']
        for qualifier in size_qualifiers:
            if qualifier in value.keys():
                test_value = value[qualifier]
                value_path = 'field %s qualifier %s' % (key, qualifier)
                header = 'Value %s for %s' % (test_value, value_path)
                if 'min_size' in value.keys():
                    if test_value < value['min_size']:
                        message = '%s must not be less than "min_size": %s' % (header, value['min_size'])
                        raise ModelValidationError(message)
                if 'max_size' in value.keys():
                    if test_value > value['max_size']:
                        message = '%s must not be greater than "max_size": %s' % (header, value['max_size'])
                        raise ModelValidationError(message)
        # validate length qualifiers against each other
        length_qualifiers = ['min_length', 'max_length']
        for qualifier in length_qualifiers:
            if qualifier in value.keys():
                test_value = value[qualifier]
                value_path = 'field %s qualifier %s' % (key, qualifier)
                header = 'Value %s for %s' % (test_value, value_path)
                if 'min_length' in value.keys():
                    if test_value < value['min_length']:
                        message = '%s must be at least "min_length": %s' % (header, value['min_length'])
                        raise ModelValidationError(message)
                if 'max_length' in value.keys():
                    if test_value > value['max_length']:
                        message = '%s cannot be more than "max_length": %s' % (header, value['max_length'])
                        raise ModelValidationError(message)
        # validate range qualifiers against each other & length qualifiers
        range_qualifiers = ['min_value', 'max_value', 'greater_than', 'less_than', 'equal_to']
        for qualifier in range_qualifiers:
            if qualifier in value.keys():
                test_value = value[qualifier]
                value_path = 'field %s qualifier %s' % (key, qualifier)
                quote_text = ''
                if isinstance(test_value, str):
                    quote_text = '"'
                header = 'Value %s%s%s for %s' % (quote_text, test_value, quote_text, value_path)
                if 'min_value' in value.keys():
                    if test_value < value['min_value']:
                        message = '%s must not be less than "min_value": %s' % (header, value['min_value'])
                        raise ModelValidationError(message)
                if 'max_value' in value.keys():
                    if test_value > value['max_value']:
                        message = '%s must not be greater than "max_value": %s' % (header, value['max_value'])
                        raise ModelValidationError(message)
                if 'greater_than' in value.keys():
                    # a qualifier is never tested against itself
                    if test_value <= value['greater_than'] and not qualifier == 'greater_than':
                        message = '%s must be "greater_than": %s' % (header, value['greater_than'])
                        raise ModelValidationError(message)
                if 'less_than' in value.keys():
                    if test_value >= value['less_than'] and not qualifier == 'less_than':
                        message = '%s must be "less_than": %s' % (header, value['less_than'])
                        raise ModelValidationError(message)
                if 'min_length' in value.keys():
                    if len(test_value) < value['min_length']:
                        message = '%s must be at least "min_length": %s' % (header, value['min_length'])
                        raise ModelValidationError(message)
                if 'max_length' in value.keys():
                    if len(test_value) > value['max_length']:
                        message = '%s cannot be more than "max_length": %s' % (header, value['max_length'])
                        raise ModelValidationError(message)
                if 'integer_data' in value.keys():
                    if value['integer_data']:
                        if not isinstance(test_value, int):
                            message = '%s must be an "integer_data".' % header
                            raise ModelValidationError(message)
                if 'must_not_contain' in value.keys():
                    for regex in value['must_not_contain']:
                        regex_pattern = re.compile(regex)
                        if regex_pattern.findall(test_value):
                            message = '%s matches regex pattern in "must_not_contain": ["%s"]' % (header, regex)
                            raise ModelValidationError(message)
                if 'must_contain' in value.keys():
                    for regex in value['must_contain']:
                        regex_pattern = re.compile(regex)
                        if not regex_pattern.findall(test_value):
                            message = '%s does not match regex pattern in "must_contain": ["%s"].' % (header, regex)
                            raise ModelValidationError(message)
                if 'contains_either' in value.keys():
                    regex_match = False
                    regex_patterns = []
                    for regex in value['contains_either']:
                        regex_patterns.append(regex)
                        regex_pattern = re.compile(regex)
                        if regex_pattern.findall(test_value):
                            regex_match = True
                    if not regex_match:
                        message = '%s does not match any regex patterns in "contains_either": %s' % (header, regex_patterns)
                        raise ModelValidationError(message)
                if 'byte_data' in value.keys():
                    if value['byte_data']:
                        if qualifier != 'equal_to':
                            message = '%s cannot be used with base64 encoded "byte_data".' % header
                            raise ModelValidationError(message)
        # validate discrete value qualifiers against other criteria
        schema_field = self.keyCriteria[self.keyName.index(key)]
        discrete_qualifiers = ['declared_value', 'default_value', 'excluded_values', 'discrete_values', 'example_values']
        for qualifier in discrete_qualifiers:
            test_qualifier = False
            if qualifier in schema_field:
                test_qualifier = True
                if qualifier == 'declared_value' and not schema_field[qualifier]:
                    test_qualifier = False
            if qualifier in value.keys() or (test_qualifier and declared_value):
                # normalize the qualifier into a list of values to test
                multiple_values = False
                if qualifier in value.keys():
                    if isinstance(value[qualifier], list):
                        test_list = value[qualifier]
                        multiple_values = True
                    else:
                        test_list = [value[qualifier]]
                else:
                    test_list = [schema_field[qualifier]]
                value_path = 'field %s qualifier %s' % (key, qualifier)
                for i in range(len(test_list)):
                    test_value = test_list[i]
                    quote_text = ''
                    if isinstance(test_value, str):
                        quote_text = '"'
                    item_text = ''
                    if multiple_values:
                        item_text = '[%s]' % i
                    header = 'Value %s%s%s for %s%s' % (quote_text, test_value, quote_text, value_path, item_text)
                    if 'min_value' in value.keys():
                        if test_value < value['min_value']:
                            message = '%s must not be less than "min_value": %s' % (header, value['min_value'])
                            raise ModelValidationError(message)
                    if 'max_value' in value.keys():
                        if test_value > value['max_value']:
                            message = '%s must not be greater than "max_value": %s' % (header, value['max_value'])
                            raise ModelValidationError(message)
                    if 'equal_to' in value.keys():
                        if test_value != value['equal_to']:
                            # NOTE(review): the isinstance(test_value, bool) guard limits
                            # this test to boolean values — confirm this is intentional
                            if qualifier != 'declared_value' and isinstance(test_value, bool):
                                message = '%s must be "equal_to": %s' % (header, value['equal_to'])
                                raise ModelValidationError(message)
                    if 'greater_than' in value.keys():
                        if test_value <= value['greater_than']:
                            message = '%s must be "greater_than": %s' % (header, value['greater_than'])
                            raise ModelValidationError(message)
                    if 'less_than' in value.keys():
                        if test_value >= value['less_than']:
                            message = '%s must be "less_than": %s' % (header, value['less_than'])
                            raise ModelValidationError(message)
                    if 'integer_data' in value.keys():
                        if value['integer_data']:
                            if not isinstance(test_value, int):
                                message = '%s must be an "integer_data".' % header
                                raise ModelValidationError(message)
                    if 'min_length' in value.keys():
                        if len(test_value) < value['min_length']:
                            message = '%s must be at least "min_length": %s' % (header, value['min_length'])
                            raise ModelValidationError(message)
                    if 'max_length' in value.keys():
                        if len(test_value) > value['max_length']:
                            message = '%s cannot be more than "max_length": %s' % (header, value['max_length'])
                            raise ModelValidationError(message)
                    if 'must_not_contain' in value.keys():
                        for regex in value['must_not_contain']:
                            regex_pattern = re.compile(regex)
                            if regex_pattern.findall(test_value):
                                message = '%s matches regex pattern in "must_not_contain": ["%s"]' % (header, regex)
                                raise ModelValidationError(message)
                    if 'must_contain' in value.keys():
                        for regex in value['must_contain']:
                            regex_pattern = re.compile(regex)
                            if not regex_pattern.findall(test_value):
                                message = '%s does not match regex pattern in "must_contain": ["%s"]' % (header, regex)
                                raise ModelValidationError(message)
                    if 'contains_either' in value.keys():
                        regex_match = False
                        regex_patterns = []
                        for regex in value['contains_either']:
                            regex_patterns.append(regex)
                            regex_pattern = re.compile(regex)
                            if regex_pattern.findall(test_value):
                                regex_match = True
                        if not regex_match:
                            message = '%s does not match any regex patterns in "contains_either": %s' % (header, regex_patterns)
                            raise ModelValidationError(message)
                    if 'byte_data' in value.keys():
                        message = '%s cannot be base64 decoded to "byte_data".' % header
                        try:
                            decoded_bytes = b64decode(test_value)
                        except:
                            raise ModelValidationError(message)
                        if not isinstance(decoded_bytes, bytes):
                            raise ModelValidationError(message)
        # validate discrete value qualifiers against each other
        for qualifier in discrete_qualifiers:
            test_qualifier = False
            if qualifier in schema_field:
                test_qualifier = True
                if qualifier == 'declared_value' and not schema_field[qualifier]:
                    test_qualifier = False
            if qualifier in value.keys() or (test_qualifier and declared_value):
                multiple_values = False
                if qualifier in value.keys():
                    if isinstance(value[qualifier], list):
                        test_list = value[qualifier]
                        multiple_values = True
                    else:
                        test_list = [value[qualifier]]
                else:
                    test_list = [schema_field[qualifier]]
                value_path = 'field %s qualifier %s' % (key, qualifier)
                for i in range(len(test_list)):
                    test_value = test_list[i]
                    quote_text = ''
                    if isinstance(test_value, str):
                        quote_text = '"'
                    item_text = ''
                    if multiple_values:
                        item_text = '[%s]' % i
                    header = 'Value %s%s%s for %s%s' % (quote_text, test_value, quote_text, value_path, item_text)
                    if 'excluded_values' in value.keys():
                        if not qualifier == 'excluded_values':
                            if test_value in value['excluded_values']:
                                message = '%s cannot be one of "excluded_values": %s.' % (header, value['excluded_values'])
                                raise ModelValidationError(message)
                    if 'discrete_values' in value.keys():
                        # NOTE(review): guard compares against 'excluded_values' here —
                        # possibly a copy-paste of the block above; confirm whether
                        # 'discrete_values' was intended
                        if not qualifier == 'excluded_values':
                            if test_value not in value['discrete_values']:
                                message = '%s must be one of "discrete_values": %s' % (header, value['discrete_values'])
                                raise ModelValidationError(message)
    return fields_dict
def _validate_defaults(self, fields_dict):
    '''
    a helper method for validating the default values declared for fields

    for each field with a 'default_value' qualifier, the default (or each
    item when the default is a list of items) is checked against the
    criteria declared for the field in the model.

    :param fields_dict: dictionary with flattened model field criteria
    :return: fields_dict (or raises ModelValidationError)
    '''
    # validate key names in fields
    for key, value in fields_dict.items():
        # retrieve value type and type dict
        value_type = self.keyCriteria[self.keyName.index(key)]['value_datatype']
        # validate discrete value qualifiers against other criteria
        qualifier = 'default_value'
        if qualifier in value.keys():
            # a list-valued default declares one default per item
            multiple_values = False
            if isinstance(value[qualifier], list):
                test_list = value[qualifier]
                multiple_values = True
            else:
                test_list = [value[qualifier]]
            value_path = 'field %s qualifier %s' % (key, qualifier)
            for i in range(len(test_list)):
                test_value = test_list[i]
                # quote string values in the error header text
                quote_text = ''
                if isinstance(test_value, str):
                    quote_text = '"'
                item_text = ''
                if multiple_values:
                    item_text = '[%s]' % i
                qualifier_text = value_path + item_text
                header = 'Value %s%s%s for %s' % (quote_text, test_value, quote_text, qualifier_text)
                if value_type == 'list':
                    # validate each default item against the item criteria of the list
                    default_item_key = '%s[0]' % key
                    try:
                        self.validate(test_value, default_item_key, object_title=header)
                    except Exception as err:
                        # re-raise with the error path rewritten to point at the default value
                        raise ModelValidationError(str(err).strip().replace('field %s' % default_item_key, qualifier_text))
    return fields_dict
def _evaluate_field(self, record_dict, field_name, field_criteria):
    ''' a helper method for evaluating record values based upon query criteria

    :param record_dict: dictionary with model valid data to evaluate
    :param field_name: string with path to root of query field
    :param field_criteria: dictionary with query operators and qualifiers
    :return: boolean (True if no field_criteria evaluate to false)
    '''
    # determine value existence criteria
    value_exists = True
    if 'value_exists' in field_criteria.keys():
        if not field_criteria['value_exists']:
            value_exists = False
    # validate existence of field
    # NOTE: _walk raises when the path is absent, so any exception here
    # is interpreted as "field does not exist in the record"
    field_exists = True
    try:
        record_values = self._walk(field_name, record_dict)
    except:
        field_exists = False
    # evaluate existence query criteria
    if value_exists != field_exists:
        return False
    elif not value_exists:
        return True
    # convert javascript dot_path to class dot_path
    field_key = field_name
    if not field_name:
        field_key = '.'
    else:
        if field_name[0] != '.':
            field_key = '.%s' % field_name
    # evaluate other query criteria
    # each qualifier must be satisfied by at least one of the values found
    # at the path (record_values may contain multiple items when the path
    # crosses a list); the first failing qualifier short-circuits to False
    for key, value in field_criteria.items():
        if key in ('min_size', 'min_length'):
            found = False
            if self.keyMap[field_key]['value_datatype'] == 'map':
                for record_value in record_values:
                    # approximate byte size of a map as a compacted json string;
                    # 51 presumably offsets the empty-string object overhead — TODO confirm
                    record_size = sys.getsizeof(json.dumps(str(record_value)).replace(' ','')) - 51
                    if record_size >= value:
                        found = True
                        break
            else:
                for record_value in record_values:
                    if len(record_value) >= value:
                        found = True
                        break
            if not found:
                return False
        elif key in ('max_size', 'max_length'):
            found = False
            if self.keyMap[field_key]['value_datatype'] == 'map':
                for record_value in record_values:
                    record_size = sys.getsizeof(json.dumps(str(record_value)).replace(' ','')) - 51
                    if record_size <= value:
                        found = True
                        break
            else:
                for record_value in record_values:
                    if len(record_value) <= value:
                        found = True
                        break
            if not found:
                return False
        elif key == 'min_value':
            found = False
            for record_value in record_values:
                if record_value >= value:
                    found = True
                    break
            if not found:
                return False
        elif key == 'max_value':
            found = False
            for record_value in record_values:
                if record_value <= value:
                    found = True
                    break
            if not found:
                return False
        elif key == 'equal_to':
            found = False
            for record_value in record_values:
                if record_value == value:
                    found = True
                    break
            if not found:
                return False
        elif key == 'greater_than':
            found = False
            for record_value in record_values:
                if record_value > value:
                    found = True
                    break
            if not found:
                return False
        elif key == 'less_than':
            found = False
            for record_value in record_values:
                if record_value < value:
                    found = True
                    break
            if not found:
                return False
        elif key == 'excluded_values':
            # NO value may be one of the excluded values
            for record_value in record_values:
                if record_value in value:
                    return False
        elif key == 'discrete_values':
            found = False
            for record_value in record_values:
                if record_value in value:
                    found = True
                    break
            if not found:
                return False
        elif key == 'integer_data':
            found = False
            dummy_int = 1
            for record_value in record_values:
                # exact class comparison (not isinstance), so booleans do not count as integers
                if record_value.__class__ == dummy_int.__class__:
                    found = True
                    break
            # qualifier is a boolean: True requires an int, False requires none
            if value != found:
                return False
        elif key == 'byte_data':
            found = False
            for record_value in record_values:
                try:
                    decoded_bytes = b64decode(record_value)
                except:
                    decoded_bytes = ''
                if isinstance(decoded_bytes, bytes):
                    found = True
                    break
            if value != found:
                return False
        elif key == 'must_contain':
            # every regex must match at least one value
            for regex in value:
                regex_pattern = re.compile(regex)
                found = False
                for record_value in record_values:
                    if regex_pattern.findall(record_value):
                        found = True
                        break
                if not found:
                    return False
        elif key == 'must_not_contain':
            # no regex may match any value
            for regex in value:
                regex_pattern = re.compile(regex)
                found = False
                for record_value in record_values:
                    if regex_pattern.findall(record_value):
                        found = True
                        break
                if found:
                    return False
        elif key == 'contains_either':
            # at least one regex must match at least one value
            found = False
            for regex in value:
                regex_pattern = re.compile(regex)
                for record_value in record_values:
                    if regex_pattern.findall(record_value):
                        found = True
                        break
                if found:
                    break
            if not found:
                return False
        elif key == 'unique_values':
            # qualifier is a boolean: each list value must (or must not) be all-unique
            for record_value in record_values:
                unique_values = True
                if len(record_value) != len(set(record_value)):
                    unique_values = False
                if value != unique_values:
                    return False
    return True
def _validate_dict(self, input_dict, schema_dict, path_to_root, object_title=''):
    ''' a helper method for recursively validating keys in dictionaries

    :param input_dict: dictionary with input data to validate
    :param schema_dict: dictionary with schema criteria at path to root
    :param path_to_root: string with dot path to the dictionary in the model
    :param object_title: [optional] string with name of the input
    :return: input_dict (or raises InputValidationError)
    '''
    # reconstruct key path to current dictionary in model
    # (list indices are normalized to [0] to match the declared rules)
    rules_top_level_key = re.sub('\[\d+\]', '[0]', path_to_root)
    map_rules = self.keyMap[rules_top_level_key]
    # construct list error report template
    map_error = {
        'object_title': object_title,
        'model_schema': self.schema,
        'input_criteria': map_rules,
        'failed_test': 'value_datatype',
        'input_path': path_to_root,
        'error_value': 0,
        'error_code': 4001
    }
    # validate map size
    # size is approximated from a compacted json dump of the dict;
    # 51 presumably offsets empty-string object overhead — TODO confirm
    if 'min_size' in map_rules.keys():
        input_size = sys.getsizeof(json.dumps(str(input_dict)).replace(' ','')) - 51
        if input_size < map_rules['min_size']:
            map_error['failed_test'] = 'min_size'
            map_error['error_value'] = input_size
            map_error['error_code'] = 4031
            raise InputValidationError(map_error)
    if 'max_size' in map_rules.keys():
        input_size = sys.getsizeof(json.dumps(str(input_dict)).replace(' ','')) - 51
        if input_size > map_rules['max_size']:
            map_error['failed_test'] = 'max_size'
            map_error['error_value'] = input_size
            map_error['error_code'] = 4032
            raise InputValidationError(map_error)
    # construct lists of keys in input dictionary
    # and validate that every key is a string
    input_keys = []
    input_key_list = []
    for key in input_dict.keys():
        error_dict = {
            'object_title': object_title,
            'model_schema': self.schema,
            'input_criteria': self.keyMap[rules_top_level_key],
            'failed_test': 'key_datatype',
            'input_path': path_to_root,
            'error_value': key,
            'error_code': 4004
        }
        # NOTE(review): this assignment mutates the shared keyMap entry,
        # not a copy — presumably intentional; verify
        error_dict['input_criteria']['key_datatype'] = 'string'
        if path_to_root == '.':
            if not isinstance(key, str):
                input_key_name = path_to_root + str(key)
                error_dict['input_path'] = input_key_name
                raise InputValidationError(error_dict)
            input_key_name = path_to_root + key
        else:
            if not isinstance(key, str):
                input_key_name = path_to_root + '.' + str(key)
                error_dict['input_path'] = input_key_name
                raise InputValidationError(error_dict)
            input_key_name = path_to_root + '.' + key
        input_keys.append(input_key_name)
        input_key_list.append(key)
    # TODO: validate top-level key and values against identical to reference
    # TODO: run lambda function and call validation
    # construct lists of keys in schema dictionary
    max_keys = []
    max_key_list = []
    req_keys = []
    req_key_list = []
    for key in schema_dict.keys():
        if path_to_root == '.':
            schema_key_name = path_to_root + key
        else:
            schema_key_name = path_to_root + '.' + key
        max_keys.append(schema_key_name)
        max_key_list.append(key)
        rules_schema_key_name = re.sub('\[\d+\]', '[0]', schema_key_name)
        if self.keyMap[rules_schema_key_name]['required_field']:
            req_keys.append(schema_key_name)
            req_key_list.append(key)
    # validate existence of required fields
    missing_keys = set(req_keys) - set(input_keys)
    if missing_keys:
        error_dict = {
            'object_title': object_title,
            'model_schema': self.schema,
            'input_criteria': self.keyMap[rules_top_level_key],
            'failed_test': 'required_field',
            'input_path': path_to_root,
            'error_value': list(missing_keys),
            'error_code': 4002
        }
        error_dict['input_criteria']['required_keys'] = req_keys
        raise InputValidationError(error_dict)
    # validate existence of extra fields
    # (only rejected when the model does not allow extra_fields here)
    extra_keys = set(input_keys) - set(max_keys)
    if extra_keys and not self.keyMap[rules_top_level_key]['extra_fields']:
        extra_key_list = []
        for key in extra_keys:
            pathless_key = re.sub(rules_top_level_key, '', key, count=1)
            extra_key_list.append(pathless_key)
        error_dict = {
            'object_title': object_title,
            'model_schema': self.schema,
            'input_criteria': self.keyMap[rules_top_level_key],
            'failed_test': 'extra_fields',
            'input_path': path_to_root,
            'error_value': extra_key_list,
            'error_code': 4003
        }
        error_dict['input_criteria']['maximum_scope'] = max_key_list
        raise InputValidationError(error_dict)
    # validate datatype of value
    for key, value in input_dict.items():
        if path_to_root == '.':
            input_key_name = path_to_root + key
        else:
            input_key_name = path_to_root + '.' + key
        rules_input_key_name = re.sub('\[\d+\]', '[0]', input_key_name)
        if input_key_name in max_keys:
            input_criteria = self.keyMap[rules_input_key_name]
            error_dict = {
                'object_title': object_title,
                'model_schema': self.schema,
                'input_criteria': input_criteria,
                'failed_test': 'value_datatype',
                'input_path': input_key_name,
                'error_value': value,
                'error_code': 4001
            }
            # map the value's python class to a model datatype name;
            # an unmapped class is itself a datatype error
            try:
                value_index = self._datatype_classes.index(value.__class__)
            except:
                error_dict['error_value'] = value.__class__.__name__
                raise InputValidationError(error_dict)
            value_type = self._datatype_names[value_index]
            # a 'null' declaration in the model accepts any datatype
            if input_criteria['value_datatype'] == 'null':
                pass
            else:
                if value_type != input_criteria['value_datatype']:
                    raise InputValidationError(error_dict)
            # call appropriate validation sub-routine for datatype of value
            if value_type == 'boolean':
                input_dict[key] = self._validate_boolean(value, input_key_name, object_title)
            elif value_type == 'number':
                input_dict[key] = self._validate_number(value, input_key_name, object_title)
            elif value_type == 'string':
                input_dict[key] = self._validate_string(value, input_key_name, object_title)
            elif value_type == 'map':
                input_dict[key] = self._validate_dict(value, schema_dict[key], input_key_name, object_title)
            elif value_type == 'list':
                input_dict[key] = self._validate_list(value, schema_dict[key], input_key_name, object_title)
    # set default values for empty optional fields
    for key in max_key_list:
        if key not in input_key_list:
            indexed_key = max_keys[max_key_list.index(key)]
            if indexed_key in self.components.keys():
                if 'default_value' in self.components[indexed_key]:
                    input_dict[key] = self.components[indexed_key]['default_value']
    return input_dict
def _validate_list(self, input_list, schema_list, path_to_root, object_title=''):
    '''
    a helper method for recursively validating items in a list

    :param input_list: list with input data to validate
    :param schema_list: list with schema criteria at path to root
    :param path_to_root: string with dot path to the list in the model
    :param object_title: [optional] string with name of the input
    :return: input_list (or raises InputValidationError)
    '''
    # construct rules for list and items
    # (list indices are normalized to [0] since all items share one rule set)
    rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
    list_rules = self.keyMap[rules_path_to_root]
    initial_key = rules_path_to_root + '[0]'
    item_rules = self.keyMap[initial_key]
    # construct list error report template
    list_error = {
        'object_title': object_title,
        'model_schema': self.schema,
        'input_criteria': list_rules,
        'failed_test': 'value_datatype',
        'input_path': path_to_root,
        'error_value': 0,
        'error_code': 4001
    }
    # validate list rules
    if 'min_size' in list_rules.keys():
        if len(input_list) < list_rules['min_size']:
            list_error['failed_test'] = 'min_size'
            list_error['error_value'] = len(input_list)
            list_error['error_code'] = 4031
            raise InputValidationError(list_error)
    if 'max_size' in list_rules.keys():
        if len(input_list) > list_rules['max_size']:
            list_error['failed_test'] = 'max_size'
            list_error['error_value'] = len(input_list)
            list_error['error_code'] = 4032
            raise InputValidationError(list_error)
    # construct item error report template
    item_error = {
        'object_title': object_title,
        'model_schema': self.schema,
        'input_criteria': item_rules,
        'failed_test': 'value_datatype',
        'input_path': initial_key,
        'error_value': None,
        'error_code': 4001
    }
    # validate datatype of items
    for i in range(len(input_list)):
        input_path = path_to_root + '[%s]' % i
        item = input_list[i]
        item_error['input_path'] = input_path
        # map the item's python class to a model datatype name;
        # an unmapped class is itself a datatype error
        try:
            item_index = self._datatype_classes.index(item.__class__)
        except:
            item_error['error_value'] = item.__class__.__name__
            raise InputValidationError(item_error)
        item_type = self._datatype_names[item_index]
        item_error['error_value'] = item
        # a 'null' declaration in the model accepts any datatype
        if item_rules['value_datatype'] == 'null':
            pass
        else:
            if item_type != item_rules['value_datatype']:
                raise InputValidationError(item_error)
        # call appropriate validation sub-routine for datatype of item
        if item_type == 'boolean':
            input_list[i] = self._validate_boolean(item, input_path, object_title)
        elif item_type == 'number':
            input_list[i] = self._validate_number(item, input_path, object_title)
        elif item_type == 'string':
            input_list[i] = self._validate_string(item, input_path, object_title)
        elif item_type == 'map':
            input_list[i] = self._validate_dict(item, schema_list[0], input_path, object_title)
        elif item_type == 'list':
            input_list[i] = self._validate_list(item, schema_list[0], input_path, object_title)
    # validate unique values in list
    # NOTE: requires hashable items, so only applicable to scalar lists
    if 'unique_values' in list_rules.keys():
        if len(set(input_list)) < len(input_list):
            list_error['failed_test'] = 'unique_values'
            list_error['error_value'] = input_list
            list_error['error_code'] = 4033
            raise InputValidationError(list_error)
    # TODO: validate top-level item values against identical to reference
    # TODO: run lambda function and call validation url
    return input_list
def _validate_number(self, input_number, path_to_root, object_title=''):
'''
a helper method for validating properties of a number
:return: input_number
'''
rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
input_criteria = self.keyMap[rules_path_to_root]
error_dict = {
'object_title': object_title,
'model_schema': self.schema,
'input_criteria': input_criteria,
'failed_test': 'value_datatype',
'input_path': path_to_root,
'error_value': input_number,
'error_code': 4001
}
if 'integer_data' in input_criteria.keys():
if input_criteria['integer_data'] and not isinstance(input_number, int):
error_dict['failed_test'] = 'integer_data'
error_dict['error_code'] = 4021
raise InputValidationError(error_dict)
if 'min_value' in input_criteria.keys():
if input_number < input_criteria['min_value']:
error_dict['failed_test'] = 'min_value'
error_dict['error_code'] = 4022
raise InputValidationError(error_dict)
if 'max_value' in input_criteria.keys():
if input_number > input_criteria['max_value']:
error_dict['failed_test'] = 'max_value'
error_dict['error_code'] = 4023
raise InputValidationError(error_dict)
if 'greater_than' in input_criteria.keys():
if input_number <= input_criteria['greater_than']:
error_dict['failed_test'] = 'greater_than'
error_dict['error_code'] = 4024
raise InputValidationError(error_dict)
if 'less_than' in input_criteria.keys():
if input_number >= input_criteria['less_than']:
error_dict['failed_test'] = 'less_than'
error_dict['error_code'] = 4025
raise InputValidationError(error_dict)
if 'equal_to' in input_criteria.keys():
if input_number != input_criteria['equal_to']:
error_dict['failed_test'] = 'equal_to'
error_dict['error_code'] = 4026
raise InputValidationError(error_dict)
if 'discrete_values' in input_criteria.keys():
if input_number not in input_criteria['discrete_values']:
error_dict['failed_test'] = 'discrete_values'
error_dict['error_code'] = 4041
raise InputValidationError(error_dict)
if 'excluded_values' in input_criteria.keys():
if input_number in input_criteria['excluded_values']:
error_dict['failed_test'] = 'excluded_values'
error_dict['error_code'] = 4042
raise InputValidationError(error_dict)
# TODO: validate number against identical to reference
# TODO: run lambda function and call validation url
return input_number
def _validate_string(self, input_string, path_to_root, object_title=''):
    '''
    a helper method for validating properties of a string

    :param input_string: string to validate
    :param path_to_root: string with dot path to the field in the model
    :param object_title: [optional] string with name of the input
    :return: input_string (or raises InputValidationError)
    '''
    # qualifiers are evaluated in declared precedence order; the first
    # failure raises with a qualifier-specific error code
    rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
    input_criteria = self.keyMap[rules_path_to_root]
    error_dict = {
        'object_title': object_title,
        'model_schema': self.schema,
        'input_criteria': input_criteria,
        'failed_test': 'value_datatype',
        'input_path': path_to_root,
        'error_value': input_string,
        'error_code': 4001
    }
    if 'byte_data' in input_criteria.keys():
        if input_criteria['byte_data']:
            # string must be base64-decodable into bytes
            error_dict['failed_test'] = 'byte_data'
            error_dict['error_code'] = 4011
            try:
                decoded_bytes = b64decode(input_string)
            except:
                raise InputValidationError(error_dict)
            if not isinstance(decoded_bytes, bytes):
                raise InputValidationError(error_dict)
    # min/max/greater/less comparisons use python lexicographic string ordering
    if 'min_value' in input_criteria.keys():
        if input_string < input_criteria['min_value']:
            error_dict['failed_test'] = 'min_value'
            error_dict['error_code'] = 4022
            raise InputValidationError(error_dict)
    if 'max_value' in input_criteria.keys():
        if input_string > input_criteria['max_value']:
            error_dict['failed_test'] = 'max_value'
            error_dict['error_code'] = 4023
            raise InputValidationError(error_dict)
    if 'greater_than' in input_criteria.keys():
        if input_string <= input_criteria['greater_than']:
            error_dict['failed_test'] = 'greater_than'
            error_dict['error_code'] = 4024
            raise InputValidationError(error_dict)
    if 'less_than' in input_criteria.keys():
        if input_string >= input_criteria['less_than']:
            error_dict['failed_test'] = 'less_than'
            error_dict['error_code'] = 4025
            raise InputValidationError(error_dict)
    if 'equal_to' in input_criteria.keys():
        if input_string != input_criteria['equal_to']:
            error_dict['failed_test'] = 'equal_to'
            error_dict['error_code'] = 4026
            raise InputValidationError(error_dict)
    if 'min_length' in input_criteria.keys():
        if len(input_string) < input_criteria['min_length']:
            error_dict['failed_test'] = 'min_length'
            error_dict['error_code'] = 4012
            raise InputValidationError(error_dict)
    if 'max_length' in input_criteria.keys():
        if len(input_string) > input_criteria['max_length']:
            error_dict['failed_test'] = 'max_length'
            error_dict['error_code'] = 4013
            raise InputValidationError(error_dict)
    # regex qualifiers: no pattern may match / every pattern must match /
    # at least one pattern must match
    if 'must_not_contain' in input_criteria.keys():
        for regex in input_criteria['must_not_contain']:
            regex_pattern = re.compile(regex)
            if regex_pattern.findall(input_string):
                error_dict['failed_test'] = 'must_not_contain'
                error_dict['error_code'] = 4014
                raise InputValidationError(error_dict)
    if 'must_contain' in input_criteria.keys():
        for regex in input_criteria['must_contain']:
            regex_pattern = re.compile(regex)
            if not regex_pattern.findall(input_string):
                error_dict['failed_test'] = 'must_contain'
                error_dict['error_code'] = 4015
                raise InputValidationError(error_dict)
    if 'contains_either' in input_criteria.keys():
        regex_match = False
        for regex in input_criteria['contains_either']:
            regex_pattern = re.compile(regex)
            if regex_pattern.findall(input_string):
                regex_match = True
        if not regex_match:
            error_dict['failed_test'] = 'contains_either'
            error_dict['error_code'] = 4016
            raise InputValidationError(error_dict)
    if 'discrete_values' in input_criteria.keys():
        if input_string not in input_criteria['discrete_values']:
            error_dict['failed_test'] = 'discrete_values'
            error_dict['error_code'] = 4041
            raise InputValidationError(error_dict)
    if 'excluded_values' in input_criteria.keys():
        if input_string in input_criteria['excluded_values']:
            error_dict['failed_test'] = 'excluded_values'
            error_dict['error_code'] = 4042
            raise InputValidationError(error_dict)
    # TODO: validate string against identical to reference
    # TODO: run lambda function and call validation url
    return input_string
def _validate_boolean(self, input_boolean, path_to_root, object_title=''):
'''
a helper method for validating properties of a boolean
:return: input_boolean
'''
rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
input_criteria = self.keyMap[rules_path_to_root]
error_dict = {
'object_title': object_title,
'model_schema': self.schema,
'input_criteria': input_criteria,
'failed_test': 'value_datatype',
'input_path': path_to_root,
'error_value': input_boolean,
'error_code': 4001
}
if 'equal_to' in input_criteria.keys():
if input_boolean != input_criteria['equal_to']:
error_dict['failed_test'] = 'equal_to'
error_dict['error_code'] = 4026
raise InputValidationError(error_dict)
# TODO: validate boolean against identical to reference
# TODO: run lambda function and call validation url
return input_boolean
def _ingest_dict(self, input_dict, schema_dict, path_to_root):
    '''
    a helper method for ingesting keys, value pairs in a dictionary

    each key declared in the schema receives (in priority order):
    input value matching the declared datatype, the field's declared
    default value, or an empty value for the datatype.

    :param input_dict: dictionary with arbitrary input data
    :param schema_dict: dictionary with schema criteria at path to root
    :param path_to_root: string with dot path to the dictionary in the model
    :return: valid_dict
    '''
    valid_dict = {}
    # construct path to root for rules
    rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
    # iterate over keys in schema dict
    for key, value in schema_dict.items():
        key_path = path_to_root
        if not key_path == '.':
            key_path += '.'
        key_path += key
        rules_key_path = re.sub('\[\d+\]', '[0]', key_path)
        # a value "matches" when its datatype equals the schema example's datatype
        value_match = False
        if key in input_dict.keys():
            value_index = self._datatype_classes.index(value.__class__)
            value_type = self._datatype_names[value_index]
            try:
                v_index = self._datatype_classes.index(input_dict[key].__class__)
                v_type = self._datatype_names[v_index]
                if v_type == value_type:
                    value_match = True
            except:
                # unmapped python class in the input: treat as non-matching
                value_match = False
        if value_match:
            # delegate to the datatype-appropriate ingestion sub-routine
            if value_type == 'null':
                valid_dict[key] = input_dict[key]
            elif value_type == 'boolean':
                valid_dict[key] = self._ingest_boolean(input_dict[key], key_path)
            elif value_type == 'number':
                valid_dict[key] = self._ingest_number(input_dict[key], key_path)
            elif value_type == 'string':
                valid_dict[key] = self._ingest_string(input_dict[key], key_path)
            elif value_type == 'map':
                valid_dict[key] = self._ingest_dict(input_dict[key], schema_dict[key], key_path)
            elif value_type == 'list':
                valid_dict[key] = self._ingest_list(input_dict[key], schema_dict[key], key_path)
        else:
            # no usable input: fall back to the declared default,
            # otherwise an empty value for the declared datatype
            value_type = self.keyMap[rules_key_path]['value_datatype']
            if 'default_value' in self.keyMap[rules_key_path]:
                valid_dict[key] = self.keyMap[rules_key_path]['default_value']
            elif value_type == 'null':
                valid_dict[key] = None
            elif value_type == 'boolean':
                valid_dict[key] = False
            elif value_type == 'number':
                valid_dict[key] = 0.0
                if 'integer_data' in self.keyMap[rules_key_path].keys():
                    if self.keyMap[rules_key_path]['integer_data']:
                        valid_dict[key] = 0
            elif value_type == 'string':
                valid_dict[key] = ''
            elif value_type == 'list':
                valid_dict[key] = []
            elif value_type == 'map':
                # recurse so nested maps are populated with their own defaults
                valid_dict[key] = self._ingest_dict({}, schema_dict[key], key_path)
    # add extra fields if set to True
    if self.keyMap[rules_path_to_root]['extra_fields']:
        for key, value in input_dict.items():
            if key not in valid_dict.keys():
                valid_dict[key] = value
    return valid_dict
def _ingest_list(self, input_list, schema_list, path_to_root):
    '''
    a helper method for ingesting items in a list

    items whose datatype does not match the schema, or which fail
    validation, are silently dropped (not replaced by defaults);
    ingestion stops early once the declared max_size is reached.

    :param input_list: list with arbitrary input data
    :param schema_list: list with schema criteria at path to root
    :param path_to_root: string with dot path to the list in the model
    :return: valid_list
    '''
    valid_list = []
    # construct max list size
    max_size = None
    rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
    if 'max_size' in self.keyMap[rules_path_to_root].keys():
        # a falsy max_size (e.g. 0) means the list is always empty
        if not self.keyMap[rules_path_to_root]['max_size']:
            return valid_list
        else:
            max_size = self.keyMap[rules_path_to_root]['max_size']
    # iterate over items in input list
    if input_list:
        # datatype of each item must match the schema's first example item
        rules_index = self._datatype_classes.index(schema_list[0].__class__)
        rules_type = self._datatype_names[rules_index]
        for i in range(len(input_list)):
            item_path = '%s[%s]' % (path_to_root, i)
            value_match = False
            try:
                item_index = self._datatype_classes.index(input_list[i].__class__)
                item_type = self._datatype_names[item_index]
                if item_type == rules_type:
                    value_match = True
            except:
                value_match = False
            if value_match:
                # scalar items are strictly validated (invalid items dropped);
                # containers are recursively ingested
                try:
                    if item_type == 'boolean':
                        valid_list.append(self._validate_boolean(input_list[i], item_path))
                    elif item_type == 'number':
                        valid_list.append(self._validate_number(input_list[i], item_path))
                    elif item_type == 'string':
                        valid_list.append(self._validate_string(input_list[i], item_path))
                    elif item_type == 'map':
                        valid_list.append(self._ingest_dict(input_list[i], schema_list[0], item_path))
                    elif item_type == 'list':
                        valid_list.append(self._ingest_list(input_list[i], schema_list[0], item_path))
                except:
                    pass
            if isinstance(max_size, int):
                if len(valid_list) == max_size:
                    return valid_list
    return valid_list
def _ingest_string(self, input_string, path_to_root):
'''
a helper method for ingesting a string
:return: valid_string
'''
valid_string = ''
try:
valid_string = self._validate_string(input_string, path_to_root)
except:
rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
if 'default_value' in self.keyMap[rules_path_to_root]:
valid_string = self.keyMap[rules_path_to_root]['default_value']
return valid_string
def _ingest_boolean(self, input_boolean, path_to_root):
'''
a helper method for ingesting a boolean
:return: valid_boolean
'''
valid_boolean = False
try:
valid_boolean = self._validate_boolean(input_boolean, path_to_root)
except:
rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
if 'default_value' in self.keyMap[rules_path_to_root]:
valid_boolean = self.keyMap[rules_path_to_root]['default_value']
return valid_boolean
def _reconstruct(self, path_to_root):
'''
a helper method for finding the schema endpoint from a path to root
:param path_to_root: string with dot path to root from
:return: list, dict, string, number, or boolean at path to root
'''
# split path to root into segments
item_pattern = re.compile('\d+\\]')
dot_pattern = re.compile('\\.|\\[')
path_segments = dot_pattern.split(path_to_root)
# construct base schema endpoint
schema_endpoint = self.schema
# reconstruct schema endpoint from segments
if path_segments[1]:
for i in range(1,len(path_segments)):
if item_pattern.match(path_segments[i]):
schema_endpoint = schema_endpoint[0]
else:
schema_endpoint = schema_endpoint[path_segments[i]]
return schema_endpoint
def _walk(self, path_to_root, record_dict):
'''
a helper method for finding the record endpoint from a path to root
:param path_to_root: string with dot path to root from
:param record_dict:
:return: list, dict, string, number, or boolean at path to root
'''
# split path to root into segments
item_pattern = re.compile('\d+\\]')
dot_pattern = re.compile('\\.|\\[')
path_segments = dot_pattern.split(path_to_root)
# construct empty fields
record_endpoints = []
# determine starting position
if not path_segments[0]:
path_segments.pop(0)
# define internal recursive function
def _walk_int(path_segments, record_dict):
record_endpoint = record_dict
for i in range(0, len(path_segments)):
if item_pattern.match(path_segments[i]):
for j in range(0, len(record_endpoint)):
if len(path_segments) == 2:
record_endpoints.append(record_endpoint[j])
else:
stop_chain = False
for x in range(0, i):
if item_pattern.match(path_segments[x]):
stop_chain = True
if not stop_chain:
shortened_segments = []
for z in range(i + 1, len(path_segments)):
shortened_segments.append(path_segments[z])
_walk_int(shortened_segments, record_endpoint[j])
else:
stop_chain = False
for y in range(0, i):
if item_pattern.match(path_segments[y]):
stop_chain = True
if not stop_chain:
if len(path_segments) == i + 1:
record_endpoints.append(record_endpoint[path_segments[i]])
else:
record_endpoint = record_endpoint[path_segments[i]]
# conduct recursive walk
_walk_int(path_segments, record_dict)
return record_endpoints
def validate(self, input_data, path_to_root='', object_title=''):
    '''
    a core method for validating input against the model

    input_data is only returned if all data is valid

    :param input_data: list, dict, string, number, or boolean to validate
    :param path_to_root: [optional] string with dot-path of model component
    :param object_title: [optional] string with name of input to validate
    :return: input_data (or InputValidationError)
    '''
    # argument-name strings used to build error messages
    __name__ = '%s.validate' % self.__class__.__name__
    _path_arg = '%s(path_to_root="...")' % __name__
    _title_arg = '%s(object_title="...")' % __name__
    # validate input
    # normalize path_to_root to the internal dot-path form ('.x.y')
    copy_path = path_to_root
    if path_to_root:
        if not isinstance(path_to_root, str):
            raise ModelValidationError('%s must be a string.' % _path_arg)
        else:
            if path_to_root[0] != '.':
                copy_path = '.%s' % path_to_root
            if not copy_path in self.keyMap.keys():
                raise ModelValidationError('%s does not exist in components %s.' % (_path_arg.replace('...', path_to_root), self.keyMap.keys()))
    else:
        copy_path = '.'
    if object_title:
        if not isinstance(object_title, str):
            raise ModelValidationError('%s must be a string' % _title_arg)
    # construct generic error dictionary
    error_dict = {
        'object_title': object_title,
        'model_schema': self.schema,
        'input_criteria': self.keyMap[copy_path],
        'failed_test': 'value_datatype',
        'input_path': copy_path,
        'error_value': input_data,
        'error_code': 4001
    }
    # determine value type of input data
    # (an unmapped python class is itself a datatype error)
    try:
        input_index = self._datatype_classes.index(input_data.__class__)
    except:
        error_dict['error_value'] = input_data.__class__.__name__
        raise InputValidationError(error_dict)
    input_type = self._datatype_names[input_index]
    # validate input data type
    if input_type != self.keyMap[copy_path]['value_datatype']:
        raise InputValidationError(error_dict)
    # run helper method appropriate to data type
    # (containers additionally need the schema endpoint at the path)
    if input_type == 'boolean':
        input_data = self._validate_boolean(input_data, copy_path, object_title)
    elif input_type == 'number':
        input_data = self._validate_number(input_data, copy_path, object_title)
    elif input_type == 'string':
        input_data = self._validate_string(input_data, copy_path, object_title)
    elif input_type == 'list':
        schema_list = self._reconstruct(copy_path)
        input_data = self._validate_list(input_data, schema_list, copy_path, object_title)
    elif input_type == 'map':
        schema_dict = self._reconstruct(copy_path)
        input_data = self._validate_dict(input_data, schema_dict, copy_path, object_title)
    return input_data
def ingest(self, **kwargs):
    '''
    a core method to ingest and validate arbitrary keyword data

    **NOTE: data is always returned with this method**

    for each key in the model, a value is returned according
    to the following priority:

        1. value in kwargs if field passes validation test
        2. default value declared for the key in the model
        3. empty value appropriate to datatype of key in the model

    **NOTE: as long as a default value is provided for each key-
    value, returned data will be model valid

    **NOTE: if 'extra_fields' is True for a dictionary, the key-
    value pair of all fields in kwargs which are not declared in
    the model will also be added to the corresponding dictionary
    data

    **NOTE: if 'max_size' is declared for a list, method will
    stop adding input to the list once it reaches max size

    :param kwargs: key, value pairs
    :return: dictionary with keys and value
    '''
    # delegate to the recursive dictionary ingestion helper,
    # starting at the root of the model schema
    return self._ingest_dict(kwargs, self.schema, '.')
def query(self, query_criteria, valid_record=None):
    '''
    a core method for querying model valid data with criteria

    **NOTE: input is only returned if all fields & qualifiers are valid for model

    :param query_criteria: dictionary with model field names and query qualifiers
    :param valid_record: dictionary with model valid record
    :return: boolean (or QueryValidationError)

    an example of how to construct the query_criteria argument:

    query_criteria = {
        '.path.to.number': {
            'min_value': 4.5
        },
        '.path.to.string': {
            'must_contain': [ '\\regex' ]
        }
    }

    **NOTE: for a full list of operators for query_criteria based upon field
    datatype, see either the query-rules.json file or REFERENCE file
    '''
    # argument-name strings used to build error messages
    __name__ = '%s.query' % self.__class__.__name__
    _query_arg = '%s(query_criteria={...})' % __name__
    _record_arg = '%s(valid_record={...})' % __name__
    # validate input
    if not isinstance(query_criteria, dict):
        raise ModelValidationError('%s must be a dictionary.' % _query_arg)
    # convert javascript dot_path to class dot_path
    # a bare scalar value is shorthand for an 'equal_to' qualifier
    criteria_copy = {}
    equal_fields = []
    dot_fields = []
    for key, value in query_criteria.items():
        copy_key = key
        if not key:
            copy_key = '.'
        else:
            if key[0] != '.':
                copy_key = '.%s' % key
                # remember fields the caller gave without a leading dot so
                # error messages can be rewritten back into the caller's form
                dot_fields.append(copy_key)
        criteria_copy[copy_key] = value
        if value.__class__ in self._datatype_classes[0:4]:
            criteria_copy[copy_key] = {
                'equal_to': value
            }
            equal_fields.append(copy_key)
    # validate query criteria against query rules
    query_kwargs = {
        'fields_dict': criteria_copy,
        'fields_rules': self.queryRules,
        'declared_value': False
    }
    try:
        self._validate_fields(**query_kwargs)
    except ModelValidationError as err:
        # rewrite internal error text into the caller's vocabulary:
        # drop the synthesized 'qualifier equal_to' and the added leading dot
        message = err.error['message']
        for field in equal_fields:
            equal_error = 'field %s qualifier equal_to' % field
            if message.find(equal_error) > -1:
                message = message.replace(equal_error, 'field %s' % field)
                break
        field_pattern = re.compile('ield\s(\..*?)\s')
        field_name = field_pattern.findall(message)
        if field_name:
            if field_name[0] in dot_fields:
                def _replace_field(x):
                    # strip the leading dot that was added during normalization
                    return 'ield %s ' % x.group(1)[1:]
                message = field_pattern.sub(_replace_field, message)
        raise QueryValidationError(message)
    # query test record
    # every field's criteria must be satisfied for the record to match
    if valid_record:
        if not isinstance(valid_record, dict):
            raise ModelValidationError('%s must be a dictionary.' % _record_arg)
        for key, value in criteria_copy.items():
            eval_outcome = self._evaluate_field(valid_record, key, value)
            if not eval_outcome:
                return False
    return True
|
collectiveacuity/jsonModel
|
jsonmodel/validators.py
|
jsonModel._ingest_string
|
python
|
def _ingest_string(self, input_string, path_to_root):
'''
a helper method for ingesting a string
:return: valid_string
'''
valid_string = ''
try:
valid_string = self._validate_string(input_string, path_to_root)
except:
rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
if 'default_value' in self.keyMap[rules_path_to_root]:
valid_string = self.keyMap[rules_path_to_root]['default_value']
return valid_string
|
a helper method for ingesting a string
:return: valid_string
|
train
|
https://github.com/collectiveacuity/jsonModel/blob/1ea64c36d78add3faa7b85ff82c5ec685458c940/jsonmodel/validators.py#L1349-L1366
| null |
class jsonModel(object):
__rules__ = jsonLoader('jsonmodel', 'models/model-rules.json')
def __init__(self, data_model, query_rules=None):
    '''
    a method for testing data model declaration & initializing the class

    :param data_model: dictionary with json model architecture
    :param query_rules: [optional] dictionary with valid field type qualifiers
    :return: object with jsonModel methods
    '''

    # validate schema input
    if not isinstance(data_model, dict):
        raise ModelValidationError('Data model must be a dictionary.')
    elif 'schema' not in data_model.keys():
        raise ModelValidationError('Data model must have a schema key.')
    elif not isinstance(data_model['schema'], dict):
        raise ModelValidationError('Value for the data model "schema" field must be a dictionary.')
    elif not data_model['schema']:
        raise ModelValidationError('Data model "schema" field must not be empty.')

    # construct base methods (deep copy so the caller's declaration is not mutated)
    from copy import deepcopy
    data_model = deepcopy(data_model)
    self.schema = data_model['schema']
    model_map = mapModel(self.schema)
    self.keyName = model_map.keyName
    self.keyCriteria = model_map.keyCriteria

    # construct protected type classes
    self._datatype_names = mapModel._datatype_names
    self._datatype_classes = mapModel._datatype_classes

    # validate absence of item designators in keys
    item_pattern = re.compile('\[\d+\]')
    for i in range(len(self.keyName)):
        patterns_found = item_pattern.findall(self.keyName[i])
        if patterns_found:
            for designator in patterns_found:
                if designator != '[0]':
                    message = 'Key name for schema field %s must not contain the item designator pattern %s' % (self.keyName[i], designator)
                    raise ModelValidationError(message)

    # validate existence of first item in list declarations
    key_set = set(self.keyName)
    for i in range(len(self.keyName)):
        if self.keyCriteria[i]['value_datatype'] == 'list':
            item_key = self.keyName[i] + '[0]'
            if not item_key in key_set:
                message = 'Schema field %s must declare an initial item for the list.' % self.keyName[i]
                raise ModelValidationError(message)
            # alter list requirement if first item is empty
            else:
                item_index = self.keyName.index(item_key)
                if not self.keyCriteria[item_index]['required_field']:
                    self.keyCriteria[i]['required_field'] = False

    # validate title input & construct title method
    self.title = ''
    if 'title' in data_model.keys():
        if not isinstance(data_model['title'], str):
            raise ModelValidationError('Value for model title must be a string.')
        self.title = data_model['title']

    # validate description input & construct description method
    self.description = ''
    if 'description' in data_model.keys():
        if not isinstance(data_model['description'], str):
            raise ModelValidationError('Value for model description must be a string.')
        self.description = data_model['description']

    # validate url input & construct url method
    self.url = ''
    if 'url' in data_model.keys():
        if not isinstance(data_model['url'], str):
            raise ModelValidationError('Value for model url must be a string.')
        # BUGFIX: previously assigned to self.title, which clobbered the
        # model title and left self.url permanently empty
        self.url = data_model['url']

    # validate metadata input & construct metadata method
    self.metadata = {}
    if 'metadata' in data_model.keys():
        if not isinstance(data_model['metadata'], dict):
            raise ModelValidationError('Value for model metadata must be a dictionary.')
        self.metadata = data_model['metadata']

    # validate components input & construct component property
    self.components = {}
    if 'components' in data_model.keys():
        if not isinstance(data_model['components'], dict):
            raise ModelValidationError('Value for model components must be a dictionary.')
        self.components = self._validate_fields(data_model['components'], self.__rules__['components'])

    # construct keyMap fields from key names and key criteria
    self.keyMap = {}
    for i in range(len(self.keyName)):
        self.keyMap[self.keyName[i]] = self.keyCriteria[i]
    for key, value in self.components.items():
        # convert javascript dot_path to class dot_path
        dot_key = ''
        if not key:
            dot_key = '.'
        else:
            if key[0] != '.':
                dot_key = '.%s' % key
        # add component declarations to keyMap
        if key in self.keyMap.keys():
            for k, v in self.components[key].items():
                self.keyMap[key][k] = v
        elif dot_key and dot_key in self.keyMap.keys():
            for k, v in self.components[key].items():
                self.keyMap[dot_key][k] = v

    # validate default values in lists
    self._validate_defaults(self.keyMap)

    # construct queryRules property from class model rules
    self.queryRules = {}
    for key, value in self.__rules__['components'].items():
        remove_from_query = [ 'required_field', 'default_value', 'example_values', 'field_title', 'field_position', 'field_description', 'field_metadata', 'extra_fields' ]
        field_qualifiers = {
            'value_exists': False
        }
        for k, v in value.items():
            if k not in remove_from_query:
                field_qualifiers[k] = v
        self.queryRules[key] = field_qualifiers

    # validate query rules input and replace queryRules property
    if query_rules:
        if not isinstance(query_rules, dict):
            message = 'Value for query rules input must be a dictionary.'
            raise ModelValidationError(message)
        input_set = set(query_rules.keys())
        req_set = set(self.queryRules.keys())
        if input_set - req_set:
            message = 'Query rules input may only have %s field key names.' % req_set
            raise ModelValidationError(message)
        elif req_set - input_set:
            message = 'Query rules input must have all %s field key names.' % req_set
            raise ModelValidationError(message)
        for key in req_set:
            if not isinstance(query_rules[key], dict):
                message = 'Value for query rules %s field must be a dictionary.' % key
                raise ModelValidationError(message)
            input_qualifier_set = set(query_rules[key].keys())
            req_qualifier_set = set(self.queryRules[key].keys())
            if input_qualifier_set - req_qualifier_set:
                message = 'Query rules field %s may only have qualifiers %s' % (key, req_qualifier_set)
                raise ModelValidationError(message)
            for k, v in query_rules[key].items():
                if v.__class__ != self.queryRules[key][k].__class__:
                    qualifier_index = self._datatype_classes.index(self.queryRules[key][k].__class__)
                    qualifier_type = self._datatype_names[qualifier_index]
                    message = 'Value for query rules field %s qualifier %s must be a "%s" datatype.' % (key, k, qualifier_type)
                    raise ModelValidationError(message)
        self.queryRules = query_rules
def _validate_fields(self, fields_dict, fields_rules, declared_value=True):
'''
a helper method for validating component criteria against field rules
:param fields_dict: dictionary with component criteria keyed by field dot-path
:param fields_rules: dictionary with permitted qualifiers per field datatype
:param declared_value: boolean to toggle testing of values declared in the schema
:return: fields_dict (or raises ModelValidationError)
'''
# validate key names in fields
for key, value in fields_dict.items():
# convert javascript dot_path to class dot_path
if not key:
key = '.'
else:
if key[0] != '.':
key = '.%s' % key
if key not in self.keyName:
raise ModelValidationError('Field %s is not a field declared in model schema.' % key)
elif not isinstance(value, dict):
raise ModelValidationError('Value for field %s must be a dictionary.' % key)
# validate field criteria are appropriate to field datatype
value_type = self.keyCriteria[self.keyName.index(key)]['value_datatype']
type_dict = {}
if value_type == 'string':
type_dict = fields_rules['.string_fields']
elif value_type == 'number':
type_dict = fields_rules['.number_fields']
elif value_type == 'boolean':
type_dict = fields_rules['.boolean_fields']
elif value_type == 'list':
type_dict = fields_rules['.list_fields']
elif value_type == 'map':
type_dict = fields_rules['.map_fields']
elif value_type == 'null':
type_dict = fields_rules['.null_fields']
if set(value.keys()) - set(type_dict.keys()):
raise ModelValidationError('Field %s may only have datatype %s qualifiers %s.' % (key, value_type, set(type_dict.keys())))
# validate criteria qualifier values are appropriate datatype
for k, v in value.items():
v_index = self._datatype_classes.index(v.__class__)
v_type = self._datatype_names[v_index]
qualifier_index = self._datatype_classes.index(type_dict[k].__class__)
qualifier_type = self._datatype_names[qualifier_index]
if v_type != qualifier_type:
message = 'Value for field %s qualifier %s must be a %s datatype.' % (key, k, qualifier_type)
raise ModelValidationError(message)
# an integer exemplar in the rules means the qualifier must be an int
if qualifier_type == 'number':
if isinstance(type_dict[k], int):
if not isinstance(v, int):
message = 'Value for field %s qualifier %s must be an integer.' % (key, k)
raise ModelValidationError(message)
# validate internal logic of each qualifier value declaration
if k in ('must_not_contain', 'must_contain', 'contains_either'):
for item in v:
if not isinstance(item, str):
message = 'Each item in list field %s qualifier %s must be a string.' % (key, k)
raise ModelValidationError(message)
if k in ('min_length', 'max_length', 'min_size', 'max_size'):
if v < 0:
message = 'Value for field %s qualifier %s cannot be negative.' % (key, k)
raise ModelValidationError(message)
if k in ('discrete_values', 'excluded_values', 'example_values'):
for item in v:
if value_type == 'number':
if not isinstance(item, int) and not isinstance(item, float):
message = 'Each item in field %s qualifier %s list must be a number.' % (key, k)
raise ModelValidationError(message)
elif not isinstance(item, str):
message = 'Each item in list for field %s qualifier %s must be a string.' % (key, k)
raise ModelValidationError(message)
if k == 'identical_to':
if not v in self.keyName:
message = 'Value "%s" for field %s qualifier %s not found in components keys.' % (v, key, k)
raise ModelValidationError(message)
# unique_values only makes sense for lists of hashable scalar items
if k == 'unique_values':
if v:
item_name = key + '[0]'
item_type = self.keyCriteria[self.keyName.index(item_name)]['value_datatype']
if not item_type in ('number', 'string'):
message = 'Field %s[0] must be either a string or number if qualifier "unique_values": true' % key
raise ModelValidationError(message)
# validate lack of other qualifiers if value exist is false
if 'value_exists' in value.keys():
if not value['value_exists']:
if set(value.keys()) - {'value_exists'}:
message = 'If field %s qualifier value_exists: false, field may not have other qualifiers.' % key
raise ModelValidationError(message)
# validate size qualifiers against each other
size_qualifiers = ['min_size', 'max_size']
for qualifier in size_qualifiers:
if qualifier in value.keys():
test_value = value[qualifier]
value_path = 'field %s qualifier %s' % (key, qualifier)
header = 'Value %s for %s' % (test_value, value_path)
if 'min_size' in value.keys():
if test_value < value['min_size']:
message = '%s must not be less than "min_size": %s' % (header, value['min_size'])
raise ModelValidationError(message)
if 'max_size' in value.keys():
if test_value > value['max_size']:
message = '%s must not be greater than "max_size": %s' % (header, value['max_size'])
raise ModelValidationError(message)
# validate length qualifiers against each other
length_qualifiers = ['min_length', 'max_length']
for qualifier in length_qualifiers:
if qualifier in value.keys():
test_value = value[qualifier]
value_path = 'field %s qualifier %s' % (key, qualifier)
header = 'Value %s for %s' % (test_value, value_path)
if 'min_length' in value.keys():
if test_value < value['min_length']:
message = '%s must be at least "min_length": %s' % (header, value['min_length'])
raise ModelValidationError(message)
if 'max_length' in value.keys():
if test_value > value['max_length']:
message = '%s cannot be more than "max_length": %s' % (header, value['max_length'])
raise ModelValidationError(message)
# validate range qualifiers against each other & length qualifiers
range_qualifiers = ['min_value', 'max_value', 'greater_than', 'less_than', 'equal_to']
for qualifier in range_qualifiers:
if qualifier in value.keys():
test_value = value[qualifier]
value_path = 'field %s qualifier %s' % (key, qualifier)
quote_text = ''
if isinstance(test_value, str):
quote_text = '"'
header = 'Value %s%s%s for %s' % (quote_text, test_value, quote_text, value_path)
if 'min_value' in value.keys():
if test_value < value['min_value']:
message = '%s must not be less than "min_value": %s' % (header, value['min_value'])
raise ModelValidationError(message)
if 'max_value' in value.keys():
if test_value > value['max_value']:
message = '%s must not be greater than "max_value": %s' % (header, value['max_value'])
raise ModelValidationError(message)
if 'greater_than' in value.keys():
if test_value <= value['greater_than'] and not qualifier == 'greater_than':
message = '%s must be "greater_than": %s' % (header, value['greater_than'])
raise ModelValidationError(message)
if 'less_than' in value.keys():
if test_value >= value['less_than'] and not qualifier == 'less_than':
message = '%s must be "less_than": %s' % (header, value['less_than'])
raise ModelValidationError(message)
if 'min_length' in value.keys():
if len(test_value) < value['min_length']:
message = '%s must be at least "min_length": %s' % (header, value['min_length'])
raise ModelValidationError(message)
if 'max_length' in value.keys():
if len(test_value) > value['max_length']:
message = '%s cannot be more than "max_length": %s' % (header, value['max_length'])
raise ModelValidationError(message)
if 'integer_data' in value.keys():
if value['integer_data']:
if not isinstance(test_value, int):
message = '%s must be an "integer_data".' % header
raise ModelValidationError(message)
if 'must_not_contain' in value.keys():
for regex in value['must_not_contain']:
regex_pattern = re.compile(regex)
if regex_pattern.findall(test_value):
message = '%s matches regex pattern in "must_not_contain": ["%s"]' % (header, regex)
raise ModelValidationError(message)
if 'must_contain' in value.keys():
for regex in value['must_contain']:
regex_pattern = re.compile(regex)
if not regex_pattern.findall(test_value):
message = '%s does not match regex pattern in "must_contain": ["%s"].' % (header, regex)
raise ModelValidationError(message)
if 'contains_either' in value.keys():
regex_match = False
regex_patterns = []
for regex in value['contains_either']:
regex_patterns.append(regex)
regex_pattern = re.compile(regex)
if regex_pattern.findall(test_value):
regex_match = True
if not regex_match:
message = '%s does not match any regex patterns in "contains_either": %s' % (header, regex_patterns)
raise ModelValidationError(message)
if 'byte_data' in value.keys():
if value['byte_data']:
if qualifier != 'equal_to':
message = '%s cannot be used with base64 encoded "byte_data".' % header
raise ModelValidationError(message)
# validate discrete value qualifiers against other criteria
schema_field = self.keyCriteria[self.keyName.index(key)]
discrete_qualifiers = ['declared_value', 'default_value', 'excluded_values', 'discrete_values', 'example_values']
for qualifier in discrete_qualifiers:
test_qualifier = False
if qualifier in schema_field:
test_qualifier = True
if qualifier == 'declared_value' and not schema_field[qualifier]:
test_qualifier = False
if qualifier in value.keys() or (test_qualifier and declared_value):
multiple_values = False
if qualifier in value.keys():
if isinstance(value[qualifier], list):
test_list = value[qualifier]
multiple_values = True
else:
test_list = [value[qualifier]]
else:
test_list = [schema_field[qualifier]]
value_path = 'field %s qualifier %s' % (key, qualifier)
for i in range(len(test_list)):
test_value = test_list[i]
quote_text = ''
if isinstance(test_value, str):
quote_text = '"'
item_text = ''
if multiple_values:
item_text = '[%s]' % i
header = 'Value %s%s%s for %s%s' % (quote_text, test_value, quote_text, value_path, item_text)
if 'min_value' in value.keys():
if test_value < value['min_value']:
message = '%s must not be less than "min_value": %s' % (header, value['min_value'])
raise ModelValidationError(message)
if 'max_value' in value.keys():
if test_value > value['max_value']:
message = '%s must not be greater than "max_value": %s' % (header, value['max_value'])
raise ModelValidationError(message)
if 'equal_to' in value.keys():
if test_value != value['equal_to']:
# NOTE(review): a mismatch raises only for boolean test values
# (non-boolean equal_to conflicts pass silently) -- confirm this
# narrowing is intentional
if qualifier != 'declared_value' and isinstance(test_value, bool):
message = '%s must be "equal_to": %s' % (header, value['equal_to'])
raise ModelValidationError(message)
if 'greater_than' in value.keys():
if test_value <= value['greater_than']:
message = '%s must be "greater_than": %s' % (header, value['greater_than'])
raise ModelValidationError(message)
if 'less_than' in value.keys():
if test_value >= value['less_than']:
message = '%s must be "less_than": %s' % (header, value['less_than'])
raise ModelValidationError(message)
if 'integer_data' in value.keys():
if value['integer_data']:
if not isinstance(test_value, int):
message = '%s must be an "integer_data".' % header
raise ModelValidationError(message)
if 'min_length' in value.keys():
if len(test_value) < value['min_length']:
message = '%s must be at least "min_length": %s' % (header, value['min_length'])
raise ModelValidationError(message)
if 'max_length' in value.keys():
if len(test_value) > value['max_length']:
message = '%s cannot be more than "max_length": %s' % (header, value['max_length'])
raise ModelValidationError(message)
if 'must_not_contain' in value.keys():
for regex in value['must_not_contain']:
regex_pattern = re.compile(regex)
if regex_pattern.findall(test_value):
message = '%s matches regex pattern in "must_not_contain": ["%s"]' % (header, regex)
raise ModelValidationError(message)
if 'must_contain' in value.keys():
for regex in value['must_contain']:
regex_pattern = re.compile(regex)
if not regex_pattern.findall(test_value):
message = '%s does not match regex pattern in "must_contain": ["%s"]' % (header, regex)
raise ModelValidationError(message)
if 'contains_either' in value.keys():
regex_match = False
regex_patterns = []
for regex in value['contains_either']:
regex_patterns.append(regex)
regex_pattern = re.compile(regex)
if regex_pattern.findall(test_value):
regex_match = True
if not regex_match:
message = '%s does not match any regex patterns in "contains_either": %s' % (header, regex_patterns)
raise ModelValidationError(message)
# NOTE(review): this decode check runs whenever the byte_data key is
# present, even when "byte_data": false -- confirm intent
if 'byte_data' in value.keys():
message = '%s cannot be base64 decoded to "byte_data".' % header
try:
decoded_bytes = b64decode(test_value)
except:
raise ModelValidationError(message)
if not isinstance(decoded_bytes, bytes):
raise ModelValidationError(message)
# validate discrete value qualifiers against each other
for qualifier in discrete_qualifiers:
test_qualifier = False
if qualifier in schema_field:
test_qualifier = True
if qualifier == 'declared_value' and not schema_field[qualifier]:
test_qualifier = False
if qualifier in value.keys() or (test_qualifier and declared_value):
multiple_values = False
if qualifier in value.keys():
if isinstance(value[qualifier], list):
test_list = value[qualifier]
multiple_values = True
else:
test_list = [value[qualifier]]
else:
test_list = [schema_field[qualifier]]
value_path = 'field %s qualifier %s' % (key, qualifier)
for i in range(len(test_list)):
test_value = test_list[i]
quote_text = ''
if isinstance(test_value, str):
quote_text = '"'
item_text = ''
if multiple_values:
item_text = '[%s]' % i
header = 'Value %s%s%s for %s%s' % (quote_text, test_value, quote_text, value_path, item_text)
if 'excluded_values' in value.keys():
if not qualifier == 'excluded_values':
if test_value in value['excluded_values']:
message = '%s cannot be one of "excluded_values": %s.' % (header, value['excluded_values'])
raise ModelValidationError(message)
if 'discrete_values' in value.keys():
if not qualifier == 'excluded_values':
if test_value not in value['discrete_values']:
message = '%s must be one of "discrete_values": %s' % (header, value['discrete_values'])
raise ModelValidationError(message)
return fields_dict
def _validate_defaults(self, fields_dict):
# validate key names in fields
for key, value in fields_dict.items():
# retrieve value type and type dict
value_type = self.keyCriteria[self.keyName.index(key)]['value_datatype']
# validate discrete value qualifiers against other criteria
qualifier = 'default_value'
if qualifier in value.keys():
multiple_values = False
if isinstance(value[qualifier], list):
test_list = value[qualifier]
multiple_values = True
else:
test_list = [value[qualifier]]
value_path = 'field %s qualifier %s' % (key, qualifier)
for i in range(len(test_list)):
test_value = test_list[i]
quote_text = ''
if isinstance(test_value, str):
quote_text = '"'
item_text = ''
if multiple_values:
item_text = '[%s]' % i
qualifier_text = value_path + item_text
header = 'Value %s%s%s for %s' % (quote_text, test_value, quote_text, qualifier_text)
if value_type == 'list':
default_item_key = '%s[0]' % key
try:
self.validate(test_value, default_item_key, object_title=header)
except Exception as err:
raise ModelValidationError(str(err).strip().replace('field %s' % default_item_key, qualifier_text))
return fields_dict
def _evaluate_field(self, record_dict, field_name, field_criteria):
''' a helper method for evaluating record values based upon query criteria
:param record_dict: dictionary with model valid data to evaluate
:param field_name: string with path to root of query field
:param field_criteria: dictionary with query operators and qualifiers
:return: boolean (True if no field_criteria evaluate to false)
'''
# determine value existence criteria
value_exists = True
if 'value_exists' in field_criteria.keys():
if not field_criteria['value_exists']:
value_exists = False
# validate existence of field
# NOTE(review): _walk presumably returns a list of every value found at the
# dot-path (list fields can yield several) -- confirm against its definition
field_exists = True
try:
record_values = self._walk(field_name, record_dict)
except:
field_exists = False
# evaluate existence query criteria
if value_exists != field_exists:
return False
elif not value_exists:
return True
# convert javascript dot_path to class dot_path
field_key = field_name
if not field_name:
field_key = '.'
else:
if field_name[0] != '.':
field_key = '.%s' % field_name
# evaluate other query criteria
# each qualifier passes if ANY one of the walked values satisfies it
if key in ('min_size', 'min_length'):
found = False
if self.keyMap[field_key]['value_datatype'] == 'map':
for record_value in record_values:
# rough serialized-size heuristic; 51 presumably offsets the fixed
# overhead of an empty payload -- confirm against _validate_dict
record_size = sys.getsizeof(json.dumps(str(record_value)).replace(' ','')) - 51
if record_size >= value:
found = True
break
else:
for record_value in record_values:
if len(record_value) >= value:
found = True
break
if not found:
return False
elif key in ('max_size', 'max_length'):
found = False
if self.keyMap[field_key]['value_datatype'] == 'map':
for record_value in record_values:
record_size = sys.getsizeof(json.dumps(str(record_value)).replace(' ','')) - 51
if record_size <= value:
found = True
break
else:
for record_value in record_values:
if len(record_value) <= value:
found = True
break
if not found:
return False
elif key == 'min_value':
found = False
for record_value in record_values:
if record_value >= value:
found = True
break
if not found:
return False
elif key == 'max_value':
found = False
for record_value in record_values:
if record_value <= value:
found = True
break
if not found:
return False
elif key == 'equal_to':
found = False
for record_value in record_values:
if record_value == value:
found = True
break
if not found:
return False
elif key == 'greater_than':
found = False
for record_value in record_values:
if record_value > value:
found = True
break
if not found:
return False
elif key == 'less_than':
found = False
for record_value in record_values:
if record_value < value:
found = True
break
if not found:
return False
elif key == 'excluded_values':
for record_value in record_values:
if record_value in value:
return False
elif key == 'discrete_values':
found = False
for record_value in record_values:
if record_value in value:
found = True
break
if not found:
return False
elif key == 'integer_data':
found = False
dummy_int = 1
for record_value in record_values:
# class comparison (not isinstance) so that bools do not count as ints
if record_value.__class__ == dummy_int.__class__:
found = True
break
if value != found:
return False
elif key == 'byte_data':
found = False
for record_value in record_values:
try:
decoded_bytes = b64decode(record_value)
except:
decoded_bytes = ''
if isinstance(decoded_bytes, bytes):
found = True
break
if value != found:
return False
elif key == 'must_contain':
for regex in value:
regex_pattern = re.compile(regex)
found = False
for record_value in record_values:
if regex_pattern.findall(record_value):
found = True
break
if not found:
return False
elif key == 'must_not_contain':
for regex in value:
regex_pattern = re.compile(regex)
found = False
for record_value in record_values:
if regex_pattern.findall(record_value):
found = True
break
if found:
return False
elif key == 'contains_either':
found = False
for regex in value:
regex_pattern = re.compile(regex)
for record_value in record_values:
if regex_pattern.findall(record_value):
found = True
break
if found:
break
if not found:
return False
elif key == 'unique_values':
for record_value in record_values:
unique_values = True
if len(record_value) != len(set(record_value)):
unique_values = False
if value != unique_values:
return False
return True
def _validate_dict(self, input_dict, schema_dict, path_to_root, object_title=''):
''' a helper method for recursively validating keys in dictionaries
:param input_dict: dictionary with input values to validate
:param schema_dict: dictionary with the schema declaration at this path
:param path_to_root: string with dot-path of this dictionary in the model
:param object_title: [optional] string with a title to prepend to errors
:return input_dict
'''
# reconstruct key path to current dictionary in model
# (item designators like '[2]' normalize to '[0]' for rules lookup)
rules_top_level_key = re.sub('\[\d+\]', '[0]', path_to_root)
map_rules = self.keyMap[rules_top_level_key]
# construct list error report template
map_error = {
'object_title': object_title,
'model_schema': self.schema,
'input_criteria': map_rules,
'failed_test': 'value_datatype',
'input_path': path_to_root,
'error_value': 0,
'error_code': 4001
}
# validate map size
# NOTE(review): size measured as serialized byte length; 51 presumably
# offsets the fixed overhead of an empty payload -- confirm
if 'min_size' in map_rules.keys():
input_size = sys.getsizeof(json.dumps(str(input_dict)).replace(' ','')) - 51
if input_size < map_rules['min_size']:
map_error['failed_test'] = 'min_size'
map_error['error_value'] = input_size
map_error['error_code'] = 4031
raise InputValidationError(map_error)
if 'max_size' in map_rules.keys():
input_size = sys.getsizeof(json.dumps(str(input_dict)).replace(' ','')) - 51
if input_size > map_rules['max_size']:
map_error['failed_test'] = 'max_size'
map_error['error_value'] = input_size
map_error['error_code'] = 4032
raise InputValidationError(map_error)
# construct lists of keys in input dictionary
input_keys = []
input_key_list = []
for key in input_dict.keys():
error_dict = {
'object_title': object_title,
'model_schema': self.schema,
'input_criteria': self.keyMap[rules_top_level_key],
'failed_test': 'key_datatype',
'input_path': path_to_root,
'error_value': key,
'error_code': 4004
}
# NOTE(review): this writes into the shared keyMap criteria dict --
# the side effect persists across calls; confirm intended
error_dict['input_criteria']['key_datatype'] = 'string'
if path_to_root == '.':
if not isinstance(key, str):
input_key_name = path_to_root + str(key)
error_dict['input_path'] = input_key_name
raise InputValidationError(error_dict)
input_key_name = path_to_root + key
else:
if not isinstance(key, str):
input_key_name = path_to_root + '.' + str(key)
error_dict['input_path'] = input_key_name
raise InputValidationError(error_dict)
input_key_name = path_to_root + '.' + key
input_keys.append(input_key_name)
input_key_list.append(key)
# TODO: validate top-level key and values against identical to reference
# TODO: run lambda function and call validation
# construct lists of keys in schema dictionary
max_keys = []
max_key_list = []
req_keys = []
req_key_list = []
for key in schema_dict.keys():
if path_to_root == '.':
schema_key_name = path_to_root + key
else:
schema_key_name = path_to_root + '.' + key
max_keys.append(schema_key_name)
max_key_list.append(key)
rules_schema_key_name = re.sub('\[\d+\]', '[0]', schema_key_name)
if self.keyMap[rules_schema_key_name]['required_field']:
req_keys.append(schema_key_name)
req_key_list.append(key)
# validate existence of required fields
missing_keys = set(req_keys) - set(input_keys)
if missing_keys:
error_dict = {
'object_title': object_title,
'model_schema': self.schema,
'input_criteria': self.keyMap[rules_top_level_key],
'failed_test': 'required_field',
'input_path': path_to_root,
'error_value': list(missing_keys),
'error_code': 4002
}
# NOTE(review): mutates the shared keyMap criteria dict -- confirm intended
error_dict['input_criteria']['required_keys'] = req_keys
raise InputValidationError(error_dict)
# validate existence of extra fields
extra_keys = set(input_keys) - set(max_keys)
if extra_keys and not self.keyMap[rules_top_level_key]['extra_fields']:
extra_key_list = []
for key in extra_keys:
pathless_key = re.sub(rules_top_level_key, '', key, count=1)
extra_key_list.append(pathless_key)
error_dict = {
'object_title': object_title,
'model_schema': self.schema,
'input_criteria': self.keyMap[rules_top_level_key],
'failed_test': 'extra_fields',
'input_path': path_to_root,
'error_value': extra_key_list,
'error_code': 4003
}
# NOTE(review): mutates the shared keyMap criteria dict -- confirm intended
error_dict['input_criteria']['maximum_scope'] = max_key_list
raise InputValidationError(error_dict)
# validate datatype of value
for key, value in input_dict.items():
if path_to_root == '.':
input_key_name = path_to_root + key
else:
input_key_name = path_to_root + '.' + key
rules_input_key_name = re.sub('\[\d+\]', '[0]', input_key_name)
if input_key_name in max_keys:
input_criteria = self.keyMap[rules_input_key_name]
error_dict = {
'object_title': object_title,
'model_schema': self.schema,
'input_criteria': input_criteria,
'failed_test': 'value_datatype',
'input_path': input_key_name,
'error_value': value,
'error_code': 4001
}
try:
value_index = self._datatype_classes.index(value.__class__)
except:
error_dict['error_value'] = value.__class__.__name__
raise InputValidationError(error_dict)
value_type = self._datatype_names[value_index]
# a 'null' declaration in the schema accepts any datatype
if input_criteria['value_datatype'] == 'null':
pass
else:
if value_type != input_criteria['value_datatype']:
raise InputValidationError(error_dict)
# call appropriate validation sub-routine for datatype of value
if value_type == 'boolean':
input_dict[key] = self._validate_boolean(value, input_key_name, object_title)
elif value_type == 'number':
input_dict[key] = self._validate_number(value, input_key_name, object_title)
elif value_type == 'string':
input_dict[key] = self._validate_string(value, input_key_name, object_title)
elif value_type == 'map':
input_dict[key] = self._validate_dict(value, schema_dict[key], input_key_name, object_title)
elif value_type == 'list':
input_dict[key] = self._validate_list(value, schema_dict[key], input_key_name, object_title)
# set default values for empty optional fields
for key in max_key_list:
if key not in input_key_list:
indexed_key = max_keys[max_key_list.index(key)]
if indexed_key in self.components.keys():
if 'default_value' in self.components[indexed_key]:
input_dict[key] = self.components[indexed_key]['default_value']
return input_dict
def _validate_list(self, input_list, schema_list, path_to_root, object_title=''):
    '''
    a helper method for recursively validating items in a list

    :param input_list: list with items to validate against the model
    :param schema_list: list from the schema declaring the item archetype
    :param path_to_root: string with dot path from the model root to this list
    :param object_title: [optional] string with name of input for error reports
    :return: input_list (unchanged, or raises InputValidationError)
    '''
    # construct rules for list and items
    # NOTE: item designators (e.g. [3]) are normalized to [0] because the
    # schema only declares criteria for the first (archetype) item
    rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
    list_rules = self.keyMap[rules_path_to_root]
    initial_key = rules_path_to_root + '[0]'
    item_rules = self.keyMap[initial_key]
    # construct list error report template
    list_error = {
        'object_title': object_title,
        'model_schema': self.schema,
        'input_criteria': list_rules,
        'failed_test': 'value_datatype',
        'input_path': path_to_root,
        'error_value': 0,
        'error_code': 4001
    }
    # validate list size against min_size / max_size qualifiers
    if 'min_size' in list_rules.keys():
        if len(input_list) < list_rules['min_size']:
            list_error['failed_test'] = 'min_size'
            list_error['error_value'] = len(input_list)
            list_error['error_code'] = 4031
            raise InputValidationError(list_error)
    if 'max_size' in list_rules.keys():
        if len(input_list) > list_rules['max_size']:
            list_error['failed_test'] = 'max_size'
            list_error['error_value'] = len(input_list)
            list_error['error_code'] = 4032
            raise InputValidationError(list_error)
    # construct item error report template
    item_error = {
        'object_title': object_title,
        'model_schema': self.schema,
        'input_criteria': item_rules,
        'failed_test': 'value_datatype',
        'input_path': initial_key,
        'error_value': None,
        'error_code': 4001
    }
    # validate datatype of items
    for i in range(len(input_list)):
        input_path = path_to_root + '[%s]' % i
        item = input_list[i]
        item_error['input_path'] = input_path
        # an item of a class outside the recognized set is an invalid datatype
        try:
            item_index = self._datatype_classes.index(item.__class__)
        except:
            item_error['error_value'] = item.__class__.__name__
            raise InputValidationError(item_error)
        item_type = self._datatype_names[item_index]
        item_error['error_value'] = item
        if item_rules['value_datatype'] == 'null':
            pass
        else:
            if item_type != item_rules['value_datatype']:
                raise InputValidationError(item_error)
        # call appropriate validation sub-routine for datatype of item
        if item_type == 'boolean':
            input_list[i] = self._validate_boolean(item, input_path, object_title)
        elif item_type == 'number':
            input_list[i] = self._validate_number(item, input_path, object_title)
        elif item_type == 'string':
            input_list[i] = self._validate_string(item, input_path, object_title)
        elif item_type == 'map':
            input_list[i] = self._validate_dict(item, schema_list[0], input_path, object_title)
        elif item_type == 'list':
            input_list[i] = self._validate_list(item, schema_list[0], input_path, object_title)
    # validate unique values in list
    # NOTE(review): set() requires hashable items, so this qualifier only
    # works for lists of scalars — confirm it is never declared for maps/lists
    if 'unique_values' in list_rules.keys():
        if len(set(input_list)) < len(input_list):
            list_error['failed_test'] = 'unique_values'
            list_error['error_value'] = input_list
            list_error['error_code'] = 4033
            raise InputValidationError(list_error)
    # TODO: validate top-level item values against identical to reference
    # TODO: run lambda function and call validation url
    return input_list
def _validate_number(self, input_number, path_to_root, object_title=''):
'''
a helper method for validating properties of a number
:return: input_number
'''
rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
input_criteria = self.keyMap[rules_path_to_root]
error_dict = {
'object_title': object_title,
'model_schema': self.schema,
'input_criteria': input_criteria,
'failed_test': 'value_datatype',
'input_path': path_to_root,
'error_value': input_number,
'error_code': 4001
}
if 'integer_data' in input_criteria.keys():
if input_criteria['integer_data'] and not isinstance(input_number, int):
error_dict['failed_test'] = 'integer_data'
error_dict['error_code'] = 4021
raise InputValidationError(error_dict)
if 'min_value' in input_criteria.keys():
if input_number < input_criteria['min_value']:
error_dict['failed_test'] = 'min_value'
error_dict['error_code'] = 4022
raise InputValidationError(error_dict)
if 'max_value' in input_criteria.keys():
if input_number > input_criteria['max_value']:
error_dict['failed_test'] = 'max_value'
error_dict['error_code'] = 4023
raise InputValidationError(error_dict)
if 'greater_than' in input_criteria.keys():
if input_number <= input_criteria['greater_than']:
error_dict['failed_test'] = 'greater_than'
error_dict['error_code'] = 4024
raise InputValidationError(error_dict)
if 'less_than' in input_criteria.keys():
if input_number >= input_criteria['less_than']:
error_dict['failed_test'] = 'less_than'
error_dict['error_code'] = 4025
raise InputValidationError(error_dict)
if 'equal_to' in input_criteria.keys():
if input_number != input_criteria['equal_to']:
error_dict['failed_test'] = 'equal_to'
error_dict['error_code'] = 4026
raise InputValidationError(error_dict)
if 'discrete_values' in input_criteria.keys():
if input_number not in input_criteria['discrete_values']:
error_dict['failed_test'] = 'discrete_values'
error_dict['error_code'] = 4041
raise InputValidationError(error_dict)
if 'excluded_values' in input_criteria.keys():
if input_number in input_criteria['excluded_values']:
error_dict['failed_test'] = 'excluded_values'
error_dict['error_code'] = 4042
raise InputValidationError(error_dict)
# TODO: validate number against identical to reference
# TODO: run lambda function and call validation url
return input_number
def _validate_string(self, input_string, path_to_root, object_title=''):
    '''
    a helper method for validating the qualifier criteria of a string

    :param input_string: string to validate against field criteria
    :param path_to_root: string with dot path from model root to this field
    :param object_title: [optional] string with name of input for error reports
    :return: input_string (unchanged, or raises InputValidationError)
    '''
    # retrieve criteria declared for this field (item designators -> [0])
    rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
    input_criteria = self.keyMap[rules_path_to_root]
    # error report template shared by every failed qualifier test below
    error_dict = {
        'object_title': object_title,
        'model_schema': self.schema,
        'input_criteria': input_criteria,
        'failed_test': 'value_datatype',
        'input_path': path_to_root,
        'error_value': input_string,
        'error_code': 4001
    }
    # validate that the string decodes as base64 byte data
    if 'byte_data' in input_criteria.keys():
        if input_criteria['byte_data']:
            error_dict['failed_test'] = 'byte_data'
            error_dict['error_code'] = 4011
            try:
                decoded_bytes = b64decode(input_string)
            except:
                raise InputValidationError(error_dict)
            if not isinstance(decoded_bytes, bytes):
                raise InputValidationError(error_dict)
    # validate value comparisons (strings compare lexicographically)
    if 'min_value' in input_criteria.keys():
        if input_string < input_criteria['min_value']:
            error_dict['failed_test'] = 'min_value'
            error_dict['error_code'] = 4022
            raise InputValidationError(error_dict)
    if 'max_value' in input_criteria.keys():
        if input_string > input_criteria['max_value']:
            error_dict['failed_test'] = 'max_value'
            error_dict['error_code'] = 4023
            raise InputValidationError(error_dict)
    if 'greater_than' in input_criteria.keys():
        if input_string <= input_criteria['greater_than']:
            error_dict['failed_test'] = 'greater_than'
            error_dict['error_code'] = 4024
            raise InputValidationError(error_dict)
    if 'less_than' in input_criteria.keys():
        if input_string >= input_criteria['less_than']:
            error_dict['failed_test'] = 'less_than'
            error_dict['error_code'] = 4025
            raise InputValidationError(error_dict)
    if 'equal_to' in input_criteria.keys():
        if input_string != input_criteria['equal_to']:
            error_dict['failed_test'] = 'equal_to'
            error_dict['error_code'] = 4026
            raise InputValidationError(error_dict)
    # validate length constraints
    if 'min_length' in input_criteria.keys():
        if len(input_string) < input_criteria['min_length']:
            error_dict['failed_test'] = 'min_length'
            error_dict['error_code'] = 4012
            raise InputValidationError(error_dict)
    if 'max_length' in input_criteria.keys():
        if len(input_string) > input_criteria['max_length']:
            error_dict['failed_test'] = 'max_length'
            error_dict['error_code'] = 4013
            raise InputValidationError(error_dict)
    # validate regex constraints: none of these patterns may match
    if 'must_not_contain' in input_criteria.keys():
        for regex in input_criteria['must_not_contain']:
            regex_pattern = re.compile(regex)
            if regex_pattern.findall(input_string):
                error_dict['failed_test'] = 'must_not_contain'
                error_dict['error_code'] = 4014
                raise InputValidationError(error_dict)
    # all of these patterns must match
    if 'must_contain' in input_criteria.keys():
        for regex in input_criteria['must_contain']:
            regex_pattern = re.compile(regex)
            if not regex_pattern.findall(input_string):
                error_dict['failed_test'] = 'must_contain'
                error_dict['error_code'] = 4015
                raise InputValidationError(error_dict)
    # at least one of these patterns must match
    if 'contains_either' in input_criteria.keys():
        regex_match = False
        for regex in input_criteria['contains_either']:
            regex_pattern = re.compile(regex)
            if regex_pattern.findall(input_string):
                regex_match = True
        if not regex_match:
            error_dict['failed_test'] = 'contains_either'
            error_dict['error_code'] = 4016
            raise InputValidationError(error_dict)
    # validate white list / black list of values
    if 'discrete_values' in input_criteria.keys():
        if input_string not in input_criteria['discrete_values']:
            error_dict['failed_test'] = 'discrete_values'
            error_dict['error_code'] = 4041
            raise InputValidationError(error_dict)
    if 'excluded_values' in input_criteria.keys():
        if input_string in input_criteria['excluded_values']:
            error_dict['failed_test'] = 'excluded_values'
            error_dict['error_code'] = 4042
            raise InputValidationError(error_dict)
    # TODO: validate string against identical to reference
    # TODO: run lambda function and call validation url
    return input_string
def _validate_boolean(self, input_boolean, path_to_root, object_title=''):
'''
a helper method for validating properties of a boolean
:return: input_boolean
'''
rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
input_criteria = self.keyMap[rules_path_to_root]
error_dict = {
'object_title': object_title,
'model_schema': self.schema,
'input_criteria': input_criteria,
'failed_test': 'value_datatype',
'input_path': path_to_root,
'error_value': input_boolean,
'error_code': 4001
}
if 'equal_to' in input_criteria.keys():
if input_boolean != input_criteria['equal_to']:
error_dict['failed_test'] = 'equal_to'
error_dict['error_code'] = 4026
raise InputValidationError(error_dict)
# TODO: validate boolean against identical to reference
# TODO: run lambda function and call validation url
return input_boolean
def _ingest_dict(self, input_dict, schema_dict, path_to_root):
    '''
    a helper method for ingesting key, value pairs in a dictionary

    for each key declared in schema_dict a value is chosen by priority:
    the input value (when its datatype matches the schema), the declared
    default value, then an empty value appropriate to the datatype

    :param input_dict: dictionary with input to ingest
    :param schema_dict: dictionary with the model declaration for this scope
    :param path_to_root: string with dot path from model root to this dict
    :return: valid_dict
    '''
    valid_dict = {}
    # construct path to root for rules (item designators -> [0])
    rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
    # iterate over keys in schema dict
    for key, value in schema_dict.items():
        key_path = path_to_root
        if not key_path == '.':
            key_path += '.'
        key_path += key
        rules_key_path = re.sub('\[\d+\]', '[0]', key_path)
        # determine whether the input value's datatype matches the schema's
        value_match = False
        if key in input_dict.keys():
            value_index = self._datatype_classes.index(value.__class__)
            value_type = self._datatype_names[value_index]
            try:
                v_index = self._datatype_classes.index(input_dict[key].__class__)
                v_type = self._datatype_names[v_index]
                if v_type == value_type:
                    value_match = True
            except:
                value_match = False
        if value_match:
            # ingest the input value with the sub-routine for its datatype
            if value_type == 'null':
                valid_dict[key] = input_dict[key]
            elif value_type == 'boolean':
                valid_dict[key] = self._ingest_boolean(input_dict[key], key_path)
            elif value_type == 'number':
                valid_dict[key] = self._ingest_number(input_dict[key], key_path)
            elif value_type == 'string':
                valid_dict[key] = self._ingest_string(input_dict[key], key_path)
            elif value_type == 'map':
                valid_dict[key] = self._ingest_dict(input_dict[key], schema_dict[key], key_path)
            elif value_type == 'list':
                valid_dict[key] = self._ingest_list(input_dict[key], schema_dict[key], key_path)
        else:
            # fall back to the declared default or an empty value
            value_type = self.keyMap[rules_key_path]['value_datatype']
            if 'default_value' in self.keyMap[rules_key_path]:
                valid_dict[key] = self.keyMap[rules_key_path]['default_value']
            elif value_type == 'null':
                valid_dict[key] = None
            elif value_type == 'boolean':
                valid_dict[key] = False
            elif value_type == 'number':
                valid_dict[key] = 0.0
                # integer fields receive an int zero instead of a float zero
                if 'integer_data' in self.keyMap[rules_key_path].keys():
                    if self.keyMap[rules_key_path]['integer_data']:
                        valid_dict[key] = 0
            elif value_type == 'string':
                valid_dict[key] = ''
            elif value_type == 'list':
                valid_dict[key] = []
            elif value_type == 'map':
                # recurse with an empty input to fill in nested defaults
                valid_dict[key] = self._ingest_dict({}, schema_dict[key], key_path)
    # add extra fields if set to True
    if self.keyMap[rules_path_to_root]['extra_fields']:
        for key, value in input_dict.items():
            if key not in valid_dict.keys():
                valid_dict[key] = value
    return valid_dict
def _ingest_list(self, input_list, schema_list, path_to_root):
    '''
    a helper method for ingesting items in a list

    invalid items are silently skipped and ingestion stops once the
    list reaches any 'max_size' declared in the model

    :param input_list: list with items to ingest
    :param schema_list: list from the schema declaring the item archetype
    :param path_to_root: string with dot path from model root to this list
    :return: valid_list
    '''
    valid_list = []
    # construct max list size
    max_size = None
    rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
    if 'max_size' in self.keyMap[rules_path_to_root].keys():
        # a falsy max_size means the list accepts no items at all
        if not self.keyMap[rules_path_to_root]['max_size']:
            return valid_list
        else:
            max_size = self.keyMap[rules_path_to_root]['max_size']
    # iterate over items in input list
    if input_list:
        # expected item datatype comes from the archetype (first) item
        rules_index = self._datatype_classes.index(schema_list[0].__class__)
        rules_type = self._datatype_names[rules_index]
        for i in range(len(input_list)):
            item_path = '%s[%s]' % (path_to_root, i)
            value_match = False
            try:
                item_index = self._datatype_classes.index(input_list[i].__class__)
                item_type = self._datatype_names[item_index]
                if item_type == rules_type:
                    value_match = True
            except:
                value_match = False
            if value_match:
                # scalar items must pass validation to be kept; container
                # items are ingested recursively; failures are dropped
                try:
                    if item_type == 'boolean':
                        valid_list.append(self._validate_boolean(input_list[i], item_path))
                    elif item_type == 'number':
                        valid_list.append(self._validate_number(input_list[i], item_path))
                    elif item_type == 'string':
                        valid_list.append(self._validate_string(input_list[i], item_path))
                    elif item_type == 'map':
                        valid_list.append(self._ingest_dict(input_list[i], schema_list[0], item_path))
                    elif item_type == 'list':
                        valid_list.append(self._ingest_list(input_list[i], schema_list[0], item_path))
                except:
                    pass
            # stop early once the declared maximum size is reached
            if isinstance(max_size, int):
                if len(valid_list) == max_size:
                    return valid_list
    return valid_list
def _ingest_number(self, input_number, path_to_root):
'''
a helper method for ingesting a number
:return: valid_number
'''
valid_number = 0.0
try:
valid_number = self._validate_number(input_number, path_to_root)
except:
rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
if 'default_value' in self.keyMap[rules_path_to_root]:
valid_number = self.keyMap[rules_path_to_root]['default_value']
elif 'integer_data' in self.keyMap[rules_path_to_root].keys():
if self.keyMap[rules_path_to_root]['integer_data']:
valid_number = 0
return valid_number
def _ingest_boolean(self, input_boolean, path_to_root):
'''
a helper method for ingesting a boolean
:return: valid_boolean
'''
valid_boolean = False
try:
valid_boolean = self._validate_boolean(input_boolean, path_to_root)
except:
rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
if 'default_value' in self.keyMap[rules_path_to_root]:
valid_boolean = self.keyMap[rules_path_to_root]['default_value']
return valid_boolean
def _reconstruct(self, path_to_root):
'''
a helper method for finding the schema endpoint from a path to root
:param path_to_root: string with dot path to root from
:return: list, dict, string, number, or boolean at path to root
'''
# split path to root into segments
item_pattern = re.compile('\d+\\]')
dot_pattern = re.compile('\\.|\\[')
path_segments = dot_pattern.split(path_to_root)
# construct base schema endpoint
schema_endpoint = self.schema
# reconstruct schema endpoint from segments
if path_segments[1]:
for i in range(1,len(path_segments)):
if item_pattern.match(path_segments[i]):
schema_endpoint = schema_endpoint[0]
else:
schema_endpoint = schema_endpoint[path_segments[i]]
return schema_endpoint
def _walk(self, path_to_root, record_dict):
    '''
    a helper method for finding the record endpoints at a path to root

    since a path may cross a list (whose items fan out), the return
    value is a list of every endpoint found at path_to_root

    :param path_to_root: string with dot path to root from record
    :param record_dict: dictionary with the record to walk
    :return: list of values found at path_to_root
    '''
    # split path to root into segments ('0]' marks an item designator)
    item_pattern = re.compile('\d+\\]')
    dot_pattern = re.compile('\\.|\\[')
    path_segments = dot_pattern.split(path_to_root)
    # construct empty fields
    record_endpoints = []
    # determine starting position (drop the empty segment of a leading dot)
    if not path_segments[0]:
        path_segments.pop(0)
    # define internal recursive function
    def _walk_int(path_segments, record_dict):
        record_endpoint = record_dict
        for i in range(0, len(path_segments)):
            if item_pattern.match(path_segments[i]):
                # item designator: fan out across every item in the list
                for j in range(0, len(record_endpoint)):
                    if len(path_segments) == 2:
                        record_endpoints.append(record_endpoint[j])
                    else:
                        # only recurse if no earlier segment was already a
                        # designator (that recursion handled the remainder)
                        stop_chain = False
                        for x in range(0, i):
                            if item_pattern.match(path_segments[x]):
                                stop_chain = True
                        if not stop_chain:
                            shortened_segments = []
                            for z in range(i + 1, len(path_segments)):
                                shortened_segments.append(path_segments[z])
                            _walk_int(shortened_segments, record_endpoint[j])
            else:
                # key name: descend into the dictionary, or record the
                # value when this is the final segment
                stop_chain = False
                for y in range(0, i):
                    if item_pattern.match(path_segments[y]):
                        stop_chain = True
                if not stop_chain:
                    if len(path_segments) == i + 1:
                        record_endpoints.append(record_endpoint[path_segments[i]])
                    else:
                        record_endpoint = record_endpoint[path_segments[i]]
    # conduct recursive walk
    _walk_int(path_segments, record_dict)
    return record_endpoints
def validate(self, input_data, path_to_root='', object_title=''):
    '''
    a core method for validating input against the model

    input_data is only returned if all data is valid

    :param input_data: list, dict, string, number, or boolean to validate
    :param path_to_root: [optional] string with dot-path of model component
    :param object_title: [optional] string with name of input to validate
    :return: input_data (or InputValidationError)
    '''
    __name__ = '%s.validate' % self.__class__.__name__
    _path_arg = '%s(path_to_root="...")' % __name__
    _title_arg = '%s(object_title="...")' % __name__
    # validate input
    # a javascript-style path without the leading dot is normalized here
    copy_path = path_to_root
    if path_to_root:
        if not isinstance(path_to_root, str):
            raise ModelValidationError('%s must be a string.' % _path_arg)
        else:
            if path_to_root[0] != '.':
                copy_path = '.%s' % path_to_root
        if not copy_path in self.keyMap.keys():
            raise ModelValidationError('%s does not exist in components %s.' % (_path_arg.replace('...', path_to_root), self.keyMap.keys()))
    else:
        copy_path = '.'
    if object_title:
        if not isinstance(object_title, str):
            raise ModelValidationError('%s must be a string' % _title_arg)
    # construct generic error dictionary
    error_dict = {
        'object_title': object_title,
        'model_schema': self.schema,
        'input_criteria': self.keyMap[copy_path],
        'failed_test': 'value_datatype',
        'input_path': copy_path,
        'error_value': input_data,
        'error_code': 4001
    }
    # determine value type of input data
    # an unrecognized python class signals an invalid datatype
    try:
        input_index = self._datatype_classes.index(input_data.__class__)
    except:
        error_dict['error_value'] = input_data.__class__.__name__
        raise InputValidationError(error_dict)
    input_type = self._datatype_names[input_index]
    # validate input data type
    if input_type != self.keyMap[copy_path]['value_datatype']:
        raise InputValidationError(error_dict)
    # run helper method appropriate to data type
    if input_type == 'boolean':
        input_data = self._validate_boolean(input_data, copy_path, object_title)
    elif input_type == 'number':
        input_data = self._validate_number(input_data, copy_path, object_title)
    elif input_type == 'string':
        input_data = self._validate_string(input_data, copy_path, object_title)
    elif input_type == 'list':
        # rebuild the schema declaration at this path for the recursion
        schema_list = self._reconstruct(copy_path)
        input_data = self._validate_list(input_data, schema_list, copy_path, object_title)
    elif input_type == 'map':
        schema_dict = self._reconstruct(copy_path)
        input_data = self._validate_dict(input_data, schema_dict, copy_path, object_title)
    return input_data
def ingest(self, **kwargs):
    '''
    a core method to ingest and validate arbitrary keyword data

    **NOTE: data is always returned with this method**

    for each key declared in the model, the returned value is chosen
    by the following priority:
    1. the value in kwargs, when the field passes its validation test
    2. the default value declared for the key in the model
    3. an empty value appropriate to the datatype of the key

    **NOTE: as long as a default value is provided for each key-
            value, returned data will be model valid

    **NOTE: if 'extra_fields' is True for a dictionary, the key-
            value pair of all fields in kwargs which are not declared
            in the model will also be added to the corresponding
            dictionary data

    **NOTE: if 'max_size' is declared for a list, method will
            stop adding input to the list once it reaches max size

    :param kwargs: key, value pairs
    :return: dictionary with keys and values
    '''
    __name__ = '%s.ingest' % self.__class__.__name__
    # delegate to the recursive dictionary ingestion starting at the root
    return self._ingest_dict(kwargs, self.schema, '.')
def query(self, query_criteria, valid_record=None):
    '''
    a core method for querying model valid data with criteria

    **NOTE: input is only returned if all fields & qualifiers are valid for model

    :param query_criteria: dictionary with model field names and query qualifiers
    :param valid_record: dictionary with model valid record
    :return: boolean (or QueryValidationError)

    an example of how to construct the query_criteria argument:

    query_criteria = {
        '.path.to.number': {
            'min_value': 4.5
        },
        '.path.to.string': {
            'must_contain': [ '\\regex' ]
        }
    }

    **NOTE: for a full list of operators for query_criteria based upon field
            datatype, see either the query-rules.json file or REFERENCE file
    '''
    __name__ = '%s.query' % self.__class__.__name__
    _query_arg = '%s(query_criteria={...})' % __name__
    _record_arg = '%s(valid_record={...})' % __name__
    # validate input
    if not isinstance(query_criteria, dict):
        raise ModelValidationError('%s must be a dictionary.' % _query_arg)
    # convert javascript dot_path to class dot_path
    # scalar criteria values are shorthand for an 'equal_to' qualifier;
    # the conversions are tracked so error messages can be rewritten in
    # the caller's own terms below
    criteria_copy = {}
    equal_fields = []
    dot_fields = []
    for key, value in query_criteria.items():
        copy_key = key
        if not key:
            copy_key = '.'
        else:
            if key[0] != '.':
                copy_key = '.%s' % key
                dot_fields.append(copy_key)
        criteria_copy[copy_key] = value
        if value.__class__ in self._datatype_classes[0:4]:
            criteria_copy[copy_key] = {
                'equal_to': value
            }
            equal_fields.append(copy_key)
    # validate query criteria against query rules
    query_kwargs = {
        'fields_dict': criteria_copy,
        'fields_rules': self.queryRules,
        'declared_value': False
    }
    try:
        self._validate_fields(**query_kwargs)
    except ModelValidationError as err:
        # rewrite the model error so it reads in the caller's own terms:
        # undo the implicit equal_to shorthand and the added leading dot
        message = err.error['message']
        for field in equal_fields:
            equal_error = 'field %s qualifier equal_to' % field
            if message.find(equal_error) > -1:
                message = message.replace(equal_error, 'field %s' % field)
                break
        field_pattern = re.compile('ield\s(\..*?)\s')
        field_name = field_pattern.findall(message)
        if field_name:
            if field_name[0] in dot_fields:
                def _replace_field(x):
                    return 'ield %s ' % x.group(1)[1:]
                message = field_pattern.sub(_replace_field, message)
        raise QueryValidationError(message)
    # query test record: every criterion must evaluate true
    if valid_record:
        if not isinstance(valid_record, dict):
            raise ModelValidationError('%s must be a dictionary.' % _record_arg)
        for key, value in criteria_copy.items():
            eval_outcome = self._evaluate_field(valid_record, key, value)
            if not eval_outcome:
                return False
    return True
|
collectiveacuity/jsonModel
|
jsonmodel/validators.py
|
jsonModel._ingest_boolean
|
python
|
def _ingest_boolean(self, input_boolean, path_to_root):
'''
a helper method for ingesting a boolean
:return: valid_boolean
'''
valid_boolean = False
try:
valid_boolean = self._validate_boolean(input_boolean, path_to_root)
except:
rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
if 'default_value' in self.keyMap[rules_path_to_root]:
valid_boolean = self.keyMap[rules_path_to_root]['default_value']
return valid_boolean
|
a helper method for ingesting a boolean
:return: valid_boolean
|
train
|
https://github.com/collectiveacuity/jsonModel/blob/1ea64c36d78add3faa7b85ff82c5ec685458c940/jsonmodel/validators.py#L1368-L1385
| null |
class jsonModel(object):
__rules__ = jsonLoader('jsonmodel', 'models/model-rules.json')
def __init__(self, data_model, query_rules=None):
    '''
    a method for testing data model declaration & initializing the class

    :param data_model: dictionary with json model architecture
    :param query_rules: [optional] dictionary with valid field type qualifiers
    :return: object with jsonModel methods
    '''
    # validate schema input
    if not isinstance(data_model, dict):
        raise ModelValidationError('Data model must be a dictionary.')
    elif 'schema' not in data_model.keys():
        raise ModelValidationError('Data model must have a schema key.')
    elif not isinstance(data_model['schema'], dict):
        raise ModelValidationError('Value for the data model "schema" field must be a dictionary.')
    elif not data_model['schema']:
        raise ModelValidationError('Data model "schema" field must not be empty.')
    # construct base methods
    # deepcopy prevents later mutation of the caller's input dictionary
    from copy import deepcopy
    data_model = deepcopy(data_model)
    self.schema = data_model['schema']
    model_map = mapModel(self.schema)
    self.keyName = model_map.keyName
    self.keyCriteria = model_map.keyCriteria
    # construct protected type classes
    self._datatype_names = mapModel._datatype_names
    self._datatype_classes = mapModel._datatype_classes
    # validate absence of item designators in keys
    # only the archetype designator [0] is allowed in schema key names
    item_pattern = re.compile('\[\d+\]')
    for i in range(len(self.keyName)):
        patterns_found = item_pattern.findall(self.keyName[i])
        if patterns_found:
            for designator in patterns_found:
                if designator != '[0]':
                    message = 'Key name for schema field %s must not contain the item designator pattern %s' % (self.keyName[i], designator)
                    raise ModelValidationError(message)
    # validate existence of first item in list declarations
    key_set = set(self.keyName)
    for i in range(len(self.keyName)):
        if self.keyCriteria[i]['value_datatype'] == 'list':
            item_key = self.keyName[i] + '[0]'
            if not item_key in key_set:
                message = 'Schema field %s must declare an initial item for the list.' % self.keyName[i]
                raise ModelValidationError(message)
            # alter list requirement if first item is empty
            else:
                item_index = self.keyName.index(item_key)
                if not self.keyCriteria[item_index]['required_field']:
                    self.keyCriteria[i]['required_field'] = False
    # validate title input & construct title property
    self.title = ''
    if 'title' in data_model.keys():
        if not isinstance(data_model['title'], str):
            raise ModelValidationError('Value for model title must be a string.')
        self.title = data_model['title']
    # validate description input & construct description property
    self.description = ''
    if 'description' in data_model.keys():
        if not isinstance(data_model['description'], str):
            raise ModelValidationError('Value for model description must be a string.')
        self.description = data_model['description']
    # validate url input & construct url property
    self.url = ''
    if 'url' in data_model.keys():
        if not isinstance(data_model['url'], str):
            raise ModelValidationError('Value for model url must be a string.')
        # FIX: was "self.title = data_model['url']" (copy/paste bug) which
        # clobbered the title and left self.url permanently empty
        self.url = data_model['url']
    # validate metadata input & construct metadata property
    self.metadata = {}
    if 'metadata' in data_model.keys():
        if not isinstance(data_model['metadata'], dict):
            raise ModelValidationError('Value for model metadata must be a dictionary.')
        self.metadata = data_model['metadata']
    # validate components input & construct component property
    self.components = {}
    if 'components' in data_model.keys():
        if not isinstance(data_model['components'], dict):
            raise ModelValidationError('Value for model components must be a dictionary.')
        self.components = self._validate_fields(data_model['components'], self.__rules__['components'])
    # construct keyMap fields from key names and key criteria
    self.keyMap = {}
    for i in range(len(self.keyName)):
        self.keyMap[self.keyName[i]] = self.keyCriteria[i]
    for key, value in self.components.items():
        # convert javascript dot_path to class dot_path
        dot_key = ''
        if not key:
            dot_key = '.'
        else:
            if key[0] != '.':
                dot_key = '.%s' % key
        # add component declarations to keyMap
        if key in self.keyMap.keys():
            for k, v in self.components[key].items():
                self.keyMap[key][k] = v
        elif dot_key and dot_key in self.keyMap.keys():
            for k, v in self.components[key].items():
                self.keyMap[dot_key][k] = v
    # validate default values in lists
    self._validate_defaults(self.keyMap)
    # construct queryRules property from class model rules
    # (non-query qualifiers are stripped from each field's rule set)
    self.queryRules = {}
    for key, value in self.__rules__['components'].items():
        remove_from_query = [ 'required_field', 'default_value', 'example_values', 'field_title', 'field_position', 'field_description', 'field_metadata', 'extra_fields' ]
        field_qualifiers = {
            'value_exists': False
        }
        for k, v in value.items():
            if k not in remove_from_query:
                field_qualifiers[k] = v
        self.queryRules[key] = field_qualifiers
    # validate query rules input and replace queryRules property
    if query_rules:
        if not isinstance(query_rules, dict):
            message = 'Value for query rules input must be a dictionary.'
            raise ModelValidationError(message)
        input_set = set(query_rules.keys())
        req_set = set(self.queryRules.keys())
        if input_set - req_set:
            message = 'Query rules input may only have %s field key names.' % req_set
            raise ModelValidationError(message)
        elif req_set - input_set:
            message = 'Query rules input must have all %s field key names.' % req_set
            raise ModelValidationError(message)
        for key in req_set:
            if not isinstance(query_rules[key], dict):
                message = 'Value for query rules %s field must be a dictionary.' % key
                raise ModelValidationError(message)
            input_qualifier_set = set(query_rules[key].keys())
            req_qualifier_set = set(self.queryRules[key].keys())
            if input_qualifier_set - req_qualifier_set:
                message = 'Query rules field %s may only have qualifiers %s' % (key, req_qualifier_set)
                raise ModelValidationError(message)
            # each declared qualifier value must match the datatype of the default
            for k, v in query_rules[key].items():
                if v.__class__ != self.queryRules[key][k].__class__:
                    qualifier_index = self._datatype_classes.index(self.queryRules[key][k].__class__)
                    qualifier_type = self._datatype_names[qualifier_index]
                    message = 'Value for query rules field %s qualifier %s must be a "%s" datatype.' % (key, k, qualifier_type)
                    raise ModelValidationError(message)
        self.queryRules = query_rules
    def _validate_fields(self, fields_dict, fields_rules, declared_value=True):
        ''' a helper method for validating field criteria declarations against model rules

        :param fields_dict: dictionary of field dot-paths mapped to criteria dictionaries
        :param fields_rules: dictionary of allowed qualifiers keyed by field datatype
            (keys '.string_fields', '.number_fields', '.boolean_fields', '.list_fields',
            '.map_fields', '.null_fields')
        :param declared_value: [optional] boolean; when True, qualifiers found in the
            schema field itself (declared_value, default_value, etc.) are also tested
            against the declared criteria
        :return: fields_dict (unchanged) if all criteria are internally consistent
        :raises: ModelValidationError on the first inconsistency found
        '''
        # validate key names in fields
        for key, value in fields_dict.items():
            # convert javascript dot_path to class dot_path
            if not key:
                key = '.'
            else:
                if key[0] != '.':
                    key = '.%s' % key
            if key not in self.keyName:
                raise ModelValidationError('Field %s is not a field declared in model schema.' % key)
            elif not isinstance(value, dict):
                raise ModelValidationError('Value for field %s must be a dictionary.' % key)
            # validate field criteria are appropriate to field datatype
            value_type = self.keyCriteria[self.keyName.index(key)]['value_datatype']
            type_dict = {}
            if value_type == 'string':
                type_dict = fields_rules['.string_fields']
            elif value_type == 'number':
                type_dict = fields_rules['.number_fields']
            elif value_type == 'boolean':
                type_dict = fields_rules['.boolean_fields']
            elif value_type == 'list':
                type_dict = fields_rules['.list_fields']
            elif value_type == 'map':
                type_dict = fields_rules['.map_fields']
            elif value_type == 'null':
                type_dict = fields_rules['.null_fields']
            if set(value.keys()) - set(type_dict.keys()):
                raise ModelValidationError('Field %s may only have datatype %s qualifiers %s.' % (key, value_type, set(type_dict.keys())))
            # validate criteria qualifier values are appropriate datatype
            for k, v in value.items():
                v_index = self._datatype_classes.index(v.__class__)
                v_type = self._datatype_names[v_index]
                qualifier_index = self._datatype_classes.index(type_dict[k].__class__)
                qualifier_type = self._datatype_names[qualifier_index]
                if v_type != qualifier_type:
                    message = 'Value for field %s qualifier %s must be a %s datatype.' % (key, k, qualifier_type)
                    raise ModelValidationError(message)
                # when the rules declare an int exemplar, a float qualifier value is rejected
                if qualifier_type == 'number':
                    if isinstance(type_dict[k], int):
                        if not isinstance(v, int):
                            message = 'Value for field %s qualifier %s must be an integer.' % (key, k)
                            raise ModelValidationError(message)
                # validate internal logic of each qualifier value declaration
                if k in ('must_not_contain', 'must_contain', 'contains_either'):
                    for item in v:
                        if not isinstance(item, str):
                            message = 'Each item in list field %s qualifier %s must be a string.' % (key, k)
                            raise ModelValidationError(message)
                if k in ('min_length', 'max_length', 'min_size', 'max_size'):
                    if v < 0:
                        message = 'Value for field %s qualifier %s cannot be negative.' % (key, k)
                        raise ModelValidationError(message)
                if k in ('discrete_values', 'excluded_values', 'example_values'):
                    for item in v:
                        if value_type == 'number':
                            if not isinstance(item, int) and not isinstance(item, float):
                                message = 'Each item in field %s qualifier %s list must be a number.' % (key, k)
                                raise ModelValidationError(message)
                        elif not isinstance(item, str):
                            message = 'Each item in list for field %s qualifier %s must be a string.' % (key, k)
                            raise ModelValidationError(message)
                if k == 'identical_to':
                    if not v in self.keyName:
                        message = 'Value "%s" for field %s qualifier %s not found in components keys.' % (v, key, k)
                        raise ModelValidationError(message)
                # unique_values only makes sense for lists of hashable scalar items
                if k == 'unique_values':
                    if v:
                        item_name = key + '[0]'
                        item_type = self.keyCriteria[self.keyName.index(item_name)]['value_datatype']
                        if not item_type in ('number', 'string'):
                            message = 'Field %s[0] must be either a string or number if qualifier "unique_values": true' % key
                            raise ModelValidationError(message)
            # validate lack of other qualifiers if value exist is false
            if 'value_exists' in value.keys():
                if not value['value_exists']:
                    if set(value.keys()) - {'value_exists'}:
                        message = 'If field %s qualifier value_exists: false, field may not have other qualifiers.' % key
                        raise ModelValidationError(message)
            # validate size qualifiers against each other
            size_qualifiers = ['min_size', 'max_size']
            for qualifier in size_qualifiers:
                if qualifier in value.keys():
                    test_value = value[qualifier]
                    value_path = 'field %s qualifier %s' % (key, qualifier)
                    header = 'Value %s for %s' % (test_value, value_path)
                    if 'min_size' in value.keys():
                        if test_value < value['min_size']:
                            message = '%s must not be less than "min_size": %s' % (header, value['min_size'])
                            raise ModelValidationError(message)
                    if 'max_size' in value.keys():
                        if test_value > value['max_size']:
                            message = '%s must not be greater than "max_size": %s' % (header, value['max_size'])
                            raise ModelValidationError(message)
            # validate length qualifiers against each other
            length_qualifiers = ['min_length', 'max_length']
            for qualifier in length_qualifiers:
                if qualifier in value.keys():
                    test_value = value[qualifier]
                    value_path = 'field %s qualifier %s' % (key, qualifier)
                    header = 'Value %s for %s' % (test_value, value_path)
                    if 'min_length' in value.keys():
                        if test_value < value['min_length']:
                            message = '%s must be at least "min_length": %s' % (header, value['min_length'])
                            raise ModelValidationError(message)
                    if 'max_length' in value.keys():
                        if test_value > value['max_length']:
                            message = '%s cannot be more than "max_length": %s' % (header, value['max_length'])
                            raise ModelValidationError(message)
            # validate range qualifiers against each other & length qualifiers
            range_qualifiers = ['min_value', 'max_value', 'greater_than', 'less_than', 'equal_to']
            for qualifier in range_qualifiers:
                if qualifier in value.keys():
                    test_value = value[qualifier]
                    value_path = 'field %s qualifier %s' % (key, qualifier)
                    quote_text = ''
                    if isinstance(test_value, str):
                        quote_text = '"'
                    header = 'Value %s%s%s for %s' % (quote_text, test_value, quote_text, value_path)
                    if 'min_value' in value.keys():
                        if test_value < value['min_value']:
                            message = '%s must not be less than "min_value": %s' % (header, value['min_value'])
                            raise ModelValidationError(message)
                    if 'max_value' in value.keys():
                        if test_value > value['max_value']:
                            message = '%s must not be greater than "max_value": %s' % (header, value['max_value'])
                            raise ModelValidationError(message)
                    # a strict bound is not compared against itself
                    if 'greater_than' in value.keys():
                        if test_value <= value['greater_than'] and not qualifier == 'greater_than':
                            message = '%s must be "greater_than": %s' % (header, value['greater_than'])
                            raise ModelValidationError(message)
                    if 'less_than' in value.keys():
                        if test_value >= value['less_than'] and not qualifier == 'less_than':
                            message = '%s must be "less_than": %s' % (header, value['less_than'])
                            raise ModelValidationError(message)
                    if 'min_length' in value.keys():
                        if len(test_value) < value['min_length']:
                            message = '%s must be at least "min_length": %s' % (header, value['min_length'])
                            raise ModelValidationError(message)
                    if 'max_length' in value.keys():
                        if len(test_value) > value['max_length']:
                            message = '%s cannot be more than "max_length": %s' % (header, value['max_length'])
                            raise ModelValidationError(message)
                    if 'integer_data' in value.keys():
                        if value['integer_data']:
                            if not isinstance(test_value, int):
                                message = '%s must be an "integer_data".' % header
                                raise ModelValidationError(message)
                    if 'must_not_contain' in value.keys():
                        for regex in value['must_not_contain']:
                            regex_pattern = re.compile(regex)
                            if regex_pattern.findall(test_value):
                                message = '%s matches regex pattern in "must_not_contain": ["%s"]' % (header, regex)
                                raise ModelValidationError(message)
                    if 'must_contain' in value.keys():
                        for regex in value['must_contain']:
                            regex_pattern = re.compile(regex)
                            if not regex_pattern.findall(test_value):
                                message = '%s does not match regex pattern in "must_contain": ["%s"].' % (header, regex)
                                raise ModelValidationError(message)
                    if 'contains_either' in value.keys():
                        regex_match = False
                        regex_patterns = []
                        for regex in value['contains_either']:
                            regex_patterns.append(regex)
                            regex_pattern = re.compile(regex)
                            if regex_pattern.findall(test_value):
                                regex_match = True
                        if not regex_match:
                            message = '%s does not match any regex patterns in "contains_either": %s' % (header, regex_patterns)
                            raise ModelValidationError(message)
                    # base64-encoded fields only support the equal_to range qualifier
                    if 'byte_data' in value.keys():
                        if value['byte_data']:
                            if qualifier != 'equal_to':
                                message = '%s cannot be used with base64 encoded "byte_data".' % header
                                raise ModelValidationError(message)
            # validate discrete value qualifiers against other criteria
            schema_field = self.keyCriteria[self.keyName.index(key)]
            discrete_qualifiers = ['declared_value', 'default_value', 'excluded_values', 'discrete_values', 'example_values']
            for qualifier in discrete_qualifiers:
                test_qualifier = False
                if qualifier in schema_field:
                    test_qualifier = True
                    # an empty declared_value in the schema is not tested
                    if qualifier == 'declared_value' and not schema_field[qualifier]:
                        test_qualifier = False
                if qualifier in value.keys() or (test_qualifier and declared_value):
                    # the criteria value takes precedence over the schema field value
                    multiple_values = False
                    if qualifier in value.keys():
                        if isinstance(value[qualifier], list):
                            test_list = value[qualifier]
                            multiple_values = True
                        else:
                            test_list = [value[qualifier]]
                    else:
                        test_list = [schema_field[qualifier]]
                    value_path = 'field %s qualifier %s' % (key, qualifier)
                    for i in range(len(test_list)):
                        test_value = test_list[i]
                        quote_text = ''
                        if isinstance(test_value, str):
                            quote_text = '"'
                        item_text = ''
                        if multiple_values:
                            item_text = '[%s]' % i
                        header = 'Value %s%s%s for %s%s' % (quote_text, test_value, quote_text, value_path, item_text)
                        if 'min_value' in value.keys():
                            if test_value < value['min_value']:
                                message = '%s must not be less than "min_value": %s' % (header, value['min_value'])
                                raise ModelValidationError(message)
                        if 'max_value' in value.keys():
                            if test_value > value['max_value']:
                                message = '%s must not be greater than "max_value": %s' % (header, value['max_value'])
                                raise ModelValidationError(message)
                        if 'equal_to' in value.keys():
                            if test_value != value['equal_to']:
                                # NOTE(review): only bool mismatches raise here — confirm this guard is intended
                                if qualifier != 'declared_value' and isinstance(test_value, bool):
                                    message = '%s must be "equal_to": %s' % (header, value['equal_to'])
                                    raise ModelValidationError(message)
                        if 'greater_than' in value.keys():
                            if test_value <= value['greater_than']:
                                message = '%s must be "greater_than": %s' % (header, value['greater_than'])
                                raise ModelValidationError(message)
                        if 'less_than' in value.keys():
                            if test_value >= value['less_than']:
                                message = '%s must be "less_than": %s' % (header, value['less_than'])
                                raise ModelValidationError(message)
                        if 'integer_data' in value.keys():
                            if value['integer_data']:
                                if not isinstance(test_value, int):
                                    message = '%s must be an "integer_data".' % header
                                    raise ModelValidationError(message)
                        if 'min_length' in value.keys():
                            if len(test_value) < value['min_length']:
                                message = '%s must be at least "min_length": %s' % (header, value['min_length'])
                                raise ModelValidationError(message)
                        if 'max_length' in value.keys():
                            if len(test_value) > value['max_length']:
                                message = '%s cannot be more than "max_length": %s' % (header, value['max_length'])
                                raise ModelValidationError(message)
                        if 'must_not_contain' in value.keys():
                            for regex in value['must_not_contain']:
                                regex_pattern = re.compile(regex)
                                if regex_pattern.findall(test_value):
                                    message = '%s matches regex pattern in "must_not_contain": ["%s"]' % (header, regex)
                                    raise ModelValidationError(message)
                        if 'must_contain' in value.keys():
                            for regex in value['must_contain']:
                                regex_pattern = re.compile(regex)
                                if not regex_pattern.findall(test_value):
                                    message = '%s does not match regex pattern in "must_contain": ["%s"]' % (header, regex)
                                    raise ModelValidationError(message)
                        if 'contains_either' in value.keys():
                            regex_match = False
                            regex_patterns = []
                            for regex in value['contains_either']:
                                regex_patterns.append(regex)
                                regex_pattern = re.compile(regex)
                                if regex_pattern.findall(test_value):
                                    regex_match = True
                            if not regex_match:
                                message = '%s does not match any regex patterns in "contains_either": %s' % (header, regex_patterns)
                                raise ModelValidationError(message)
                        if 'byte_data' in value.keys():
                            message = '%s cannot be base64 decoded to "byte_data".' % header
                            try:
                                decoded_bytes = b64decode(test_value)
                            except:
                                raise ModelValidationError(message)
                            if not isinstance(decoded_bytes, bytes):
                                raise ModelValidationError(message)
            # validate discrete value qualifiers against each other
            for qualifier in discrete_qualifiers:
                test_qualifier = False
                if qualifier in schema_field:
                    test_qualifier = True
                    if qualifier == 'declared_value' and not schema_field[qualifier]:
                        test_qualifier = False
                if qualifier in value.keys() or (test_qualifier and declared_value):
                    multiple_values = False
                    if qualifier in value.keys():
                        if isinstance(value[qualifier], list):
                            test_list = value[qualifier]
                            multiple_values = True
                        else:
                            test_list = [value[qualifier]]
                    else:
                        test_list = [schema_field[qualifier]]
                    value_path = 'field %s qualifier %s' % (key, qualifier)
                    for i in range(len(test_list)):
                        test_value = test_list[i]
                        quote_text = ''
                        if isinstance(test_value, str):
                            quote_text = '"'
                        item_text = ''
                        if multiple_values:
                            item_text = '[%s]' % i
                        header = 'Value %s%s%s for %s%s' % (quote_text, test_value, quote_text, value_path, item_text)
                        if 'excluded_values' in value.keys():
                            if not qualifier == 'excluded_values':
                                if test_value in value['excluded_values']:
                                    message = '%s cannot be one of "excluded_values": %s.' % (header, value['excluded_values'])
                                    raise ModelValidationError(message)
                        if 'discrete_values' in value.keys():
                            # NOTE(review): guard re-uses 'excluded_values' here; 'discrete_values'
                            # looks like the intended comparison — confirm before changing
                            if not qualifier == 'excluded_values':
                                if test_value not in value['discrete_values']:
                                    message = '%s must be one of "discrete_values": %s' % (header, value['discrete_values'])
                                    raise ModelValidationError(message)
        return fields_dict
def _validate_defaults(self, fields_dict):
# validate key names in fields
for key, value in fields_dict.items():
# retrieve value type and type dict
value_type = self.keyCriteria[self.keyName.index(key)]['value_datatype']
# validate discrete value qualifiers against other criteria
qualifier = 'default_value'
if qualifier in value.keys():
multiple_values = False
if isinstance(value[qualifier], list):
test_list = value[qualifier]
multiple_values = True
else:
test_list = [value[qualifier]]
value_path = 'field %s qualifier %s' % (key, qualifier)
for i in range(len(test_list)):
test_value = test_list[i]
quote_text = ''
if isinstance(test_value, str):
quote_text = '"'
item_text = ''
if multiple_values:
item_text = '[%s]' % i
qualifier_text = value_path + item_text
header = 'Value %s%s%s for %s' % (quote_text, test_value, quote_text, qualifier_text)
if value_type == 'list':
default_item_key = '%s[0]' % key
try:
self.validate(test_value, default_item_key, object_title=header)
except Exception as err:
raise ModelValidationError(str(err).strip().replace('field %s' % default_item_key, qualifier_text))
return fields_dict
    def _evaluate_field(self, record_dict, field_name, field_criteria):
        ''' a helper method for evaluating record values based upon query criteria

        :param record_dict: dictionary with model valid data to evaluate
        :param field_name: string with path to root of query field
        :param field_criteria: dictionary with query operators and qualifiers
        :return: boolean (True if no field_criteria evaluate to false)
        '''
        # determine value existence criteria
        value_exists = True
        if 'value_exists' in field_criteria.keys():
            if not field_criteria['value_exists']:
                value_exists = False
        # validate existence of field (self._walk raising means the path is absent)
        field_exists = True
        try:
            record_values = self._walk(field_name, record_dict)
        except:
            field_exists = False
        # evaluate existence query criteria
        if value_exists != field_exists:
            return False
        elif not value_exists:
            # field correctly absent; no other criteria can apply
            return True
        # convert javascript dot_path to class dot_path
        field_key = field_name
        if not field_name:
            field_key = '.'
        else:
            if field_name[0] != '.':
                field_key = '.%s' % field_name
        # evaluate other query criteria; each operator must be satisfied by at
        # least one of the values returned by self._walk for the field path
        for key, value in field_criteria.items():
            if key in ('min_size', 'min_length'):
                found = False
                if self.keyMap[field_key]['value_datatype'] == 'map':
                    # map size measured as bytes of the stringified map (offset by 51
                    # to discount the getsizeof overhead of an empty serialization)
                    for record_value in record_values:
                        record_size = sys.getsizeof(json.dumps(str(record_value)).replace(' ','')) - 51
                        if record_size >= value:
                            found = True
                            break
                else:
                    for record_value in record_values:
                        if len(record_value) >= value:
                            found = True
                            break
                if not found:
                    return False
            elif key in ('max_size', 'max_length'):
                found = False
                if self.keyMap[field_key]['value_datatype'] == 'map':
                    for record_value in record_values:
                        record_size = sys.getsizeof(json.dumps(str(record_value)).replace(' ','')) - 51
                        if record_size <= value:
                            found = True
                            break
                else:
                    for record_value in record_values:
                        if len(record_value) <= value:
                            found = True
                            break
                if not found:
                    return False
            elif key == 'min_value':
                found = False
                for record_value in record_values:
                    if record_value >= value:
                        found = True
                        break
                if not found:
                    return False
            elif key == 'max_value':
                found = False
                for record_value in record_values:
                    if record_value <= value:
                        found = True
                        break
                if not found:
                    return False
            elif key == 'equal_to':
                found = False
                for record_value in record_values:
                    if record_value == value:
                        found = True
                        break
                if not found:
                    return False
            elif key == 'greater_than':
                found = False
                for record_value in record_values:
                    if record_value > value:
                        found = True
                        break
                if not found:
                    return False
            elif key == 'less_than':
                found = False
                for record_value in record_values:
                    if record_value < value:
                        found = True
                        break
                if not found:
                    return False
            elif key == 'excluded_values':
                # any match against an excluded value disqualifies the record
                for record_value in record_values:
                    if record_value in value:
                        return False
            elif key == 'discrete_values':
                found = False
                for record_value in record_values:
                    if record_value in value:
                        found = True
                        break
                if not found:
                    return False
            elif key == 'integer_data':
                found = False
                dummy_int = 1
                for record_value in record_values:
                    # exact class comparison (not isinstance) so bool does not count as int
                    if record_value.__class__ == dummy_int.__class__:
                        found = True
                        break
                if value != found:
                    return False
            elif key == 'byte_data':
                found = False
                for record_value in record_values:
                    try:
                        decoded_bytes = b64decode(record_value)
                    except:
                        decoded_bytes = ''
                    if isinstance(decoded_bytes, bytes):
                        found = True
                        break
                if value != found:
                    return False
            elif key == 'must_contain':
                # every regex in the list must match at least one record value
                for regex in value:
                    regex_pattern = re.compile(regex)
                    found = False
                    for record_value in record_values:
                        if regex_pattern.findall(record_value):
                            found = True
                            break
                    if not found:
                        return False
            elif key == 'must_not_contain':
                # no regex in the list may match any record value
                for regex in value:
                    regex_pattern = re.compile(regex)
                    found = False
                    for record_value in record_values:
                        if regex_pattern.findall(record_value):
                            found = True
                            break
                    if found:
                        return False
            elif key == 'contains_either':
                # at least one regex in the list must match at least one record value
                found = False
                for regex in value:
                    regex_pattern = re.compile(regex)
                    for record_value in record_values:
                        if regex_pattern.findall(record_value):
                            found = True
                            break
                    if found:
                        break
                if not found:
                    return False
            elif key == 'unique_values':
                for record_value in record_values:
                    unique_values = True
                    if len(record_value) != len(set(record_value)):
                        unique_values = False
                    if value != unique_values:
                        return False
        return True
    def _validate_dict(self, input_dict, schema_dict, path_to_root, object_title=''):
        ''' a helper method for recursively validating keys in dictionaries

        :param input_dict: dictionary of input data at path_to_root
        :param schema_dict: dictionary of schema declarations at path_to_root
        :param path_to_root: string with dot-path of this dictionary in the model
        :param object_title: [optional] string with a title to include in error reports
        :return: input_dict (with optional fields back-filled from default_value)
        :raises: InputValidationError on the first failed criterion
        '''
        # reconstruct key path to current dictionary in model
        # (list item indices are normalized to [0] for rules lookup)
        rules_top_level_key = re.sub('\[\d+\]', '[0]', path_to_root)
        map_rules = self.keyMap[rules_top_level_key]
        # construct list error report template
        map_error = {
            'object_title': object_title,
            'model_schema': self.schema,
            'input_criteria': map_rules,
            'failed_test': 'value_datatype',
            'input_path': path_to_root,
            'error_value': 0,
            'error_code': 4001
        }
        # validate map size (bytes of stringified map, offset by 51 to discount
        # the getsizeof overhead of an empty serialization)
        if 'min_size' in map_rules.keys():
            input_size = sys.getsizeof(json.dumps(str(input_dict)).replace(' ','')) - 51
            if input_size < map_rules['min_size']:
                map_error['failed_test'] = 'min_size'
                map_error['error_value'] = input_size
                map_error['error_code'] = 4031
                raise InputValidationError(map_error)
        if 'max_size' in map_rules.keys():
            input_size = sys.getsizeof(json.dumps(str(input_dict)).replace(' ','')) - 51
            if input_size > map_rules['max_size']:
                map_error['failed_test'] = 'max_size'
                map_error['error_value'] = input_size
                map_error['error_code'] = 4032
                raise InputValidationError(map_error)
        # construct lists of keys in input dictionary
        input_keys = []
        input_key_list = []
        for key in input_dict.keys():
            error_dict = {
                'object_title': object_title,
                'model_schema': self.schema,
                'input_criteria': self.keyMap[rules_top_level_key],
                'failed_test': 'key_datatype',
                'input_path': path_to_root,
                'error_value': key,
                'error_code': 4004
            }
            # NOTE(review): input_criteria references self.keyMap[...] directly, so this
            # assignment writes 'key_datatype' through into keyMap — confirm intended
            error_dict['input_criteria']['key_datatype'] = 'string'
            if path_to_root == '.':
                if not isinstance(key, str):
                    input_key_name = path_to_root + str(key)
                    error_dict['input_path'] = input_key_name
                    raise InputValidationError(error_dict)
                input_key_name = path_to_root + key
            else:
                if not isinstance(key, str):
                    input_key_name = path_to_root + '.' + str(key)
                    error_dict['input_path'] = input_key_name
                    raise InputValidationError(error_dict)
                input_key_name = path_to_root + '.' + key
            input_keys.append(input_key_name)
            input_key_list.append(key)
        # TODO: validate top-level key and values against identical to reference
        # TODO: run lambda function and call validation
        # construct lists of keys in schema dictionary
        max_keys = []
        max_key_list = []
        req_keys = []
        req_key_list = []
        for key in schema_dict.keys():
            if path_to_root == '.':
                schema_key_name = path_to_root + key
            else:
                schema_key_name = path_to_root + '.' + key
            max_keys.append(schema_key_name)
            max_key_list.append(key)
            rules_schema_key_name = re.sub('\[\d+\]', '[0]', schema_key_name)
            if self.keyMap[rules_schema_key_name]['required_field']:
                req_keys.append(schema_key_name)
                req_key_list.append(key)
        # validate existence of required fields
        missing_keys = set(req_keys) - set(input_keys)
        if missing_keys:
            error_dict = {
                'object_title': object_title,
                'model_schema': self.schema,
                'input_criteria': self.keyMap[rules_top_level_key],
                'failed_test': 'required_field',
                'input_path': path_to_root,
                'error_value': list(missing_keys),
                'error_code': 4002
            }
            # NOTE(review): also writes 'required_keys' through into keyMap (see above)
            error_dict['input_criteria']['required_keys'] = req_keys
            raise InputValidationError(error_dict)
        # validate existence of extra fields
        extra_keys = set(input_keys) - set(max_keys)
        if extra_keys and not self.keyMap[rules_top_level_key]['extra_fields']:
            extra_key_list = []
            for key in extra_keys:
                pathless_key = re.sub(rules_top_level_key, '', key, count=1)
                extra_key_list.append(pathless_key)
            error_dict = {
                'object_title': object_title,
                'model_schema': self.schema,
                'input_criteria': self.keyMap[rules_top_level_key],
                'failed_test': 'extra_fields',
                'input_path': path_to_root,
                'error_value': extra_key_list,
                'error_code': 4003
            }
            error_dict['input_criteria']['maximum_scope'] = max_key_list
            raise InputValidationError(error_dict)
        # validate datatype of value (keys outside the schema are skipped; they can
        # only be present when extra_fields is allowed)
        for key, value in input_dict.items():
            if path_to_root == '.':
                input_key_name = path_to_root + key
            else:
                input_key_name = path_to_root + '.' + key
            rules_input_key_name = re.sub('\[\d+\]', '[0]', input_key_name)
            if input_key_name in max_keys:
                input_criteria = self.keyMap[rules_input_key_name]
                error_dict = {
                    'object_title': object_title,
                    'model_schema': self.schema,
                    'input_criteria': input_criteria,
                    'failed_test': 'value_datatype',
                    'input_path': input_key_name,
                    'error_value': value,
                    'error_code': 4001
                }
                try:
                    value_index = self._datatype_classes.index(value.__class__)
                except:
                    # class not among the recognized datatypes
                    error_dict['error_value'] = value.__class__.__name__
                    raise InputValidationError(error_dict)
                value_type = self._datatype_names[value_index]
                if input_criteria['value_datatype'] == 'null':
                    # a null-declared field accepts any datatype
                    pass
                else:
                    if value_type != input_criteria['value_datatype']:
                        raise InputValidationError(error_dict)
                # call appropriate validation sub-routine for datatype of value
                if value_type == 'boolean':
                    input_dict[key] = self._validate_boolean(value, input_key_name, object_title)
                elif value_type == 'number':
                    input_dict[key] = self._validate_number(value, input_key_name, object_title)
                elif value_type == 'string':
                    input_dict[key] = self._validate_string(value, input_key_name, object_title)
                elif value_type == 'map':
                    input_dict[key] = self._validate_dict(value, schema_dict[key], input_key_name, object_title)
                elif value_type == 'list':
                    input_dict[key] = self._validate_list(value, schema_dict[key], input_key_name, object_title)
        # set default values for empty optional fields
        for key in max_key_list:
            if key not in input_key_list:
                indexed_key = max_keys[max_key_list.index(key)]
                if indexed_key in self.components.keys():
                    if 'default_value' in self.components[indexed_key]:
                        input_dict[key] = self.components[indexed_key]['default_value']
        return input_dict
    def _validate_list(self, input_list, schema_list, path_to_root, object_title=''):
        '''
        a helper method for recursively validating items in a list

        :param input_list: list of input data at path_to_root
        :param schema_list: list with the schema declaration for items (index [0])
        :param path_to_root: string with dot-path of this list in the model
        :param object_title: [optional] string with a title to include in error reports
        :return: input_list (items replaced by their validated values)
        :raises: InputValidationError on the first failed criterion
        '''
        # construct rules for list and items
        # (list item indices are normalized to [0] for rules lookup)
        rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
        list_rules = self.keyMap[rules_path_to_root]
        initial_key = rules_path_to_root + '[0]'
        item_rules = self.keyMap[initial_key]
        # construct list error report template
        list_error = {
            'object_title': object_title,
            'model_schema': self.schema,
            'input_criteria': list_rules,
            'failed_test': 'value_datatype',
            'input_path': path_to_root,
            'error_value': 0,
            'error_code': 4001
        }
        # validate list rules
        if 'min_size' in list_rules.keys():
            if len(input_list) < list_rules['min_size']:
                list_error['failed_test'] = 'min_size'
                list_error['error_value'] = len(input_list)
                list_error['error_code'] = 4031
                raise InputValidationError(list_error)
        if 'max_size' in list_rules.keys():
            if len(input_list) > list_rules['max_size']:
                list_error['failed_test'] = 'max_size'
                list_error['error_value'] = len(input_list)
                list_error['error_code'] = 4032
                raise InputValidationError(list_error)
        # construct item error report template (input_path/error_value updated per item)
        item_error = {
            'object_title': object_title,
            'model_schema': self.schema,
            'input_criteria': item_rules,
            'failed_test': 'value_datatype',
            'input_path': initial_key,
            'error_value': None,
            'error_code': 4001
        }
        # validate datatype of items
        for i in range(len(input_list)):
            input_path = path_to_root + '[%s]' % i
            item = input_list[i]
            item_error['input_path'] = input_path
            try:
                item_index = self._datatype_classes.index(item.__class__)
            except:
                # class not among the recognized datatypes
                item_error['error_value'] = item.__class__.__name__
                raise InputValidationError(item_error)
            item_type = self._datatype_names[item_index]
            item_error['error_value'] = item
            if item_rules['value_datatype'] == 'null':
                # a null-declared item accepts any datatype
                pass
            else:
                if item_type != item_rules['value_datatype']:
                    raise InputValidationError(item_error)
            # call appropriate validation sub-routine for datatype of item
            if item_type == 'boolean':
                input_list[i] = self._validate_boolean(item, input_path, object_title)
            elif item_type == 'number':
                input_list[i] = self._validate_number(item, input_path, object_title)
            elif item_type == 'string':
                input_list[i] = self._validate_string(item, input_path, object_title)
            elif item_type == 'map':
                input_list[i] = self._validate_dict(item, schema_list[0], input_path, object_title)
            elif item_type == 'list':
                input_list[i] = self._validate_list(item, schema_list[0], input_path, object_title)
        # validate unique values in list
        if 'unique_values' in list_rules.keys():
            if len(set(input_list)) < len(input_list):
                list_error['failed_test'] = 'unique_values'
                list_error['error_value'] = input_list
                list_error['error_code'] = 4033
                raise InputValidationError(list_error)
        # TODO: validate top-level item values against identical to reference
        # TODO: run lambda function and call validation url
        return input_list
def _validate_number(self, input_number, path_to_root, object_title=''):
'''
a helper method for validating properties of a number
:return: input_number
'''
rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
input_criteria = self.keyMap[rules_path_to_root]
error_dict = {
'object_title': object_title,
'model_schema': self.schema,
'input_criteria': input_criteria,
'failed_test': 'value_datatype',
'input_path': path_to_root,
'error_value': input_number,
'error_code': 4001
}
if 'integer_data' in input_criteria.keys():
if input_criteria['integer_data'] and not isinstance(input_number, int):
error_dict['failed_test'] = 'integer_data'
error_dict['error_code'] = 4021
raise InputValidationError(error_dict)
if 'min_value' in input_criteria.keys():
if input_number < input_criteria['min_value']:
error_dict['failed_test'] = 'min_value'
error_dict['error_code'] = 4022
raise InputValidationError(error_dict)
if 'max_value' in input_criteria.keys():
if input_number > input_criteria['max_value']:
error_dict['failed_test'] = 'max_value'
error_dict['error_code'] = 4023
raise InputValidationError(error_dict)
if 'greater_than' in input_criteria.keys():
if input_number <= input_criteria['greater_than']:
error_dict['failed_test'] = 'greater_than'
error_dict['error_code'] = 4024
raise InputValidationError(error_dict)
if 'less_than' in input_criteria.keys():
if input_number >= input_criteria['less_than']:
error_dict['failed_test'] = 'less_than'
error_dict['error_code'] = 4025
raise InputValidationError(error_dict)
if 'equal_to' in input_criteria.keys():
if input_number != input_criteria['equal_to']:
error_dict['failed_test'] = 'equal_to'
error_dict['error_code'] = 4026
raise InputValidationError(error_dict)
if 'discrete_values' in input_criteria.keys():
if input_number not in input_criteria['discrete_values']:
error_dict['failed_test'] = 'discrete_values'
error_dict['error_code'] = 4041
raise InputValidationError(error_dict)
if 'excluded_values' in input_criteria.keys():
if input_number in input_criteria['excluded_values']:
error_dict['failed_test'] = 'excluded_values'
error_dict['error_code'] = 4042
raise InputValidationError(error_dict)
# TODO: validate number against identical to reference
# TODO: run lambda function and call validation url
return input_number
def _validate_string(self, input_string, path_to_root, object_title=''):
'''
a helper method for validating properties of a string
:return: input_string
'''
rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
input_criteria = self.keyMap[rules_path_to_root]
error_dict = {
'object_title': object_title,
'model_schema': self.schema,
'input_criteria': input_criteria,
'failed_test': 'value_datatype',
'input_path': path_to_root,
'error_value': input_string,
'error_code': 4001
}
if 'byte_data' in input_criteria.keys():
if input_criteria['byte_data']:
error_dict['failed_test'] = 'byte_data'
error_dict['error_code'] = 4011
try:
decoded_bytes = b64decode(input_string)
except:
raise InputValidationError(error_dict)
if not isinstance(decoded_bytes, bytes):
raise InputValidationError(error_dict)
if 'min_value' in input_criteria.keys():
if input_string < input_criteria['min_value']:
error_dict['failed_test'] = 'min_value'
error_dict['error_code'] = 4022
raise InputValidationError(error_dict)
if 'max_value' in input_criteria.keys():
if input_string > input_criteria['max_value']:
error_dict['failed_test'] = 'max_value'
error_dict['error_code'] = 4023
raise InputValidationError(error_dict)
if 'greater_than' in input_criteria.keys():
if input_string <= input_criteria['greater_than']:
error_dict['failed_test'] = 'greater_than'
error_dict['error_code'] = 4024
raise InputValidationError(error_dict)
if 'less_than' in input_criteria.keys():
if input_string >= input_criteria['less_than']:
error_dict['failed_test'] = 'less_than'
error_dict['error_code'] = 4025
raise InputValidationError(error_dict)
if 'equal_to' in input_criteria.keys():
if input_string != input_criteria['equal_to']:
error_dict['failed_test'] = 'equal_to'
error_dict['error_code'] = 4026
raise InputValidationError(error_dict)
if 'min_length' in input_criteria.keys():
if len(input_string) < input_criteria['min_length']:
error_dict['failed_test'] = 'min_length'
error_dict['error_code'] = 4012
raise InputValidationError(error_dict)
if 'max_length' in input_criteria.keys():
if len(input_string) > input_criteria['max_length']:
error_dict['failed_test'] = 'max_length'
error_dict['error_code'] = 4013
raise InputValidationError(error_dict)
if 'must_not_contain' in input_criteria.keys():
for regex in input_criteria['must_not_contain']:
regex_pattern = re.compile(regex)
if regex_pattern.findall(input_string):
error_dict['failed_test'] = 'must_not_contain'
error_dict['error_code'] = 4014
raise InputValidationError(error_dict)
if 'must_contain' in input_criteria.keys():
for regex in input_criteria['must_contain']:
regex_pattern = re.compile(regex)
if not regex_pattern.findall(input_string):
error_dict['failed_test'] = 'must_contain'
error_dict['error_code'] = 4015
raise InputValidationError(error_dict)
if 'contains_either' in input_criteria.keys():
regex_match = False
for regex in input_criteria['contains_either']:
regex_pattern = re.compile(regex)
if regex_pattern.findall(input_string):
regex_match = True
if not regex_match:
error_dict['failed_test'] = 'contains_either'
error_dict['error_code'] = 4016
raise InputValidationError(error_dict)
if 'discrete_values' in input_criteria.keys():
if input_string not in input_criteria['discrete_values']:
error_dict['failed_test'] = 'discrete_values'
error_dict['error_code'] = 4041
raise InputValidationError(error_dict)
if 'excluded_values' in input_criteria.keys():
if input_string in input_criteria['excluded_values']:
error_dict['failed_test'] = 'excluded_values'
error_dict['error_code'] = 4042
raise InputValidationError(error_dict)
# TODO: validate string against identical to reference
# TODO: run lambda function and call validation url
return input_string
def _validate_boolean(self, input_boolean, path_to_root, object_title=''):
'''
a helper method for validating properties of a boolean
:return: input_boolean
'''
rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
input_criteria = self.keyMap[rules_path_to_root]
error_dict = {
'object_title': object_title,
'model_schema': self.schema,
'input_criteria': input_criteria,
'failed_test': 'value_datatype',
'input_path': path_to_root,
'error_value': input_boolean,
'error_code': 4001
}
if 'equal_to' in input_criteria.keys():
if input_boolean != input_criteria['equal_to']:
error_dict['failed_test'] = 'equal_to'
error_dict['error_code'] = 4026
raise InputValidationError(error_dict)
# TODO: validate boolean against identical to reference
# TODO: run lambda function and call validation url
return input_boolean
def _ingest_dict(self, input_dict, schema_dict, path_to_root):
    '''
    a helper method for ingesting keys, value pairs in a dictionary

    for each key declared in the schema: if the input supplies a value of
    the matching datatype, it is ingested (recursively for maps/lists);
    otherwise the declared default (or an empty value of the declared
    datatype) is substituted, so the result is always fully populated

    :param input_dict: dictionary with raw input data
    :param schema_dict: dictionary with the schema declaration at this level
    :param path_to_root: string with dot path of this dictionary in the model
    :return: valid_dict
    '''
    valid_dict = {}
    # construct path to root for rules
    rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
    # iterate over keys in schema dict
    for key, value in schema_dict.items():
        key_path = path_to_root
        if not key_path == '.':
            key_path += '.'
        key_path += key
        rules_key_path = re.sub('\[\d+\]', '[0]', key_path)
        value_match = False
        # a match requires the input value's datatype to equal the schema's
        if key in input_dict.keys():
            value_index = self._datatype_classes.index(value.__class__)
            value_type = self._datatype_names[value_index]
            try:
                v_index = self._datatype_classes.index(input_dict[key].__class__)
                v_type = self._datatype_names[v_index]
                if v_type == value_type:
                    value_match = True
            except:
                # unrecognized input class: treat as a mismatch, use defaults
                value_match = False
        if value_match:
            # dispatch to the ingestion helper for the declared datatype
            if value_type == 'null':
                valid_dict[key] = input_dict[key]
            elif value_type == 'boolean':
                valid_dict[key] = self._ingest_boolean(input_dict[key], key_path)
            elif value_type == 'number':
                valid_dict[key] = self._ingest_number(input_dict[key], key_path)
            elif value_type == 'string':
                valid_dict[key] = self._ingest_string(input_dict[key], key_path)
            elif value_type == 'map':
                valid_dict[key] = self._ingest_dict(input_dict[key], schema_dict[key], key_path)
            elif value_type == 'list':
                valid_dict[key] = self._ingest_list(input_dict[key], schema_dict[key], key_path)
        else:
            # missing or mismatched input: fall back to declared default,
            # then to the empty value for the declared datatype
            value_type = self.keyMap[rules_key_path]['value_datatype']
            if 'default_value' in self.keyMap[rules_key_path]:
                valid_dict[key] = self.keyMap[rules_key_path]['default_value']
            elif value_type == 'null':
                valid_dict[key] = None
            elif value_type == 'boolean':
                valid_dict[key] = False
            elif value_type == 'number':
                valid_dict[key] = 0.0
                # integer fields default to int zero rather than float zero
                if 'integer_data' in self.keyMap[rules_key_path].keys():
                    if self.keyMap[rules_key_path]['integer_data']:
                        valid_dict[key] = 0
            elif value_type == 'string':
                valid_dict[key] = ''
            elif value_type == 'list':
                valid_dict[key] = []
            elif value_type == 'map':
                # recurse so nested maps are also fully populated
                valid_dict[key] = self._ingest_dict({}, schema_dict[key], key_path)
    # add extra fields if set to True
    if self.keyMap[rules_path_to_root]['extra_fields']:
        for key, value in input_dict.items():
            if key not in valid_dict.keys():
                valid_dict[key] = value
    return valid_dict
def _ingest_list(self, input_list, schema_list, path_to_root):
    '''
    a helper method for ingesting items in a list

    items whose datatype matches the schema's first-item declaration are
    validated/ingested; items that fail validation are silently dropped;
    ingestion stops early once a declared max_size is reached

    :param input_list: list with raw input items
    :param schema_list: list with the schema declaration (first item is the template)
    :param path_to_root: string with dot path of this list in the model
    :return: valid_list
    '''
    valid_list = []
    # construct max list size
    max_size = None
    rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
    if 'max_size' in self.keyMap[rules_path_to_root].keys():
        # a falsy max_size (0) means the list must stay empty
        if not self.keyMap[rules_path_to_root]['max_size']:
            return valid_list
        else:
            max_size = self.keyMap[rules_path_to_root]['max_size']
    # iterate over items in input list
    if input_list:
        # the schema's first item fixes the required datatype for all items
        rules_index = self._datatype_classes.index(schema_list[0].__class__)
        rules_type = self._datatype_names[rules_index]
        for i in range(len(input_list)):
            item_path = '%s[%s]' % (path_to_root, i)
            value_match = False
            try:
                item_index = self._datatype_classes.index(input_list[i].__class__)
                item_type = self._datatype_names[item_index]
                if item_type == rules_type:
                    value_match = True
            except:
                value_match = False
            if value_match:
                # items failing validation are skipped rather than raising
                try:
                    if item_type == 'boolean':
                        valid_list.append(self._validate_boolean(input_list[i], item_path))
                    elif item_type == 'number':
                        valid_list.append(self._validate_number(input_list[i], item_path))
                    elif item_type == 'string':
                        valid_list.append(self._validate_string(input_list[i], item_path))
                    elif item_type == 'map':
                        valid_list.append(self._ingest_dict(input_list[i], schema_list[0], item_path))
                    elif item_type == 'list':
                        valid_list.append(self._ingest_list(input_list[i], schema_list[0], item_path))
                except:
                    pass
            # stop once the list has grown to its declared max size
            if isinstance(max_size, int):
                if len(valid_list) == max_size:
                    return valid_list
    return valid_list
def _ingest_number(self, input_number, path_to_root):
'''
a helper method for ingesting a number
:return: valid_number
'''
valid_number = 0.0
try:
valid_number = self._validate_number(input_number, path_to_root)
except:
rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
if 'default_value' in self.keyMap[rules_path_to_root]:
valid_number = self.keyMap[rules_path_to_root]['default_value']
elif 'integer_data' in self.keyMap[rules_path_to_root].keys():
if self.keyMap[rules_path_to_root]['integer_data']:
valid_number = 0
return valid_number
def _ingest_string(self, input_string, path_to_root):
'''
a helper method for ingesting a string
:return: valid_string
'''
valid_string = ''
try:
valid_string = self._validate_string(input_string, path_to_root)
except:
rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
if 'default_value' in self.keyMap[rules_path_to_root]:
valid_string = self.keyMap[rules_path_to_root]['default_value']
return valid_string
def _reconstruct(self, path_to_root):
'''
a helper method for finding the schema endpoint from a path to root
:param path_to_root: string with dot path to root from
:return: list, dict, string, number, or boolean at path to root
'''
# split path to root into segments
item_pattern = re.compile('\d+\\]')
dot_pattern = re.compile('\\.|\\[')
path_segments = dot_pattern.split(path_to_root)
# construct base schema endpoint
schema_endpoint = self.schema
# reconstruct schema endpoint from segments
if path_segments[1]:
for i in range(1,len(path_segments)):
if item_pattern.match(path_segments[i]):
schema_endpoint = schema_endpoint[0]
else:
schema_endpoint = schema_endpoint[path_segments[i]]
return schema_endpoint
def _walk(self, path_to_root, record_dict):
    '''
    a helper method for finding the record endpoint from a path to root

    unlike _reconstruct (which walks the schema and takes only the first
    list item), this walks an actual record and fans out across EVERY item
    of any list encountered, collecting all matching endpoints

    :param path_to_root: string with dot path to root from
    :param record_dict: dictionary with record to walk
    :return: list of endpoints (list, dict, string, number, or boolean) at path to root
    '''
    # split path to root into segments
    item_pattern = re.compile('\d+\\]')
    dot_pattern = re.compile('\\.|\\[')
    path_segments = dot_pattern.split(path_to_root)
    # construct empty fields
    record_endpoints = []
    # determine starting position
    if not path_segments[0]:
        # leading dot produces an empty first segment; drop it
        path_segments.pop(0)
    # define internal recursive function
    def _walk_int(path_segments, record_dict):
        record_endpoint = record_dict
        for i in range(0, len(path_segments)):
            if item_pattern.match(path_segments[i]):
                # item designator: fan out across every element of the list
                for j in range(0, len(record_endpoint)):
                    if len(path_segments) == 2:
                        # the designator is the final step; collect each item
                        record_endpoints.append(record_endpoint[j])
                    else:
                        # only recurse from the FIRST designator in the chain;
                        # deeper designators are handled by the recursive call
                        stop_chain = False
                        for x in range(0, i):
                            if item_pattern.match(path_segments[x]):
                                stop_chain = True
                        if not stop_chain:
                            shortened_segments = []
                            for z in range(i + 1, len(path_segments)):
                                shortened_segments.append(path_segments[z])
                            _walk_int(shortened_segments, record_endpoint[j])
            else:
                # key segment: skip if an earlier designator already recursed
                stop_chain = False
                for y in range(0, i):
                    if item_pattern.match(path_segments[y]):
                        stop_chain = True
                if not stop_chain:
                    if len(path_segments) == i + 1:
                        # final segment: collect the value at this key
                        record_endpoints.append(record_endpoint[path_segments[i]])
                    else:
                        record_endpoint = record_endpoint[path_segments[i]]
    # conduct recursive walk
    _walk_int(path_segments, record_dict)
    return record_endpoints
def validate(self, input_data, path_to_root='', object_title=''):
    '''
    a core method for validating input against the model

    input_data is only returned if all data is valid

    :param input_data: list, dict, string, number, or boolean to validate
    :param path_to_root: [optional] string with dot-path of model component
    :param object_title: [optional] string with name of input to validate
    :return: input_data (or InputValidationError)
    '''
    # __name__ is shadowed locally to build human-readable argument labels
    __name__ = '%s.validate' % self.__class__.__name__
    _path_arg = '%s(path_to_root="...")' % __name__
    _title_arg = '%s(object_title="...")' % __name__
    # validate input
    copy_path = path_to_root
    if path_to_root:
        if not isinstance(path_to_root, str):
            raise ModelValidationError('%s must be a string.' % _path_arg)
        else:
            # accept javascript-style dotless paths by prefixing a dot
            if path_to_root[0] != '.':
                copy_path = '.%s' % path_to_root
            if not copy_path in self.keyMap.keys():
                raise ModelValidationError('%s does not exist in components %s.' % (_path_arg.replace('...', path_to_root), self.keyMap.keys()))
    else:
        copy_path = '.'
    if object_title:
        if not isinstance(object_title, str):
            raise ModelValidationError('%s must be a string' % _title_arg)
    # construct generic error dictionary
    error_dict = {
        'object_title': object_title,
        'model_schema': self.schema,
        'input_criteria': self.keyMap[copy_path],
        'failed_test': 'value_datatype',
        'input_path': copy_path,
        'error_value': input_data,
        'error_code': 4001
    }
    # determine value type of input data
    try:
        input_index = self._datatype_classes.index(input_data.__class__)
    except:
        # class not in the supported datatype list: report its name instead
        error_dict['error_value'] = input_data.__class__.__name__
        raise InputValidationError(error_dict)
    input_type = self._datatype_names[input_index]
    # validate input data type
    if input_type != self.keyMap[copy_path]['value_datatype']:
        raise InputValidationError(error_dict)
    # run helper method appropriate to data type
    if input_type == 'boolean':
        input_data = self._validate_boolean(input_data, copy_path, object_title)
    elif input_type == 'number':
        input_data = self._validate_number(input_data, copy_path, object_title)
    elif input_type == 'string':
        input_data = self._validate_string(input_data, copy_path, object_title)
    elif input_type == 'list':
        # containers need the schema declaration at this path to recurse
        schema_list = self._reconstruct(copy_path)
        input_data = self._validate_list(input_data, schema_list, copy_path, object_title)
    elif input_type == 'map':
        schema_dict = self._reconstruct(copy_path)
        input_data = self._validate_dict(input_data, schema_dict, copy_path, object_title)
    return input_data
def ingest(self, **kwargs):
    '''
    a core method to ingest and validate arbitrary keyword data

    **NOTE: data is always returned with this method**

    for each key in the model, a value is returned according
    to the following priority:

        1. value in kwargs if field passes validation test
        2. default value declared for the key in the model
        3. empty value appropriate to datatype of key in the model

    **NOTE: as long as a default value is provided for each key-
    value, returned data will be model valid

    **NOTE: if 'extra_fields' is True for a dictionary, the key-
    value pair of all fields in kwargs which are not declared in
    the model will also be added to the corresponding dictionary
    data

    **NOTE: if 'max_size' is declared for a list, method will
    stop adding input to the list once it reaches max size

    :param kwargs: key, value pairs
    :return: dictionary with keys and value
    '''
    __name__ = '%s.ingest' % self.__class__.__name__
    # ingestion always starts at the schema root
    return self._ingest_dict(kwargs, self.schema, '.')
def query(self, query_criteria, valid_record=None):
    '''
    a core method for querying model valid data with criteria

    **NOTE: input is only returned if all fields & qualifiers are valid for model

    :param query_criteria: dictionary with model field names and query qualifiers
    :param valid_record: dictionary with model valid record
    :return: boolean (or QueryValidationError)

    an example of how to construct the query_criteria argument:

    query_criteria = {
        '.path.to.number': {
            'min_value': 4.5
        },
        '.path.to.string': {
            'must_contain': [ '\\regex' ]
        }
    }

    **NOTE: for a full list of operators for query_criteria based upon field
    datatype, see either the query-rules.json file or REFERENCE file
    '''
    # __name__ is shadowed locally to build human-readable argument labels
    __name__ = '%s.query' % self.__class__.__name__
    _query_arg = '%s(query_criteria={...})' % __name__
    _record_arg = '%s(valid_record={...})' % __name__
    # validate input
    if not isinstance(query_criteria, dict):
        raise ModelValidationError('%s must be a dictionary.' % _query_arg)
    # convert javascript dot_path to class dot_path
    criteria_copy = {}
    equal_fields = []   # fields where a bare scalar was expanded to equal_to
    dot_fields = []     # fields the caller supplied without a leading dot
    for key, value in query_criteria.items():
        copy_key = key
        if not key:
            copy_key = '.'
        else:
            if key[0] != '.':
                copy_key = '.%s' % key
                dot_fields.append(copy_key)
        criteria_copy[copy_key] = value
        # a bare scalar (null/bool/number/string) is shorthand for equal_to
        if value.__class__ in self._datatype_classes[0:4]:
            criteria_copy[copy_key] = {
                'equal_to': value
            }
            equal_fields.append(copy_key)
    # validate query criteria against query rules
    query_kwargs = {
        'fields_dict': criteria_copy,
        'fields_rules': self.queryRules,
        'declared_value': False
    }
    try:
        self._validate_fields(**query_kwargs)
    except ModelValidationError as err:
        # rewrite the error message so it refers to the caller's original
        # field spelling (no injected 'qualifier equal_to', no added dot)
        message = err.error['message']
        for field in equal_fields:
            equal_error = 'field %s qualifier equal_to' % field
            if message.find(equal_error) > -1:
                message = message.replace(equal_error, 'field %s' % field)
                break
        field_pattern = re.compile('ield\s(\..*?)\s')
        field_name = field_pattern.findall(message)
        if field_name:
            if field_name[0] in dot_fields:
                def _replace_field(x):
                    # strip the leading dot that was added during conversion
                    return 'ield %s ' % x.group(1)[1:]
                message = field_pattern.sub(_replace_field, message)
        raise QueryValidationError(message)
    # query test record
    if valid_record:
        if not isinstance(valid_record, dict):
            raise ModelValidationError('%s must be a dictionary.' % _record_arg)
        # all criteria must evaluate true for the record to match
        for key, value in criteria_copy.items():
            eval_outcome = self._evaluate_field(valid_record, key, value)
            if not eval_outcome:
                return False
    return True
|
collectiveacuity/jsonModel
|
jsonmodel/validators.py
|
jsonModel._reconstruct
|
python
|
def _reconstruct(self, path_to_root):
'''
a helper method for finding the schema endpoint from a path to root
:param path_to_root: string with dot path to root from
:return: list, dict, string, number, or boolean at path to root
'''
# split path to root into segments
item_pattern = re.compile('\d+\\]')
dot_pattern = re.compile('\\.|\\[')
path_segments = dot_pattern.split(path_to_root)
# construct base schema endpoint
schema_endpoint = self.schema
# reconstruct schema endpoint from segments
if path_segments[1]:
for i in range(1,len(path_segments)):
if item_pattern.match(path_segments[i]):
schema_endpoint = schema_endpoint[0]
else:
schema_endpoint = schema_endpoint[path_segments[i]]
return schema_endpoint
|
a helper method for finding the schema endpoint from a path to root
:param path_to_root: string with dot path to root from
:return: list, dict, string, number, or boolean at path to root
|
train
|
https://github.com/collectiveacuity/jsonModel/blob/1ea64c36d78add3faa7b85ff82c5ec685458c940/jsonmodel/validators.py#L1387-L1412
| null |
class jsonModel(object):
__rules__ = jsonLoader('jsonmodel', 'models/model-rules.json')
def __init__(self, data_model, query_rules=None):
    '''
    a method for testing data model declaration & initializing the class

    :param data_model: dictionary with json model architecture
    :param query_rules: [optional] dictionary with valid field type qualifiers
    :return: object with jsonModel methods
    :raises ModelValidationError: if the declaration violates model rules
    '''
    # validate schema input
    if not isinstance(data_model, dict):
        raise ModelValidationError('Data model must be a dictionary.')
    elif 'schema' not in data_model.keys():
        raise ModelValidationError('Data model must have a schema key.')
    elif not isinstance(data_model['schema'], dict):
        raise ModelValidationError('Value for the data model "schema" field must be a dictionary.')
    elif not data_model['schema']:
        raise ModelValidationError('Data model "schema" field must not be empty.')
    # construct base methods (deepcopy protects the caller's dict from mutation)
    from copy import deepcopy
    data_model = deepcopy(data_model)
    self.schema = data_model['schema']
    model_map = mapModel(self.schema)
    self.keyName = model_map.keyName
    self.keyCriteria = model_map.keyCriteria
    # construct protected type classes
    self._datatype_names = mapModel._datatype_names
    self._datatype_classes = mapModel._datatype_classes
    # validate absence of item designators in keys (only '[0]' is legal)
    item_pattern = re.compile('\[\d+\]')
    for i in range(len(self.keyName)):
        patterns_found = item_pattern.findall(self.keyName[i])
        if patterns_found:
            for designator in patterns_found:
                if designator != '[0]':
                    message = 'Key name for schema field %s must not contain the item designator pattern %s' % (self.keyName[i], designator)
                    raise ModelValidationError(message)
    # validate existence of first item in list declarations
    key_set = set(self.keyName)
    for i in range(len(self.keyName)):
        if self.keyCriteria[i]['value_datatype'] == 'list':
            item_key = self.keyName[i] + '[0]'
            if not item_key in key_set:
                message = 'Schema field %s must declare an initial item for the list.' % self.keyName[i]
                raise ModelValidationError(message)
            # alter list requirement if first item is empty
            else:
                item_index = self.keyName.index(item_key)
                if not self.keyCriteria[item_index]['required_field']:
                    self.keyCriteria[i]['required_field'] = False
    # validate title input & construct title method
    self.title = ''
    if 'title' in data_model.keys():
        if not isinstance(data_model['title'], str):
            raise ModelValidationError('Value for model title must be a string.')
        self.title = data_model['title']
    # validate description input & construct description method
    self.description = ''
    if 'description' in data_model.keys():
        if not isinstance(data_model['description'], str):
            raise ModelValidationError('Value for model description must be a string.')
        self.description = data_model['description']
    # validate url input & construct url method
    self.url = ''
    if 'url' in data_model.keys():
        if not isinstance(data_model['url'], str):
            raise ModelValidationError('Value for model url must be a string.')
        # FIX: was "self.title = data_model['url']", which clobbered the
        # title and left self.url empty; assign to self.url as intended
        self.url = data_model['url']
    # validate metadata input & construct metadata method
    self.metadata = {}
    if 'metadata' in data_model.keys():
        if not isinstance(data_model['metadata'], dict):
            raise ModelValidationError('Value for model metadata must be a dictionary.')
        self.metadata = data_model['metadata']
    # validate components input & construct component property
    self.components = {}
    if 'components' in data_model.keys():
        if not isinstance(data_model['components'], dict):
            raise ModelValidationError('Value for model components must be a dictionary.')
        self.components = self._validate_fields(data_model['components'], self.__rules__['components'])
    # construct keyMap fields from key names and key criteria
    self.keyMap = {}
    for i in range(len(self.keyName)):
        self.keyMap[self.keyName[i]] = self.keyCriteria[i]
    for key, value in self.components.items():
        # convert javascript dot_path to class dot_path
        dot_key = ''
        if not key:
            dot_key = '.'
        else:
            if key[0] != '.':
                dot_key = '.%s' % key
        # add component declarations to keyMap
        if key in self.keyMap.keys():
            for k, v in self.components[key].items():
                self.keyMap[key][k] = v
        elif dot_key and dot_key in self.keyMap.keys():
            for k, v in self.components[key].items():
                self.keyMap[dot_key][k] = v
    # validate default values in lists
    self._validate_defaults(self.keyMap)
    # construct queryRules property from class model rules
    # (drops qualifiers that only make sense for declarations, not queries)
    self.queryRules = {}
    for key, value in self.__rules__['components'].items():
        remove_from_query = [ 'required_field', 'default_value', 'example_values', 'field_title', 'field_position', 'field_description', 'field_metadata', 'extra_fields' ]
        field_qualifiers = {
            'value_exists': False
        }
        for k, v in value.items():
            if k not in remove_from_query:
                field_qualifiers[k] = v
        self.queryRules[key] = field_qualifiers
    # validate query rules input and replace queryRules property
    if query_rules:
        if not isinstance(query_rules, dict):
            message = 'Value for query rules input must be a dictionary.'
            raise ModelValidationError(message)
        input_set = set(query_rules.keys())
        req_set = set(self.queryRules.keys())
        if input_set - req_set:
            message = 'Query rules input may only have %s field key names.' % req_set
            raise ModelValidationError(message)
        elif req_set - input_set:
            message = 'Query rules input must have all %s field key names.' % req_set
            raise ModelValidationError(message)
        for key in req_set:
            if not isinstance(query_rules[key], dict):
                message = 'Value for query rules %s field must be a dictionary.' % key
                raise ModelValidationError(message)
            input_qualifier_set = set(query_rules[key].keys())
            req_qualifier_set = set(self.queryRules[key].keys())
            if input_qualifier_set - req_qualifier_set:
                message = 'Query rules field %s may only have qualifiers %s' % (key, req_qualifier_set)
                raise ModelValidationError(message)
            for k, v in query_rules[key].items():
                # each supplied qualifier must match the datatype of the default
                if v.__class__ != self.queryRules[key][k].__class__:
                    qualifier_index = self._datatype_classes.index(self.queryRules[key][k].__class__)
                    qualifier_type = self._datatype_names[qualifier_index]
                    message = 'Value for query rules field %s qualifier %s must be a "%s" datatype.' % (key, k, qualifier_type)
                    raise ModelValidationError(message)
        self.queryRules = query_rules
def _validate_fields(self, fields_dict, fields_rules, declared_value=True):
    '''
    a helper method for validating component/query field declarations

    checks that every field path exists in the schema, that each qualifier
    is legal for the field's datatype, that qualifier values have the right
    datatype, and that qualifiers are internally consistent with each other
    and with any values declared in the schema itself

    :param fields_dict: dictionary of field paths to qualifier dictionaries
    :param fields_rules: dictionary of per-datatype qualifier rules
    :param declared_value: boolean to also test values declared in the schema
    :return: fields_dict (or raises ModelValidationError)
    '''
    # validate key names in fields
    for key, value in fields_dict.items():
        # convert javascript dot_path to class dot_path
        if not key:
            key = '.'
        else:
            if key[0] != '.':
                key = '.%s' % key
        if key not in self.keyName:
            raise ModelValidationError('Field %s is not a field declared in model schema.' % key)
        elif not isinstance(value, dict):
            raise ModelValidationError('Value for field %s must be a dictionary.' % key)
        # validate field criteria are appropriate to field datatype
        value_type = self.keyCriteria[self.keyName.index(key)]['value_datatype']
        type_dict = {}
        if value_type == 'string':
            type_dict = fields_rules['.string_fields']
        elif value_type == 'number':
            type_dict = fields_rules['.number_fields']
        elif value_type == 'boolean':
            type_dict = fields_rules['.boolean_fields']
        elif value_type == 'list':
            type_dict = fields_rules['.list_fields']
        elif value_type == 'map':
            type_dict = fields_rules['.map_fields']
        elif value_type == 'null':
            type_dict = fields_rules['.null_fields']
        if set(value.keys()) - set(type_dict.keys()):
            raise ModelValidationError('Field %s may only have datatype %s qualifiers %s.' % (key, value_type, set(type_dict.keys())))
        # validate criteria qualifier values are appropriate datatype
        for k, v in value.items():
            v_index = self._datatype_classes.index(v.__class__)
            v_type = self._datatype_names[v_index]
            qualifier_index = self._datatype_classes.index(type_dict[k].__class__)
            qualifier_type = self._datatype_names[qualifier_index]
            if v_type != qualifier_type:
                message = 'Value for field %s qualifier %s must be a %s datatype.' % (key, k, qualifier_type)
                raise ModelValidationError(message)
            if qualifier_type == 'number':
                # an integer exemplar in the rules means the value must be int
                if isinstance(type_dict[k], int):
                    if not isinstance(v, int):
                        message = 'Value for field %s qualifier %s must be an integer.' % (key, k)
                        raise ModelValidationError(message)
            # validate internal logic of each qualifier value declaration
            if k in ('must_not_contain', 'must_contain', 'contains_either'):
                for item in v:
                    if not isinstance(item, str):
                        message = 'Each item in list field %s qualifier %s must be a string.' % (key, k)
                        raise ModelValidationError(message)
            if k in ('min_length', 'max_length', 'min_size', 'max_size'):
                if v < 0:
                    message = 'Value for field %s qualifier %s cannot be negative.' % (key, k)
                    raise ModelValidationError(message)
            if k in ('discrete_values', 'excluded_values', 'example_values'):
                for item in v:
                    if value_type == 'number':
                        if not isinstance(item, int) and not isinstance(item, float):
                            message = 'Each item in field %s qualifier %s list must be a number.' % (key, k)
                            raise ModelValidationError(message)
                    elif not isinstance(item, str):
                        message = 'Each item in list for field %s qualifier %s must be a string.' % (key, k)
                        raise ModelValidationError(message)
            if k == 'identical_to':
                if not v in self.keyName:
                    message = 'Value "%s" for field %s qualifier %s not found in components keys.' % (v, key, k)
                    raise ModelValidationError(message)
            if k == 'unique_values':
                if v:
                    # uniqueness is only decidable for hashable scalar items
                    item_name = key + '[0]'
                    item_type = self.keyCriteria[self.keyName.index(item_name)]['value_datatype']
                    if not item_type in ('number', 'string'):
                        message = 'Field %s[0] must be either a string or number if qualifier "unique_values": true' % key
                        raise ModelValidationError(message)
        # validate lack of other qualifiers if value exist is false
        if 'value_exists' in value.keys():
            if not value['value_exists']:
                if set(value.keys()) - {'value_exists'}:
                    message = 'If field %s qualifier value_exists: false, field may not have other qualifiers.' % key
                    raise ModelValidationError(message)
        # validate size qualifiers against each other
        size_qualifiers = ['min_size', 'max_size']
        for qualifier in size_qualifiers:
            if qualifier in value.keys():
                test_value = value[qualifier]
                value_path = 'field %s qualifier %s' % (key, qualifier)
                header = 'Value %s for %s' % (test_value, value_path)
                if 'min_size' in value.keys():
                    if test_value < value['min_size']:
                        message = '%s must not be less than "min_size": %s' % (header, value['min_size'])
                        raise ModelValidationError(message)
                if 'max_size' in value.keys():
                    if test_value > value['max_size']:
                        message = '%s must not be greater than "max_size": %s' % (header, value['max_size'])
                        raise ModelValidationError(message)
        # validate length qualifiers against each other
        length_qualifiers = ['min_length', 'max_length']
        for qualifier in length_qualifiers:
            if qualifier in value.keys():
                test_value = value[qualifier]
                value_path = 'field %s qualifier %s' % (key, qualifier)
                header = 'Value %s for %s' % (test_value, value_path)
                if 'min_length' in value.keys():
                    if test_value < value['min_length']:
                        message = '%s must be at least "min_length": %s' % (header, value['min_length'])
                        raise ModelValidationError(message)
                if 'max_length' in value.keys():
                    if test_value > value['max_length']:
                        message = '%s cannot be more than "max_length": %s' % (header, value['max_length'])
                        raise ModelValidationError(message)
        # validate range qualifiers against each other & length qualifiers
        range_qualifiers = ['min_value', 'max_value', 'greater_than', 'less_than', 'equal_to']
        for qualifier in range_qualifiers:
            if qualifier in value.keys():
                test_value = value[qualifier]
                value_path = 'field %s qualifier %s' % (key, qualifier)
                quote_text = ''
                if isinstance(test_value, str):
                    quote_text = '"'
                header = 'Value %s%s%s for %s' % (quote_text, test_value, quote_text, value_path)
                if 'min_value' in value.keys():
                    if test_value < value['min_value']:
                        message = '%s must not be less than "min_value": %s' % (header, value['min_value'])
                        raise ModelValidationError(message)
                if 'max_value' in value.keys():
                    if test_value > value['max_value']:
                        message = '%s must not be greater than "max_value": %s' % (header, value['max_value'])
                        raise ModelValidationError(message)
                if 'greater_than' in value.keys():
                    # skip self-comparison: greater_than need not exceed itself
                    if test_value <= value['greater_than'] and not qualifier == 'greater_than':
                        message = '%s must be "greater_than": %s' % (header, value['greater_than'])
                        raise ModelValidationError(message)
                if 'less_than' in value.keys():
                    if test_value >= value['less_than'] and not qualifier == 'less_than':
                        message = '%s must be "less_than": %s' % (header, value['less_than'])
                        raise ModelValidationError(message)
                if 'min_length' in value.keys():
                    if len(test_value) < value['min_length']:
                        message = '%s must be at least "min_length": %s' % (header, value['min_length'])
                        raise ModelValidationError(message)
                if 'max_length' in value.keys():
                    if len(test_value) > value['max_length']:
                        message = '%s cannot be more than "max_length": %s' % (header, value['max_length'])
                        raise ModelValidationError(message)
                if 'integer_data' in value.keys():
                    if value['integer_data']:
                        if not isinstance(test_value, int):
                            message = '%s must be an "integer_data".' % header
                            raise ModelValidationError(message)
                if 'must_not_contain' in value.keys():
                    for regex in value['must_not_contain']:
                        regex_pattern = re.compile(regex)
                        if regex_pattern.findall(test_value):
                            message = '%s matches regex pattern in "must_not_contain": ["%s"]' % (header, regex)
                            raise ModelValidationError(message)
                if 'must_contain' in value.keys():
                    for regex in value['must_contain']:
                        regex_pattern = re.compile(regex)
                        if not regex_pattern.findall(test_value):
                            message = '%s does not match regex pattern in "must_contain": ["%s"].' % (header, regex)
                            raise ModelValidationError(message)
                if 'contains_either' in value.keys():
                    regex_match = False
                    regex_patterns = []
                    for regex in value['contains_either']:
                        regex_patterns.append(regex)
                        regex_pattern = re.compile(regex)
                        if regex_pattern.findall(test_value):
                            regex_match = True
                    if not regex_match:
                        message = '%s does not match any regex patterns in "contains_either": %s' % (header, regex_patterns)
                        raise ModelValidationError(message)
                if 'byte_data' in value.keys():
                    if value['byte_data']:
                        # range comparisons are meaningless for base64 blobs
                        if qualifier != 'equal_to':
                            message = '%s cannot be used with base64 encoded "byte_data".' % header
                            raise ModelValidationError(message)
        # validate discrete value qualifiers against other criteria
        schema_field = self.keyCriteria[self.keyName.index(key)]
        discrete_qualifiers = ['declared_value', 'default_value', 'excluded_values', 'discrete_values', 'example_values']
        for qualifier in discrete_qualifiers:
            test_qualifier = False
            if qualifier in schema_field:
                test_qualifier = True
                # a falsy declared_value carries no information; skip it
                if qualifier == 'declared_value' and not schema_field[qualifier]:
                    test_qualifier = False
            if qualifier in value.keys() or (test_qualifier and declared_value):
                multiple_values = False
                if qualifier in value.keys():
                    if isinstance(value[qualifier], list):
                        test_list = value[qualifier]
                        multiple_values = True
                    else:
                        test_list = [value[qualifier]]
                else:
                    test_list = [schema_field[qualifier]]
                value_path = 'field %s qualifier %s' % (key, qualifier)
                for i in range(len(test_list)):
                    test_value = test_list[i]
                    quote_text = ''
                    if isinstance(test_value, str):
                        quote_text = '"'
                    item_text = ''
                    if multiple_values:
                        item_text = '[%s]' % i
                    header = 'Value %s%s%s for %s%s' % (quote_text, test_value, quote_text, value_path, item_text)
                    if 'min_value' in value.keys():
                        if test_value < value['min_value']:
                            message = '%s must not be less than "min_value": %s' % (header, value['min_value'])
                            raise ModelValidationError(message)
                    if 'max_value' in value.keys():
                        if test_value > value['max_value']:
                            message = '%s must not be greater than "max_value": %s' % (header, value['max_value'])
                            raise ModelValidationError(message)
                    if 'equal_to' in value.keys():
                        if test_value != value['equal_to']:
                            # NOTE(review): the isinstance(test_value, bool) guard
                            # limits this check to booleans — confirm intent
                            if qualifier != 'declared_value' and isinstance(test_value, bool):
                                message = '%s must be "equal_to": %s' % (header, value['equal_to'])
                                raise ModelValidationError(message)
                    if 'greater_than' in value.keys():
                        if test_value <= value['greater_than']:
                            message = '%s must be "greater_than": %s' % (header, value['greater_than'])
                            raise ModelValidationError(message)
                    if 'less_than' in value.keys():
                        if test_value >= value['less_than']:
                            message = '%s must be "less_than": %s' % (header, value['less_than'])
                            raise ModelValidationError(message)
                    if 'integer_data' in value.keys():
                        if value['integer_data']:
                            if not isinstance(test_value, int):
                                message = '%s must be an "integer_data".' % header
                                raise ModelValidationError(message)
                    if 'min_length' in value.keys():
                        if len(test_value) < value['min_length']:
                            message = '%s must be at least "min_length": %s' % (header, value['min_length'])
                            raise ModelValidationError(message)
                    if 'max_length' in value.keys():
                        if len(test_value) > value['max_length']:
                            message = '%s cannot be more than "max_length": %s' % (header, value['max_length'])
                            raise ModelValidationError(message)
                    if 'must_not_contain' in value.keys():
                        for regex in value['must_not_contain']:
                            regex_pattern = re.compile(regex)
                            if regex_pattern.findall(test_value):
                                message = '%s matches regex pattern in "must_not_contain": ["%s"]' % (header, regex)
                                raise ModelValidationError(message)
                    if 'must_contain' in value.keys():
                        for regex in value['must_contain']:
                            regex_pattern = re.compile(regex)
                            if not regex_pattern.findall(test_value):
                                message = '%s does not match regex pattern in "must_contain": ["%s"]' % (header, regex)
                                raise ModelValidationError(message)
                    if 'contains_either' in value.keys():
                        regex_match = False
                        regex_patterns = []
                        for regex in value['contains_either']:
                            regex_patterns.append(regex)
                            regex_pattern = re.compile(regex)
                            if regex_pattern.findall(test_value):
                                regex_match = True
                        if not regex_match:
                            message = '%s does not match any regex patterns in "contains_either": %s' % (header, regex_patterns)
                            raise ModelValidationError(message)
                    if 'byte_data' in value.keys():
                        # byte_data fields must hold base64 decodable strings
                        message = '%s cannot be base64 decoded to "byte_data".' % header
                        try:
                            decoded_bytes = b64decode(test_value)
                        except:
                            raise ModelValidationError(message)
                        if not isinstance(decoded_bytes, bytes):
                            raise ModelValidationError(message)
        # validate discrete value qualifiers against each other
        for qualifier in discrete_qualifiers:
            test_qualifier = False
            if qualifier in schema_field:
                test_qualifier = True
                if qualifier == 'declared_value' and not schema_field[qualifier]:
                    test_qualifier = False
            if qualifier in value.keys() or (test_qualifier and declared_value):
                multiple_values = False
                if qualifier in value.keys():
                    if isinstance(value[qualifier], list):
                        test_list = value[qualifier]
                        multiple_values = True
                    else:
                        test_list = [value[qualifier]]
                else:
                    test_list = [schema_field[qualifier]]
                value_path = 'field %s qualifier %s' % (key, qualifier)
                for i in range(len(test_list)):
                    test_value = test_list[i]
                    quote_text = ''
                    if isinstance(test_value, str):
                        quote_text = '"'
                    item_text = ''
                    if multiple_values:
                        item_text = '[%s]' % i
                    header = 'Value %s%s%s for %s%s' % (quote_text, test_value, quote_text, value_path, item_text)
                    if 'excluded_values' in value.keys():
                        if not qualifier == 'excluded_values':
                            if test_value in value['excluded_values']:
                                message = '%s cannot be one of "excluded_values": %s.' % (header, value['excluded_values'])
                                raise ModelValidationError(message)
                    if 'discrete_values' in value.keys():
                        # NOTE(review): guard reads "not qualifier == 'excluded_values'" —
                        # possibly intended to be 'discrete_values'; confirm against upstream
                        if not qualifier == 'excluded_values':
                            if test_value not in value['discrete_values']:
                                message = '%s must be one of "discrete_values": %s' % (header, value['discrete_values'])
                                raise ModelValidationError(message)
    return fields_dict
def _validate_defaults(self, fields_dict):
    '''
    a helper method for validating default values declared in model fields

    each field with a 'default_value' qualifier has its default (or each
    item of a list of defaults) checked against the field's own criteria.
    NOTE(review): only fields whose value_datatype is 'list' are actually
    exercised here -- scalar defaults pass through unchecked; confirm this
    is handled elsewhere.

    :param fields_dict: dictionary with flattened model field criteria
    :return: fields_dict (or raises ModelValidationError)
    '''
    # validate key names in fields
    for key, value in fields_dict.items():
        # retrieve value type and type dict
        value_type = self.keyCriteria[self.keyName.index(key)]['value_datatype']
        # validate discrete value qualifiers against other criteria
        qualifier = 'default_value'
        if qualifier in value.keys():
            # normalize single default into a list for uniform handling
            multiple_values = False
            if isinstance(value[qualifier], list):
                test_list = value[qualifier]
                multiple_values = True
            else:
                test_list = [value[qualifier]]
            value_path = 'field %s qualifier %s' % (key, qualifier)
            for i in range(len(test_list)):
                test_value = test_list[i]
                # quote string values in the error header for readability
                quote_text = ''
                if isinstance(test_value, str):
                    quote_text = '"'
                item_text = ''
                if multiple_values:
                    item_text = '[%s]' % i
                qualifier_text = value_path + item_text
                header = 'Value %s%s%s for %s' % (quote_text, test_value, quote_text, qualifier_text)
                if value_type == 'list':
                    # validate each default item against the list's item rules
                    default_item_key = '%s[0]' % key
                    try:
                        self.validate(test_value, default_item_key, object_title=header)
                    except Exception as err:
                        # rewrite the item path in the message to point at the qualifier
                        raise ModelValidationError(str(err).strip().replace('field %s' % default_item_key, qualifier_text))
    return fields_dict
def _evaluate_field(self, record_dict, field_name, field_criteria):
    ''' a helper method for evaluating record values based upon query criteria

    each positive operator passes when ANY value walked from the record
    satisfies it; 'excluded_values' and 'must_not_contain' fail when ANY
    value matches. the method returns at the first failed operator.

    :param record_dict: dictionary with model valid data to evaluate
    :param field_name: string with path to root of query field
    :param field_criteria: dictionary with query operators and qualifiers
    :return: boolean (True if no field_criteria evaluate to false)
    '''
    # determine value existence criteria
    value_exists = True
    if 'value_exists' in field_criteria.keys():
        if not field_criteria['value_exists']:
            value_exists = False
    # validate existence of field
    # NOTE(review): bare except treats any _walk failure as a missing field
    field_exists = True
    try:
        record_values = self._walk(field_name, record_dict)
    except:
        field_exists = False
    # evaluate existence query criteria
    if value_exists != field_exists:
        return False
    elif not value_exists:
        # field is absent, as required; no other operator can apply
        return True
    # convert javascript dot_path to class dot_path
    field_key = field_name
    if not field_name:
        field_key = '.'
    else:
        if field_name[0] != '.':
            field_key = '.%s' % field_name
    # evaluate other query criteria
    for key, value in field_criteria.items():
        if key in ('min_size', 'min_length'):
            found = False
            if self.keyMap[field_key]['value_datatype'] == 'map':
                for record_value in record_values:
                    # serialized size less fixed overhead
                    # NOTE(review): 51 is a magic offset -- confirm derivation
                    record_size = sys.getsizeof(json.dumps(str(record_value)).replace(' ','')) - 51
                    if record_size >= value:
                        found = True
                        break
            else:
                for record_value in record_values:
                    if len(record_value) >= value:
                        found = True
                        break
            if not found:
                return False
        elif key in ('max_size', 'max_length'):
            found = False
            if self.keyMap[field_key]['value_datatype'] == 'map':
                for record_value in record_values:
                    record_size = sys.getsizeof(json.dumps(str(record_value)).replace(' ','')) - 51
                    if record_size <= value:
                        found = True
                        break
            else:
                for record_value in record_values:
                    if len(record_value) <= value:
                        found = True
                        break
            if not found:
                return False
        elif key == 'min_value':
            found = False
            for record_value in record_values:
                if record_value >= value:
                    found = True
                    break
            if not found:
                return False
        elif key == 'max_value':
            found = False
            for record_value in record_values:
                if record_value <= value:
                    found = True
                    break
            if not found:
                return False
        elif key == 'equal_to':
            found = False
            for record_value in record_values:
                if record_value == value:
                    found = True
                    break
            if not found:
                return False
        elif key == 'greater_than':
            found = False
            for record_value in record_values:
                if record_value > value:
                    found = True
                    break
            if not found:
                return False
        elif key == 'less_than':
            found = False
            for record_value in record_values:
                if record_value < value:
                    found = True
                    break
            if not found:
                return False
        elif key == 'excluded_values':
            # negative operator: any excluded match fails the query
            for record_value in record_values:
                if record_value in value:
                    return False
        elif key == 'discrete_values':
            found = False
            for record_value in record_values:
                if record_value in value:
                    found = True
                    break
            if not found:
                return False
        elif key == 'integer_data':
            # class comparison (not isinstance) so bool does not count as int
            found = False
            dummy_int = 1
            for record_value in record_values:
                if record_value.__class__ == dummy_int.__class__:
                    found = True
                    break
            if value != found:
                return False
        elif key == 'byte_data':
            # a value qualifies when it base64-decodes to bytes
            found = False
            for record_value in record_values:
                try:
                    decoded_bytes = b64decode(record_value)
                except:
                    decoded_bytes = ''
                if isinstance(decoded_bytes, bytes):
                    found = True
                    break
            if value != found:
                return False
        elif key == 'must_contain':
            # every regex must match at least one walked value
            for regex in value:
                regex_pattern = re.compile(regex)
                found = False
                for record_value in record_values:
                    if regex_pattern.findall(record_value):
                        found = True
                        break
                if not found:
                    return False
        elif key == 'must_not_contain':
            # no regex may match any walked value
            for regex in value:
                regex_pattern = re.compile(regex)
                found = False
                for record_value in record_values:
                    if regex_pattern.findall(record_value):
                        found = True
                        break
                if found:
                    return False
        elif key == 'contains_either':
            # at least one regex must match at least one walked value
            found = False
            for regex in value:
                regex_pattern = re.compile(regex)
                for record_value in record_values:
                    if regex_pattern.findall(record_value):
                        found = True
                        break
                if found:
                    break
            if not found:
                return False
        elif key == 'unique_values':
            # NOTE(review): only the last walked value decides the outcome
            # when multiple values are returned -- confirm intended
            for record_value in record_values:
                unique_values = True
                if len(record_value) != len(set(record_value)):
                    unique_values = False
            if value != unique_values:
                return False
    return True
def _validate_dict(self, input_dict, schema_dict, path_to_root, object_title=''):
    ''' a helper method for recursively validating keys in dictionaries

    checks map size limits, key datatypes, required/extra fields and the
    datatype of each value, recursing into nested maps and lists. missing
    optional fields with a declared default are filled in on the way out.

    :param input_dict: dictionary with input to validate
    :param schema_dict: dictionary with schema declaration for this map
    :param path_to_root: string with dot path to this map in the model
    :param object_title: [optional] string with name of input for error reports
    :return: input_dict (or raises InputValidationError)
    '''
    # reconstruct key path to current dictionary in model
    rules_top_level_key = re.sub('\[\d+\]', '[0]', path_to_root)
    map_rules = self.keyMap[rules_top_level_key]
    # construct list error report template
    map_error = {
        'object_title': object_title,
        'model_schema': self.schema,
        'input_criteria': map_rules,
        'failed_test': 'value_datatype',
        'input_path': path_to_root,
        'error_value': 0,
        'error_code': 4001
    }
    # validate map size (serialized size less fixed overhead)
    # NOTE(review): 51 is a magic offset -- confirm derivation
    if 'min_size' in map_rules.keys():
        input_size = sys.getsizeof(json.dumps(str(input_dict)).replace(' ','')) - 51
        if input_size < map_rules['min_size']:
            map_error['failed_test'] = 'min_size'
            map_error['error_value'] = input_size
            map_error['error_code'] = 4031
            raise InputValidationError(map_error)
    if 'max_size' in map_rules.keys():
        input_size = sys.getsizeof(json.dumps(str(input_dict)).replace(' ','')) - 51
        if input_size > map_rules['max_size']:
            map_error['failed_test'] = 'max_size'
            map_error['error_value'] = input_size
            map_error['error_code'] = 4032
            raise InputValidationError(map_error)
    # construct lists of keys in input dictionary (all keys must be strings)
    input_keys = []
    input_key_list = []
    for key in input_dict.keys():
        error_dict = {
            'object_title': object_title,
            'model_schema': self.schema,
            'input_criteria': self.keyMap[rules_top_level_key],
            'failed_test': 'key_datatype',
            'input_path': path_to_root,
            'error_value': key,
            'error_code': 4004
        }
        # NOTE: writes through to the shared criteria dict in self.keyMap
        error_dict['input_criteria']['key_datatype'] = 'string'
        if path_to_root == '.':
            if not isinstance(key, str):
                input_key_name = path_to_root + str(key)
                error_dict['input_path'] = input_key_name
                raise InputValidationError(error_dict)
            input_key_name = path_to_root + key
        else:
            if not isinstance(key, str):
                input_key_name = path_to_root + '.' + str(key)
                error_dict['input_path'] = input_key_name
                raise InputValidationError(error_dict)
            input_key_name = path_to_root + '.' + key
        input_keys.append(input_key_name)
        input_key_list.append(key)
    # TODO: validate top-level key and values against identical to reference
    # TODO: run lambda function and call validation
    # construct lists of keys in schema dictionary
    max_keys = []
    max_key_list = []
    req_keys = []
    req_key_list = []
    for key in schema_dict.keys():
        if path_to_root == '.':
            schema_key_name = path_to_root + key
        else:
            schema_key_name = path_to_root + '.' + key
        max_keys.append(schema_key_name)
        max_key_list.append(key)
        rules_schema_key_name = re.sub('\[\d+\]', '[0]', schema_key_name)
        if self.keyMap[rules_schema_key_name]['required_field']:
            req_keys.append(schema_key_name)
            req_key_list.append(key)
    # validate existence of required fields
    missing_keys = set(req_keys) - set(input_keys)
    if missing_keys:
        error_dict = {
            'object_title': object_title,
            'model_schema': self.schema,
            'input_criteria': self.keyMap[rules_top_level_key],
            'failed_test': 'required_field',
            'input_path': path_to_root,
            'error_value': list(missing_keys),
            'error_code': 4002
        }
        # NOTE: writes through to the shared criteria dict in self.keyMap
        error_dict['input_criteria']['required_keys'] = req_keys
        raise InputValidationError(error_dict)
    # validate existence of extra fields (unless extra_fields is allowed)
    extra_keys = set(input_keys) - set(max_keys)
    if extra_keys and not self.keyMap[rules_top_level_key]['extra_fields']:
        extra_key_list = []
        for key in extra_keys:
            # strip the current path prefix to report the bare key name
            pathless_key = re.sub(rules_top_level_key, '', key, count=1)
            extra_key_list.append(pathless_key)
        error_dict = {
            'object_title': object_title,
            'model_schema': self.schema,
            'input_criteria': self.keyMap[rules_top_level_key],
            'failed_test': 'extra_fields',
            'input_path': path_to_root,
            'error_value': extra_key_list,
            'error_code': 4003
        }
        error_dict['input_criteria']['maximum_scope'] = max_key_list
        raise InputValidationError(error_dict)
    # validate datatype of value
    for key, value in input_dict.items():
        if path_to_root == '.':
            input_key_name = path_to_root + key
        else:
            input_key_name = path_to_root + '.' + key
        rules_input_key_name = re.sub('\[\d+\]', '[0]', input_key_name)
        if input_key_name in max_keys:
            input_criteria = self.keyMap[rules_input_key_name]
            error_dict = {
                'object_title': object_title,
                'model_schema': self.schema,
                'input_criteria': input_criteria,
                'failed_test': 'value_datatype',
                'input_path': input_key_name,
                'error_value': value,
                'error_code': 4001
            }
            # a class outside the recognized datatypes fails immediately
            try:
                value_index = self._datatype_classes.index(value.__class__)
            except:
                error_dict['error_value'] = value.__class__.__name__
                raise InputValidationError(error_dict)
            value_type = self._datatype_names[value_index]
            # a 'null' declaration accepts any datatype
            if input_criteria['value_datatype'] == 'null':
                pass
            else:
                if value_type != input_criteria['value_datatype']:
                    raise InputValidationError(error_dict)
            # call appropriate validation sub-routine for datatype of value
            if value_type == 'boolean':
                input_dict[key] = self._validate_boolean(value, input_key_name, object_title)
            elif value_type == 'number':
                input_dict[key] = self._validate_number(value, input_key_name, object_title)
            elif value_type == 'string':
                input_dict[key] = self._validate_string(value, input_key_name, object_title)
            elif value_type == 'map':
                input_dict[key] = self._validate_dict(value, schema_dict[key], input_key_name, object_title)
            elif value_type == 'list':
                input_dict[key] = self._validate_list(value, schema_dict[key], input_key_name, object_title)
    # set default values for empty optional fields (mutates input_dict)
    for key in max_key_list:
        if key not in input_key_list:
            indexed_key = max_keys[max_key_list.index(key)]
            if indexed_key in self.components.keys():
                if 'default_value' in self.components[indexed_key]:
                    input_dict[key] = self.components[indexed_key]['default_value']
    return input_dict
def _validate_list(self, input_list, schema_list, path_to_root, object_title=''):
    '''
    a helper method for recursively validating items in a list

    checks list size limits, the datatype of every item against the rules
    declared for the first schema item, recurses into nested structures,
    and finally enforces item uniqueness when declared.

    :param input_list: list with items to validate
    :param schema_list: list with schema declaration for items
    :param path_to_root: string with dot path to this list in the model
    :param object_title: [optional] string with name of input for error reports
    :return: input_list (or raises InputValidationError)
    '''
    # construct rules for list and items (item rules live under '[0]')
    rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
    list_rules = self.keyMap[rules_path_to_root]
    initial_key = rules_path_to_root + '[0]'
    item_rules = self.keyMap[initial_key]
    # construct list error report template
    list_error = {
        'object_title': object_title,
        'model_schema': self.schema,
        'input_criteria': list_rules,
        'failed_test': 'value_datatype',
        'input_path': path_to_root,
        'error_value': 0,
        'error_code': 4001
    }
    # validate list rules
    if 'min_size' in list_rules.keys():
        if len(input_list) < list_rules['min_size']:
            list_error['failed_test'] = 'min_size'
            list_error['error_value'] = len(input_list)
            list_error['error_code'] = 4031
            raise InputValidationError(list_error)
    if 'max_size' in list_rules.keys():
        if len(input_list) > list_rules['max_size']:
            list_error['failed_test'] = 'max_size'
            list_error['error_value'] = len(input_list)
            list_error['error_code'] = 4032
            raise InputValidationError(list_error)
    # construct item error report template
    item_error = {
        'object_title': object_title,
        'model_schema': self.schema,
        'input_criteria': item_rules,
        'failed_test': 'value_datatype',
        'input_path': initial_key,
        'error_value': None,
        'error_code': 4001
    }
    # validate datatype of items
    for i in range(len(input_list)):
        input_path = path_to_root + '[%s]' % i
        item = input_list[i]
        item_error['input_path'] = input_path
        # a class outside the recognized datatypes fails immediately
        try:
            item_index = self._datatype_classes.index(item.__class__)
        except:
            item_error['error_value'] = item.__class__.__name__
            raise InputValidationError(item_error)
        item_type = self._datatype_names[item_index]
        item_error['error_value'] = item
        # a 'null' declaration accepts any datatype
        if item_rules['value_datatype'] == 'null':
            pass
        else:
            if item_type != item_rules['value_datatype']:
                raise InputValidationError(item_error)
        # call appropriate validation sub-routine for datatype of item
        if item_type == 'boolean':
            input_list[i] = self._validate_boolean(item, input_path, object_title)
        elif item_type == 'number':
            input_list[i] = self._validate_number(item, input_path, object_title)
        elif item_type == 'string':
            input_list[i] = self._validate_string(item, input_path, object_title)
        elif item_type == 'map':
            input_list[i] = self._validate_dict(item, schema_list[0], input_path, object_title)
        elif item_type == 'list':
            input_list[i] = self._validate_list(item, schema_list[0], input_path, object_title)
    # validate unique values in list (checked after item validation)
    if 'unique_values' in list_rules.keys():
        if len(set(input_list)) < len(input_list):
            list_error['failed_test'] = 'unique_values'
            list_error['error_value'] = input_list
            list_error['error_code'] = 4033
            raise InputValidationError(list_error)
    # TODO: validate top-level item values against identical to reference
    # TODO: run lambda function and call validation url
    return input_list
def _validate_number(self, input_number, path_to_root, object_title=''):
'''
a helper method for validating properties of a number
:return: input_number
'''
rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
input_criteria = self.keyMap[rules_path_to_root]
error_dict = {
'object_title': object_title,
'model_schema': self.schema,
'input_criteria': input_criteria,
'failed_test': 'value_datatype',
'input_path': path_to_root,
'error_value': input_number,
'error_code': 4001
}
if 'integer_data' in input_criteria.keys():
if input_criteria['integer_data'] and not isinstance(input_number, int):
error_dict['failed_test'] = 'integer_data'
error_dict['error_code'] = 4021
raise InputValidationError(error_dict)
if 'min_value' in input_criteria.keys():
if input_number < input_criteria['min_value']:
error_dict['failed_test'] = 'min_value'
error_dict['error_code'] = 4022
raise InputValidationError(error_dict)
if 'max_value' in input_criteria.keys():
if input_number > input_criteria['max_value']:
error_dict['failed_test'] = 'max_value'
error_dict['error_code'] = 4023
raise InputValidationError(error_dict)
if 'greater_than' in input_criteria.keys():
if input_number <= input_criteria['greater_than']:
error_dict['failed_test'] = 'greater_than'
error_dict['error_code'] = 4024
raise InputValidationError(error_dict)
if 'less_than' in input_criteria.keys():
if input_number >= input_criteria['less_than']:
error_dict['failed_test'] = 'less_than'
error_dict['error_code'] = 4025
raise InputValidationError(error_dict)
if 'equal_to' in input_criteria.keys():
if input_number != input_criteria['equal_to']:
error_dict['failed_test'] = 'equal_to'
error_dict['error_code'] = 4026
raise InputValidationError(error_dict)
if 'discrete_values' in input_criteria.keys():
if input_number not in input_criteria['discrete_values']:
error_dict['failed_test'] = 'discrete_values'
error_dict['error_code'] = 4041
raise InputValidationError(error_dict)
if 'excluded_values' in input_criteria.keys():
if input_number in input_criteria['excluded_values']:
error_dict['failed_test'] = 'excluded_values'
error_dict['error_code'] = 4042
raise InputValidationError(error_dict)
# TODO: validate number against identical to reference
# TODO: run lambda function and call validation url
return input_number
def _validate_string(self, input_string, path_to_root, object_title=''):
'''
a helper method for validating properties of a string
:return: input_string
'''
rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
input_criteria = self.keyMap[rules_path_to_root]
error_dict = {
'object_title': object_title,
'model_schema': self.schema,
'input_criteria': input_criteria,
'failed_test': 'value_datatype',
'input_path': path_to_root,
'error_value': input_string,
'error_code': 4001
}
if 'byte_data' in input_criteria.keys():
if input_criteria['byte_data']:
error_dict['failed_test'] = 'byte_data'
error_dict['error_code'] = 4011
try:
decoded_bytes = b64decode(input_string)
except:
raise InputValidationError(error_dict)
if not isinstance(decoded_bytes, bytes):
raise InputValidationError(error_dict)
if 'min_value' in input_criteria.keys():
if input_string < input_criteria['min_value']:
error_dict['failed_test'] = 'min_value'
error_dict['error_code'] = 4022
raise InputValidationError(error_dict)
if 'max_value' in input_criteria.keys():
if input_string > input_criteria['max_value']:
error_dict['failed_test'] = 'max_value'
error_dict['error_code'] = 4023
raise InputValidationError(error_dict)
if 'greater_than' in input_criteria.keys():
if input_string <= input_criteria['greater_than']:
error_dict['failed_test'] = 'greater_than'
error_dict['error_code'] = 4024
raise InputValidationError(error_dict)
if 'less_than' in input_criteria.keys():
if input_string >= input_criteria['less_than']:
error_dict['failed_test'] = 'less_than'
error_dict['error_code'] = 4025
raise InputValidationError(error_dict)
if 'equal_to' in input_criteria.keys():
if input_string != input_criteria['equal_to']:
error_dict['failed_test'] = 'equal_to'
error_dict['error_code'] = 4026
raise InputValidationError(error_dict)
if 'min_length' in input_criteria.keys():
if len(input_string) < input_criteria['min_length']:
error_dict['failed_test'] = 'min_length'
error_dict['error_code'] = 4012
raise InputValidationError(error_dict)
if 'max_length' in input_criteria.keys():
if len(input_string) > input_criteria['max_length']:
error_dict['failed_test'] = 'max_length'
error_dict['error_code'] = 4013
raise InputValidationError(error_dict)
if 'must_not_contain' in input_criteria.keys():
for regex in input_criteria['must_not_contain']:
regex_pattern = re.compile(regex)
if regex_pattern.findall(input_string):
error_dict['failed_test'] = 'must_not_contain'
error_dict['error_code'] = 4014
raise InputValidationError(error_dict)
if 'must_contain' in input_criteria.keys():
for regex in input_criteria['must_contain']:
regex_pattern = re.compile(regex)
if not regex_pattern.findall(input_string):
error_dict['failed_test'] = 'must_contain'
error_dict['error_code'] = 4015
raise InputValidationError(error_dict)
if 'contains_either' in input_criteria.keys():
regex_match = False
for regex in input_criteria['contains_either']:
regex_pattern = re.compile(regex)
if regex_pattern.findall(input_string):
regex_match = True
if not regex_match:
error_dict['failed_test'] = 'contains_either'
error_dict['error_code'] = 4016
raise InputValidationError(error_dict)
if 'discrete_values' in input_criteria.keys():
if input_string not in input_criteria['discrete_values']:
error_dict['failed_test'] = 'discrete_values'
error_dict['error_code'] = 4041
raise InputValidationError(error_dict)
if 'excluded_values' in input_criteria.keys():
if input_string in input_criteria['excluded_values']:
error_dict['failed_test'] = 'excluded_values'
error_dict['error_code'] = 4042
raise InputValidationError(error_dict)
# TODO: validate string against identical to reference
# TODO: run lambda function and call validation url
return input_string
def _validate_boolean(self, input_boolean, path_to_root, object_title=''):
'''
a helper method for validating properties of a boolean
:return: input_boolean
'''
rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
input_criteria = self.keyMap[rules_path_to_root]
error_dict = {
'object_title': object_title,
'model_schema': self.schema,
'input_criteria': input_criteria,
'failed_test': 'value_datatype',
'input_path': path_to_root,
'error_value': input_boolean,
'error_code': 4001
}
if 'equal_to' in input_criteria.keys():
if input_boolean != input_criteria['equal_to']:
error_dict['failed_test'] = 'equal_to'
error_dict['error_code'] = 4026
raise InputValidationError(error_dict)
# TODO: validate boolean against identical to reference
# TODO: run lambda function and call validation url
return input_boolean
def _ingest_dict(self, input_dict, schema_dict, path_to_root):
    '''
    a helper method for ingesting keys, value pairs in a dictionary

    for each key declared in the schema, the input value is kept when its
    datatype matches the schema; otherwise the field default (or an empty
    value of the declared datatype) is substituted. undeclared input keys
    are passed through only when 'extra_fields' is true for this map.

    :param input_dict: dictionary with input to ingest
    :param schema_dict: dictionary with schema declaration for this map
    :param path_to_root: string with dot path to this map in the model
    :return: valid_dict
    '''
    valid_dict = {}
    # construct path to root for rules
    rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
    # iterate over keys in schema dict
    for key, value in schema_dict.items():
        key_path = path_to_root
        if not key_path == '.':
            key_path += '.'
        key_path += key
        rules_key_path = re.sub('\[\d+\]', '[0]', key_path)
        # determine whether the input value's datatype matches the schema
        value_match = False
        if key in input_dict.keys():
            value_index = self._datatype_classes.index(value.__class__)
            value_type = self._datatype_names[value_index]
            # NOTE(review): bare except treats any unrecognized input
            # class as a datatype mismatch
            try:
                v_index = self._datatype_classes.index(input_dict[key].__class__)
                v_type = self._datatype_names[v_index]
                if v_type == value_type:
                    value_match = True
            except:
                value_match = False
        if value_match:
            # keep the input value, recursing into containers
            if value_type == 'null':
                valid_dict[key] = input_dict[key]
            elif value_type == 'boolean':
                valid_dict[key] = self._ingest_boolean(input_dict[key], key_path)
            elif value_type == 'number':
                valid_dict[key] = self._ingest_number(input_dict[key], key_path)
            elif value_type == 'string':
                valid_dict[key] = self._ingest_string(input_dict[key], key_path)
            elif value_type == 'map':
                valid_dict[key] = self._ingest_dict(input_dict[key], schema_dict[key], key_path)
            elif value_type == 'list':
                valid_dict[key] = self._ingest_list(input_dict[key], schema_dict[key], key_path)
        else:
            # substitute the declared default, or an empty value of the
            # datatype declared for the key
            value_type = self.keyMap[rules_key_path]['value_datatype']
            if 'default_value' in self.keyMap[rules_key_path]:
                valid_dict[key] = self.keyMap[rules_key_path]['default_value']
            elif value_type == 'null':
                valid_dict[key] = None
            elif value_type == 'boolean':
                valid_dict[key] = False
            elif value_type == 'number':
                valid_dict[key] = 0.0
                if 'integer_data' in self.keyMap[rules_key_path].keys():
                    if self.keyMap[rules_key_path]['integer_data']:
                        valid_dict[key] = 0
            elif value_type == 'string':
                valid_dict[key] = ''
            elif value_type == 'list':
                valid_dict[key] = []
            elif value_type == 'map':
                # recurse with an empty input to materialize nested defaults
                valid_dict[key] = self._ingest_dict({}, schema_dict[key], key_path)
    # add extra fields if set to True
    if self.keyMap[rules_path_to_root]['extra_fields']:
        for key, value in input_dict.items():
            if key not in valid_dict.keys():
                valid_dict[key] = value
    return valid_dict
def _ingest_list(self, input_list, schema_list, path_to_root):
'''
a helper method for ingesting items in a list
:return: valid_list
'''
valid_list = []
# construct max list size
max_size = None
rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
if 'max_size' in self.keyMap[rules_path_to_root].keys():
if not self.keyMap[rules_path_to_root]['max_size']:
return valid_list
else:
max_size = self.keyMap[rules_path_to_root]['max_size']
# iterate over items in input list
if input_list:
rules_index = self._datatype_classes.index(schema_list[0].__class__)
rules_type = self._datatype_names[rules_index]
for i in range(len(input_list)):
item_path = '%s[%s]' % (path_to_root, i)
value_match = False
try:
item_index = self._datatype_classes.index(input_list[i].__class__)
item_type = self._datatype_names[item_index]
if item_type == rules_type:
value_match = True
except:
value_match = False
if value_match:
try:
if item_type == 'boolean':
valid_list.append(self._validate_boolean(input_list[i], item_path))
elif item_type == 'number':
valid_list.append(self._validate_number(input_list[i], item_path))
elif item_type == 'string':
valid_list.append(self._validate_string(input_list[i], item_path))
elif item_type == 'map':
valid_list.append(self._ingest_dict(input_list[i], schema_list[0], item_path))
elif item_type == 'list':
valid_list.append(self._ingest_list(input_list[i], schema_list[0], item_path))
except:
pass
if isinstance(max_size, int):
if len(valid_list) == max_size:
return valid_list
return valid_list
def _ingest_number(self, input_number, path_to_root):
'''
a helper method for ingesting a number
:return: valid_number
'''
valid_number = 0.0
try:
valid_number = self._validate_number(input_number, path_to_root)
except:
rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
if 'default_value' in self.keyMap[rules_path_to_root]:
valid_number = self.keyMap[rules_path_to_root]['default_value']
elif 'integer_data' in self.keyMap[rules_path_to_root].keys():
if self.keyMap[rules_path_to_root]['integer_data']:
valid_number = 0
return valid_number
def _ingest_string(self, input_string, path_to_root):
'''
a helper method for ingesting a string
:return: valid_string
'''
valid_string = ''
try:
valid_string = self._validate_string(input_string, path_to_root)
except:
rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
if 'default_value' in self.keyMap[rules_path_to_root]:
valid_string = self.keyMap[rules_path_to_root]['default_value']
return valid_string
def _ingest_boolean(self, input_boolean, path_to_root):
'''
a helper method for ingesting a boolean
:return: valid_boolean
'''
valid_boolean = False
try:
valid_boolean = self._validate_boolean(input_boolean, path_to_root)
except:
rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
if 'default_value' in self.keyMap[rules_path_to_root]:
valid_boolean = self.keyMap[rules_path_to_root]['default_value']
return valid_boolean
def _walk(self, path_to_root, record_dict):
    '''
    a helper method for finding the record endpoint from a path to root

    walks the dot/bracket path into the record, fanning out over every
    element whenever the path crosses a list index segment, and collects
    all matching endpoints.

    :param path_to_root: string with dot path to root from record
    :param record_dict: dictionary with record to walk
    :return: list of the list, dict, string, number, or boolean values
             found at path to root (raises on a missing key)
    '''
    # split path to root into segments
    # a segment like '0]' marks a list index; '.' and '[' are separators
    item_pattern = re.compile('\d+\\]')
    dot_pattern = re.compile('\\.|\\[')
    path_segments = dot_pattern.split(path_to_root)
    # construct empty fields
    record_endpoints = []
    # determine starting position (a leading '.' produces an empty segment)
    if not path_segments[0]:
        path_segments.pop(0)
    # define internal recursive function
    def _walk_int(path_segments, record_dict):
        record_endpoint = record_dict
        for i in range(0, len(path_segments)):
            if item_pattern.match(path_segments[i]):
                # list index segment: fan out over every element
                for j in range(0, len(record_endpoint)):
                    if len(path_segments) == 2:
                        # index is the final hop; collect each element
                        record_endpoints.append(record_endpoint[j])
                    else:
                        # only recurse from the first index segment in
                        # this call; deeper ones are handled by recursion
                        stop_chain = False
                        for x in range(0, i):
                            if item_pattern.match(path_segments[x]):
                                stop_chain = True
                        if not stop_chain:
                            shortened_segments = []
                            for z in range(i + 1, len(path_segments)):
                                shortened_segments.append(path_segments[z])
                            _walk_int(shortened_segments, record_endpoint[j])
            else:
                # key segment: descend (or collect, when it is the last one),
                # unless an earlier index segment already forked the walk
                stop_chain = False
                for y in range(0, i):
                    if item_pattern.match(path_segments[y]):
                        stop_chain = True
                if not stop_chain:
                    if len(path_segments) == i + 1:
                        record_endpoints.append(record_endpoint[path_segments[i]])
                    else:
                        record_endpoint = record_endpoint[path_segments[i]]
    # conduct recursive walk
    _walk_int(path_segments, record_dict)
    return record_endpoints
def validate(self, input_data, path_to_root='', object_title=''):
    '''
    a core method for validating input against the model

    input_data is only returned if all data is valid

    :param input_data: list, dict, string, number, or boolean to validate
    :param path_to_root: [optional] string with dot-path of model component
    :param object_title: [optional] string with name of input to validate
    :return: input_data (or InputValidationError)
    '''
    __name__ = '%s.validate' % self.__class__.__name__
    _path_arg = '%s(path_to_root="...")' % __name__
    _title_arg = '%s(object_title="...")' % __name__
    # validate input
    # normalize path_to_root to the internal dot-prefixed form
    copy_path = path_to_root
    if path_to_root:
        if not isinstance(path_to_root, str):
            raise ModelValidationError('%s must be a string.' % _path_arg)
        else:
            if path_to_root[0] != '.':
                copy_path = '.%s' % path_to_root
            if not copy_path in self.keyMap.keys():
                raise ModelValidationError('%s does not exist in components %s.' % (_path_arg.replace('...', path_to_root), self.keyMap.keys()))
    else:
        copy_path = '.'
    if object_title:
        if not isinstance(object_title, str):
            raise ModelValidationError('%s must be a string' % _title_arg)
    # construct generic error dictionary
    error_dict = {
        'object_title': object_title,
        'model_schema': self.schema,
        'input_criteria': self.keyMap[copy_path],
        'failed_test': 'value_datatype',
        'input_path': copy_path,
        'error_value': input_data,
        'error_code': 4001
    }
    # determine value type of input data
    # a class outside the recognized datatypes fails immediately
    try:
        input_index = self._datatype_classes.index(input_data.__class__)
    except:
        error_dict['error_value'] = input_data.__class__.__name__
        raise InputValidationError(error_dict)
    input_type = self._datatype_names[input_index]
    # validate input data type
    if input_type != self.keyMap[copy_path]['value_datatype']:
        raise InputValidationError(error_dict)
    # run helper method appropriate to data type
    if input_type == 'boolean':
        input_data = self._validate_boolean(input_data, copy_path, object_title)
    elif input_type == 'number':
        input_data = self._validate_number(input_data, copy_path, object_title)
    elif input_type == 'string':
        input_data = self._validate_string(input_data, copy_path, object_title)
    elif input_type == 'list':
        # rebuild the schema declaration at this path for recursion
        schema_list = self._reconstruct(copy_path)
        input_data = self._validate_list(input_data, schema_list, copy_path, object_title)
    elif input_type == 'map':
        schema_dict = self._reconstruct(copy_path)
        input_data = self._validate_dict(input_data, schema_dict, copy_path, object_title)
    return input_data
def ingest(self, **kwargs):
    '''
    a core method to ingest and validate arbitrary keyword data

    **NOTE: data is always returned with this method**

    for each key declared in the model, the returned value is chosen by
    the following priority:

    1. the value in kwargs, whenever it passes the field's validation
    2. the default value declared for the key in the model
    3. an empty value appropriate to the datatype of the key in the model

    **NOTE: so long as a default value is provided for each key-value,
    the returned data will be model valid

    **NOTE: if 'extra_fields' is True for a dictionary, any key-value
    pairs in kwargs which are not declared in the model are passed
    through to the corresponding dictionary in the result

    **NOTE: if 'max_size' is declared for a list, ingestion of that list
    stops once it reaches max size

    :param kwargs: key, value pairs
    :return: dictionary with keys and value
    '''
    __name__ = '%s.ingest' % self.__class__.__name__
    # delegate to the recursive dictionary ingestion helper at the root
    return self._ingest_dict(kwargs, self.schema, '.')
def query(self, query_criteria, valid_record=None):
'''
a core method for querying model valid data with criteria
**NOTE: input is only returned if all fields & qualifiers are valid for model
:param query_criteria: dictionary with model field names and query qualifiers
:param valid_record: dictionary with model valid record
:return: boolean (or QueryValidationError)
an example of how to construct the query_criteria argument:
query_criteria = {
'.path.to.number': {
'min_value': 4.5
},
'.path.to.string': {
'must_contain': [ '\\regex' ]
}
}
**NOTE: for a full list of operators for query_criteria based upon field
datatype, see either the query-rules.json file or REFERENCE file
'''
__name__ = '%s.query' % self.__class__.__name__
_query_arg = '%s(query_criteria={...})' % __name__
_record_arg = '%s(valid_record={...})' % __name__
# validate input
if not isinstance(query_criteria, dict):
raise ModelValidationError('%s must be a dictionary.' % _query_arg)
# convert javascript dot_path to class dot_path
criteria_copy = {}
equal_fields = []
dot_fields = []
for key, value in query_criteria.items():
copy_key = key
if not key:
copy_key = '.'
else:
if key[0] != '.':
copy_key = '.%s' % key
dot_fields.append(copy_key)
criteria_copy[copy_key] = value
if value.__class__ in self._datatype_classes[0:4]:
criteria_copy[copy_key] = {
'equal_to': value
}
equal_fields.append(copy_key)
# validate query criteria against query rules
query_kwargs = {
'fields_dict': criteria_copy,
'fields_rules': self.queryRules,
'declared_value': False
}
try:
self._validate_fields(**query_kwargs)
except ModelValidationError as err:
message = err.error['message']
for field in equal_fields:
equal_error = 'field %s qualifier equal_to' % field
if message.find(equal_error) > -1:
message = message.replace(equal_error, 'field %s' % field)
break
field_pattern = re.compile('ield\s(\..*?)\s')
field_name = field_pattern.findall(message)
if field_name:
if field_name[0] in dot_fields:
def _replace_field(x):
return 'ield %s ' % x.group(1)[1:]
message = field_pattern.sub(_replace_field, message)
raise QueryValidationError(message)
# query test record
if valid_record:
if not isinstance(valid_record, dict):
raise ModelValidationError('%s must be a dictionary.' % _record_arg)
for key, value in criteria_copy.items():
eval_outcome = self._evaluate_field(valid_record, key, value)
if not eval_outcome:
return False
return True
|
collectiveacuity/jsonModel
|
jsonmodel/validators.py
|
jsonModel._walk
|
python
|
def _walk(self, path_to_root, record_dict):
'''
a helper method for finding the record endpoint from a path to root
:param path_to_root: string with dot path to root from
:param record_dict:
:return: list, dict, string, number, or boolean at path to root
'''
# split path to root into segments
item_pattern = re.compile('\d+\\]')
dot_pattern = re.compile('\\.|\\[')
path_segments = dot_pattern.split(path_to_root)
# construct empty fields
record_endpoints = []
# determine starting position
if not path_segments[0]:
path_segments.pop(0)
# define internal recursive function
def _walk_int(path_segments, record_dict):
record_endpoint = record_dict
for i in range(0, len(path_segments)):
if item_pattern.match(path_segments[i]):
for j in range(0, len(record_endpoint)):
if len(path_segments) == 2:
record_endpoints.append(record_endpoint[j])
else:
stop_chain = False
for x in range(0, i):
if item_pattern.match(path_segments[x]):
stop_chain = True
if not stop_chain:
shortened_segments = []
for z in range(i + 1, len(path_segments)):
shortened_segments.append(path_segments[z])
_walk_int(shortened_segments, record_endpoint[j])
else:
stop_chain = False
for y in range(0, i):
if item_pattern.match(path_segments[y]):
stop_chain = True
if not stop_chain:
if len(path_segments) == i + 1:
record_endpoints.append(record_endpoint[path_segments[i]])
else:
record_endpoint = record_endpoint[path_segments[i]]
# conduct recursive walk
_walk_int(path_segments, record_dict)
return record_endpoints
|
a helper method for finding the record endpoint from a path to root
:param path_to_root: string with dot path to root from
:param record_dict:
:return: list, dict, string, number, or boolean at path to root
|
train
|
https://github.com/collectiveacuity/jsonModel/blob/1ea64c36d78add3faa7b85ff82c5ec685458c940/jsonmodel/validators.py#L1414-L1468
| null |
class jsonModel(object):
__rules__ = jsonLoader('jsonmodel', 'models/model-rules.json')
def __init__(self, data_model, query_rules=None):
    '''
    a method for testing data model declaration & initializing the class

    :param data_model: dictionary with json model architecture
    :param query_rules: [optional] dictionary with valid field type qualifiers
    :return: object with jsonModel methods
    :raises ModelValidationError: if the declaration violates the model rules
    '''
    # validate schema input
    if not isinstance(data_model, dict):
        raise ModelValidationError('Data model must be a dictionary.')
    elif 'schema' not in data_model.keys():
        raise ModelValidationError('Data model must have a schema key.')
    elif not isinstance(data_model['schema'], dict):
        raise ModelValidationError('Value for the data model "schema" field must be a dictionary.')
    elif not data_model['schema']:
        raise ModelValidationError('Data model "schema" field must not be empty.')
    # construct base methods (deep copy so the caller's dictionary is never mutated)
    from copy import deepcopy
    data_model = deepcopy(data_model)
    self.schema = data_model['schema']
    model_map = mapModel(self.schema)
    self.keyName = model_map.keyName
    self.keyCriteria = model_map.keyCriteria
    # construct protected type classes
    self._datatype_names = mapModel._datatype_names
    self._datatype_classes = mapModel._datatype_classes
    # validate absence of item designators in keys (only '[0]' is legal)
    item_pattern = re.compile('\[\d+\]')
    for i in range(len(self.keyName)):
        patterns_found = item_pattern.findall(self.keyName[i])
        if patterns_found:
            for designator in patterns_found:
                if designator != '[0]':
                    message = 'Key name for schema field %s must not contain the item designator pattern %s' % (self.keyName[i], designator)
                    raise ModelValidationError(message)
    # validate existence of first item in list declarations
    key_set = set(self.keyName)
    for i in range(len(self.keyName)):
        if self.keyCriteria[i]['value_datatype'] == 'list':
            item_key = self.keyName[i] + '[0]'
            if not item_key in key_set:
                message = 'Schema field %s must declare an initial item for the list.' % self.keyName[i]
                raise ModelValidationError(message)
            # alter list requirement if first item is empty
            else:
                item_index = self.keyName.index(item_key)
                if not self.keyCriteria[item_index]['required_field']:
                    self.keyCriteria[i]['required_field'] = False
    # validate title input & construct title method
    self.title = ''
    if 'title' in data_model.keys():
        if not isinstance(data_model['title'], str):
            raise ModelValidationError('Value for model title must be a string.')
        self.title = data_model['title']
    # validate description input & construct description method
    self.description = ''
    if 'description' in data_model.keys():
        if not isinstance(data_model['description'], str):
            raise ModelValidationError('Value for model description must be a string.')
        self.description = data_model['description']
    # validate url input & construct url method
    self.url = ''
    if 'url' in data_model.keys():
        if not isinstance(data_model['url'], str):
            raise ModelValidationError('Value for model url must be a string.')
        # BUGFIX: previously assigned to self.title, which clobbered any
        # declared title and left self.url permanently empty
        self.url = data_model['url']
    # validate metadata input & construct metadata method
    self.metadata = {}
    if 'metadata' in data_model.keys():
        if not isinstance(data_model['metadata'], dict):
            raise ModelValidationError('Value for model metadata must be a dictionary.')
        self.metadata = data_model['metadata']
    # validate max size input & construct maxSize property
    # self.maxSize = None
    # if 'max_size' in data_model.keys():
    #     if not isinstance(data_model['max_size'], int):
    #         raise ModelValidationError('Value for model max_size must be a positive integer.')
    #     elif data_model['max_size'] < 0:
    #         raise ModelValidationError('Value for model max_size must be a positive integer.')
    #     elif data_model['max_size']:
    #         self.maxSize = data_model['max_size']
    # validate components input & construct component property
    self.components = {}
    if 'components' in data_model.keys():
        if not isinstance(data_model['components'], dict):
            raise ModelValidationError('Value for model components must be a dictionary.')
        self.components = self._validate_fields(data_model['components'], self.__rules__['components'])
    # construct keyMap fields from key names and key criteria
    self.keyMap = {}
    for i in range(len(self.keyName)):
        self.keyMap[self.keyName[i]] = self.keyCriteria[i]
    for key, value in self.components.items():
        # convert javascript dot_path to class dot_path
        dot_key = ''
        if not key:
            dot_key = '.'
        else:
            if key[0] != '.':
                dot_key = '.%s' % key
        # add component declarations to keyMap
        if key in self.keyMap.keys():
            for k, v in self.components[key].items():
                self.keyMap[key][k] = v
        elif dot_key and dot_key in self.keyMap.keys():
            for k, v in self.components[key].items():
                self.keyMap[dot_key][k] = v
    # validate default values in lists
    self._validate_defaults(self.keyMap)
    # construct queryRules property from class model rules
    # (per-field qualifiers minus those that only apply to declarations)
    self.queryRules = {}
    for key, value in self.__rules__['components'].items():
        remove_from_query = [ 'required_field', 'default_value', 'example_values', 'field_title', 'field_position', 'field_description', 'field_metadata', 'extra_fields' ]
        field_qualifiers = {
            'value_exists': False
        }
        for k, v in value.items():
            if k not in remove_from_query:
                field_qualifiers[k] = v
        self.queryRules[key] = field_qualifiers
    # validate query rules input and replace queryRules property
    if query_rules:
        if not isinstance(query_rules, dict):
            message = 'Value for query rules input must be a dictionary.'
            raise ModelValidationError(message)
        input_set = set(query_rules.keys())
        req_set = set(self.queryRules.keys())
        if input_set - req_set:
            message = 'Query rules input may only have %s field key names.' % req_set
            raise ModelValidationError(message)
        elif req_set - input_set:
            message = 'Query rules input must have all %s field key names.' % req_set
            raise ModelValidationError(message)
        for key in req_set:
            if not isinstance(query_rules[key], dict):
                message = 'Value for query rules %s field must be a dictionary.' % key
                raise ModelValidationError(message)
            input_qualifier_set = set(query_rules[key].keys())
            req_qualifier_set = set(self.queryRules[key].keys())
            if input_qualifier_set - req_qualifier_set:
                message = 'Query rules field %s may only have qualifiers %s' % (key, req_qualifier_set)
                raise ModelValidationError(message)
            # each declared qualifier must keep the default qualifier's datatype
            for k, v in query_rules[key].items():
                if v.__class__ != self.queryRules[key][k].__class__:
                    qualifier_index = self._datatype_classes.index(self.queryRules[key][k].__class__)
                    qualifier_type = self._datatype_names[qualifier_index]
                    message = 'Value for query rules field %s qualifier %s must be a "%s" datatype.' % (key, k, qualifier_type)
                    raise ModelValidationError(message)
        self.queryRules = query_rules
def _validate_fields(self, fields_dict, fields_rules, declared_value=True):
    '''
    a helper method for validating field criteria against the model rules

    :param fields_dict: dictionary of qualifier criteria keyed by field dot-path
    :param fields_rules: dictionary with permitted qualifiers per field datatype
    :param declared_value: boolean to also test values declared in the schema
    :return: fields_dict
    :raises ModelValidationError: if any criterion violates the rules
    '''
    # validate key names in fields
    for key, value in fields_dict.items():
        # convert javascript dot_path to class dot_path
        if not key:
            key = '.'
        else:
            if key[0] != '.':
                key = '.%s' % key
        if key not in self.keyName:
            raise ModelValidationError('Field %s is not a field declared in model schema.' % key)
        elif not isinstance(value, dict):
            raise ModelValidationError('Value for field %s must be a dictionary.' % key)
        # validate field criteria are appropriate to field datatype
        value_type = self.keyCriteria[self.keyName.index(key)]['value_datatype']
        type_dict = {}
        if value_type == 'string':
            type_dict = fields_rules['.string_fields']
        elif value_type == 'number':
            type_dict = fields_rules['.number_fields']
        elif value_type == 'boolean':
            type_dict = fields_rules['.boolean_fields']
        elif value_type == 'list':
            type_dict = fields_rules['.list_fields']
        elif value_type == 'map':
            type_dict = fields_rules['.map_fields']
        elif value_type == 'null':
            type_dict = fields_rules['.null_fields']
        if set(value.keys()) - set(type_dict.keys()):
            raise ModelValidationError('Field %s may only have datatype %s qualifiers %s.' % (key, value_type, set(type_dict.keys())))
        # validate criteria qualifier values are appropriate datatype
        for k, v in value.items():
            v_index = self._datatype_classes.index(v.__class__)
            v_type = self._datatype_names[v_index]
            qualifier_index = self._datatype_classes.index(type_dict[k].__class__)
            qualifier_type = self._datatype_names[qualifier_index]
            if v_type != qualifier_type:
                message = 'Value for field %s qualifier %s must be a %s datatype.' % (key, k, qualifier_type)
                raise ModelValidationError(message)
            # a rule declared as an int restricts the qualifier to integers
            if qualifier_type == 'number':
                if isinstance(type_dict[k], int):
                    if not isinstance(v, int):
                        message = 'Value for field %s qualifier %s must be an integer.' % (key, k)
                        raise ModelValidationError(message)
            # validate internal logic of each qualifier value declaration
            if k in ('must_not_contain', 'must_contain', 'contains_either'):
                for item in v:
                    if not isinstance(item, str):
                        message = 'Each item in list field %s qualifier %s must be a string.' % (key, k)
                        raise ModelValidationError(message)
            if k in ('min_length', 'max_length', 'min_size', 'max_size'):
                if v < 0:
                    message = 'Value for field %s qualifier %s cannot be negative.' % (key, k)
                    raise ModelValidationError(message)
            if k in ('discrete_values', 'excluded_values', 'example_values'):
                for item in v:
                    if value_type == 'number':
                        if not isinstance(item, int) and not isinstance(item, float):
                            message = 'Each item in field %s qualifier %s list must be a number.' % (key, k)
                            raise ModelValidationError(message)
                    elif not isinstance(item, str):
                        message = 'Each item in list for field %s qualifier %s must be a string.' % (key, k)
                        raise ModelValidationError(message)
            if k == 'identical_to':
                if not v in self.keyName:
                    message = 'Value "%s" for field %s qualifier %s not found in components keys.' % (v, key, k)
                    raise ModelValidationError(message)
            if k == 'unique_values':
                if v:
                    # uniqueness only makes sense for hashable scalar items
                    item_name = key + '[0]'
                    item_type = self.keyCriteria[self.keyName.index(item_name)]['value_datatype']
                    if not item_type in ('number', 'string'):
                        message = 'Field %s[0] must be either a string or number if qualifier "unique_values": true' % key
                        raise ModelValidationError(message)
        # validate lack of other qualifiers if value exist is false
        if 'value_exists' in value.keys():
            if not value['value_exists']:
                if set(value.keys()) - {'value_exists'}:
                    message = 'If field %s qualifier value_exists: false, field may not have other qualifiers.' % key
                    raise ModelValidationError(message)
        # validate size qualifiers against each other
        size_qualifiers = ['min_size', 'max_size']
        for qualifier in size_qualifiers:
            if qualifier in value.keys():
                test_value = value[qualifier]
                value_path = 'field %s qualifier %s' % (key, qualifier)
                header = 'Value %s for %s' % (test_value, value_path)
                if 'min_size' in value.keys():
                    if test_value < value['min_size']:
                        message = '%s must not be less than "min_size": %s' % (header, value['min_size'])
                        raise ModelValidationError(message)
                if 'max_size' in value.keys():
                    if test_value > value['max_size']:
                        message = '%s must not be greater than "max_size": %s' % (header, value['max_size'])
                        raise ModelValidationError(message)
        # validate length qualifiers against each other
        length_qualifiers = ['min_length', 'max_length']
        for qualifier in length_qualifiers:
            if qualifier in value.keys():
                test_value = value[qualifier]
                value_path = 'field %s qualifier %s' % (key, qualifier)
                header = 'Value %s for %s' % (test_value, value_path)
                if 'min_length' in value.keys():
                    if test_value < value['min_length']:
                        message = '%s must be at least "min_length": %s' % (header, value['min_length'])
                        raise ModelValidationError(message)
                if 'max_length' in value.keys():
                    if test_value > value['max_length']:
                        message = '%s cannot be more than "max_length": %s' % (header, value['max_length'])
                        raise ModelValidationError(message)
        # validate range qualifiers against each other & length qualifiers
        range_qualifiers = ['min_value', 'max_value', 'greater_than', 'less_than', 'equal_to']
        for qualifier in range_qualifiers:
            if qualifier in value.keys():
                test_value = value[qualifier]
                value_path = 'field %s qualifier %s' % (key, qualifier)
                quote_text = ''
                if isinstance(test_value, str):
                    quote_text = '"'
                header = 'Value %s%s%s for %s' % (quote_text, test_value, quote_text, value_path)
                if 'min_value' in value.keys():
                    if test_value < value['min_value']:
                        message = '%s must not be less than "min_value": %s' % (header, value['min_value'])
                        raise ModelValidationError(message)
                if 'max_value' in value.keys():
                    if test_value > value['max_value']:
                        message = '%s must not be greater than "max_value": %s' % (header, value['max_value'])
                        raise ModelValidationError(message)
                if 'greater_than' in value.keys():
                    if test_value <= value['greater_than'] and not qualifier == 'greater_than':
                        message = '%s must be "greater_than": %s' % (header, value['greater_than'])
                        raise ModelValidationError(message)
                if 'less_than' in value.keys():
                    if test_value >= value['less_than'] and not qualifier == 'less_than':
                        message = '%s must be "less_than": %s' % (header, value['less_than'])
                        raise ModelValidationError(message)
                if 'min_length' in value.keys():
                    if len(test_value) < value['min_length']:
                        message = '%s must be at least "min_length": %s' % (header, value['min_length'])
                        raise ModelValidationError(message)
                if 'max_length' in value.keys():
                    if len(test_value) > value['max_length']:
                        message = '%s cannot be more than "max_length": %s' % (header, value['max_length'])
                        raise ModelValidationError(message)
                if 'integer_data' in value.keys():
                    if value['integer_data']:
                        if not isinstance(test_value, int):
                            message = '%s must be an "integer_data".' % header
                            raise ModelValidationError(message)
                if 'must_not_contain' in value.keys():
                    for regex in value['must_not_contain']:
                        regex_pattern = re.compile(regex)
                        if regex_pattern.findall(test_value):
                            message = '%s matches regex pattern in "must_not_contain": ["%s"]' % (header, regex)
                            raise ModelValidationError(message)
                if 'must_contain' in value.keys():
                    for regex in value['must_contain']:
                        regex_pattern = re.compile(regex)
                        if not regex_pattern.findall(test_value):
                            message = '%s does not match regex pattern in "must_contain": ["%s"].' % (header, regex)
                            raise ModelValidationError(message)
                if 'contains_either' in value.keys():
                    regex_match = False
                    regex_patterns = []
                    for regex in value['contains_either']:
                        regex_patterns.append(regex)
                        regex_pattern = re.compile(regex)
                        if regex_pattern.findall(test_value):
                            regex_match = True
                    if not regex_match:
                        message = '%s does not match any regex patterns in "contains_either": %s' % (header, regex_patterns)
                        raise ModelValidationError(message)
                if 'byte_data' in value.keys():
                    if value['byte_data']:
                        if qualifier != 'equal_to':
                            message = '%s cannot be used with base64 encoded "byte_data".' % header
                            raise ModelValidationError(message)
        # validate discrete value qualifiers against other criteria
        schema_field = self.keyCriteria[self.keyName.index(key)]
        discrete_qualifiers = ['declared_value', 'default_value', 'excluded_values', 'discrete_values', 'example_values']
        for qualifier in discrete_qualifiers:
            test_qualifier = False
            if qualifier in schema_field:
                test_qualifier = True
                if qualifier == 'declared_value' and not schema_field[qualifier]:
                    test_qualifier = False
            if qualifier in value.keys() or (test_qualifier and declared_value):
                multiple_values = False
                if qualifier in value.keys():
                    if isinstance(value[qualifier], list):
                        test_list = value[qualifier]
                        multiple_values = True
                    else:
                        test_list = [value[qualifier]]
                else:
                    test_list = [schema_field[qualifier]]
                value_path = 'field %s qualifier %s' % (key, qualifier)
                for i in range(len(test_list)):
                    test_value = test_list[i]
                    quote_text = ''
                    if isinstance(test_value, str):
                        quote_text = '"'
                    item_text = ''
                    if multiple_values:
                        item_text = '[%s]' % i
                    header = 'Value %s%s%s for %s%s' % (quote_text, test_value, quote_text, value_path, item_text)
                    if 'min_value' in value.keys():
                        if test_value < value['min_value']:
                            message = '%s must not be less than "min_value": %s' % (header, value['min_value'])
                            raise ModelValidationError(message)
                    if 'max_value' in value.keys():
                        if test_value > value['max_value']:
                            message = '%s must not be greater than "max_value": %s' % (header, value['max_value'])
                            raise ModelValidationError(message)
                    if 'equal_to' in value.keys():
                        if test_value != value['equal_to']:
                            # NOTE(review): the isinstance(test_value, bool) guard looks
                            # suspicious — possibly 'not isinstance' was intended; confirm
                            if qualifier != 'declared_value' and isinstance(test_value, bool):
                                message = '%s must be "equal_to": %s' % (header, value['equal_to'])
                                raise ModelValidationError(message)
                    if 'greater_than' in value.keys():
                        if test_value <= value['greater_than']:
                            message = '%s must be "greater_than": %s' % (header, value['greater_than'])
                            raise ModelValidationError(message)
                    if 'less_than' in value.keys():
                        if test_value >= value['less_than']:
                            message = '%s must be "less_than": %s' % (header, value['less_than'])
                            raise ModelValidationError(message)
                    if 'integer_data' in value.keys():
                        if value['integer_data']:
                            if not isinstance(test_value, int):
                                message = '%s must be an "integer_data".' % header
                                raise ModelValidationError(message)
                    if 'min_length' in value.keys():
                        if len(test_value) < value['min_length']:
                            message = '%s must be at least "min_length": %s' % (header, value['min_length'])
                            raise ModelValidationError(message)
                    if 'max_length' in value.keys():
                        if len(test_value) > value['max_length']:
                            message = '%s cannot be more than "max_length": %s' % (header, value['max_length'])
                            raise ModelValidationError(message)
                    if 'must_not_contain' in value.keys():
                        for regex in value['must_not_contain']:
                            regex_pattern = re.compile(regex)
                            if regex_pattern.findall(test_value):
                                message = '%s matches regex pattern in "must_not_contain": ["%s"]' % (header, regex)
                                raise ModelValidationError(message)
                    if 'must_contain' in value.keys():
                        for regex in value['must_contain']:
                            regex_pattern = re.compile(regex)
                            if not regex_pattern.findall(test_value):
                                message = '%s does not match regex pattern in "must_contain": ["%s"]' % (header, regex)
                                raise ModelValidationError(message)
                    if 'contains_either' in value.keys():
                        regex_match = False
                        regex_patterns = []
                        for regex in value['contains_either']:
                            regex_patterns.append(regex)
                            regex_pattern = re.compile(regex)
                            if regex_pattern.findall(test_value):
                                regex_match = True
                        if not regex_match:
                            message = '%s does not match any regex patterns in "contains_either": %s' % (header, regex_patterns)
                            raise ModelValidationError(message)
                    if 'byte_data' in value.keys():
                        # the discrete value must decode cleanly as base64 bytes
                        message = '%s cannot be base64 decoded to "byte_data".' % header
                        try:
                            decoded_bytes = b64decode(test_value)
                        except:
                            raise ModelValidationError(message)
                        if not isinstance(decoded_bytes, bytes):
                            raise ModelValidationError(message)
        # validate discrete value qualifiers against each other
        for qualifier in discrete_qualifiers:
            test_qualifier = False
            if qualifier in schema_field:
                test_qualifier = True
                if qualifier == 'declared_value' and not schema_field[qualifier]:
                    test_qualifier = False
            if qualifier in value.keys() or (test_qualifier and declared_value):
                multiple_values = False
                if qualifier in value.keys():
                    if isinstance(value[qualifier], list):
                        test_list = value[qualifier]
                        multiple_values = True
                    else:
                        test_list = [value[qualifier]]
                else:
                    test_list = [schema_field[qualifier]]
                value_path = 'field %s qualifier %s' % (key, qualifier)
                for i in range(len(test_list)):
                    test_value = test_list[i]
                    quote_text = ''
                    if isinstance(test_value, str):
                        quote_text = '"'
                    item_text = ''
                    if multiple_values:
                        item_text = '[%s]' % i
                    header = 'Value %s%s%s for %s%s' % (quote_text, test_value, quote_text, value_path, item_text)
                    if 'excluded_values' in value.keys():
                        if not qualifier == 'excluded_values':
                            if test_value in value['excluded_values']:
                                message = '%s cannot be one of "excluded_values": %s.' % (header, value['excluded_values'])
                                raise ModelValidationError(message)
                    if 'discrete_values' in value.keys():
                        # NOTE(review): comparing against 'excluded_values' here looks like a
                        # copy-paste slip — 'discrete_values' seems intended; confirm upstream
                        if not qualifier == 'excluded_values':
                            if test_value not in value['discrete_values']:
                                message = '%s must be one of "discrete_values": %s' % (header, value['discrete_values'])
                                raise ModelValidationError(message)
    return fields_dict
def _validate_defaults(self, fields_dict):
    '''
    a helper method for validating the default values declared for fields

    :param fields_dict: dictionary of field criteria keyed by field dot-path
    :return: fields_dict
    :raises ModelValidationError: if a declared default fails validation
    '''
    qualifier = 'default_value'
    for field_key, criteria in fields_dict.items():
        # look up the datatype the schema declares for this field
        datatype = self.keyCriteria[self.keyName.index(field_key)]['value_datatype']
        if qualifier not in criteria.keys():
            continue
        declared_default = criteria[qualifier]
        is_plural = isinstance(declared_default, list)
        candidates = declared_default if is_plural else [declared_default]
        base_path = 'field %s qualifier %s' % (field_key, qualifier)
        for position, candidate in enumerate(candidates):
            quotes = '"' if isinstance(candidate, str) else ''
            suffix = '[%s]' % position if is_plural else ''
            qualifier_text = base_path + suffix
            header = 'Value %s%s%s for %s' % (quotes, candidate, quotes, qualifier_text)
            # a default declared for a list field must itself pass as a list item
            if datatype == 'list':
                default_item_key = '%s[0]' % field_key
                try:
                    self.validate(candidate, default_item_key, object_title=header)
                except Exception as err:
                    # re-point the validation message at the default declaration
                    raise ModelValidationError(str(err).strip().replace('field %s' % default_item_key, qualifier_text))
    return fields_dict
def _evaluate_field(self, record_dict, field_name, field_criteria):
    ''' a helper method for evaluating record values based upon query criteria

    each criterion is satisfied if AT LEAST ONE value found at the field path
    satisfies it (must_not_contain and excluded_values fail if ANY value hits)

    :param record_dict: dictionary with model valid data to evaluate
    :param field_name: string with path to root of query field
    :param field_criteria: dictionary with query operators and qualifiers
    :return: boolean (True if no field_criteria evaluate to false)
    '''
    # determine value existence criteria
    value_exists = True
    if 'value_exists' in field_criteria.keys():
        if not field_criteria['value_exists']:
            value_exists = False
    # validate existence of field
    # NOTE(review): bare except treats ANY walk error as "field missing"
    field_exists = True
    try:
        record_values = self._walk(field_name, record_dict)
    except:
        field_exists = False
    # evaluate existence query criteria
    if value_exists != field_exists:
        return False
    elif not value_exists:
        return True
    # convert javascript dot_path to class dot_path
    field_key = field_name
    if not field_name:
        field_key = '.'
    else:
        if field_name[0] != '.':
            field_key = '.%s' % field_name
    # evaluate other query criteria
    for key, value in field_criteria.items():
        if key in ('min_size', 'min_length'):
            found = False
            if self.keyMap[field_key]['value_datatype'] == 'map':
                for record_value in record_values:
                    # map size measured as serialized byte length; the 51-byte
                    # offset presumably discounts wrapper overhead — TODO confirm
                    record_size = sys.getsizeof(json.dumps(str(record_value)).replace(' ','')) - 51
                    if record_size >= value:
                        found = True
                        break
            else:
                for record_value in record_values:
                    if len(record_value) >= value:
                        found = True
                        break
            if not found:
                return False
        elif key in ('max_size', 'max_length'):
            found = False
            if self.keyMap[field_key]['value_datatype'] == 'map':
                for record_value in record_values:
                    record_size = sys.getsizeof(json.dumps(str(record_value)).replace(' ','')) - 51
                    if record_size <= value:
                        found = True
                        break
            else:
                for record_value in record_values:
                    if len(record_value) <= value:
                        found = True
                        break
            if not found:
                return False
        elif key == 'min_value':
            found = False
            for record_value in record_values:
                if record_value >= value:
                    found = True
                    break
            if not found:
                return False
        elif key == 'max_value':
            found = False
            for record_value in record_values:
                if record_value <= value:
                    found = True
                    break
            if not found:
                return False
        elif key == 'equal_to':
            found = False
            for record_value in record_values:
                if record_value == value:
                    found = True
                    break
            if not found:
                return False
        elif key == 'greater_than':
            found = False
            for record_value in record_values:
                if record_value > value:
                    found = True
                    break
            if not found:
                return False
        elif key == 'less_than':
            found = False
            for record_value in record_values:
                if record_value < value:
                    found = True
                    break
            if not found:
                return False
        elif key == 'excluded_values':
            # fails if ANY record value is excluded
            for record_value in record_values:
                if record_value in value:
                    return False
        elif key == 'discrete_values':
            found = False
            for record_value in record_values:
                if record_value in value:
                    found = True
                    break
            if not found:
                return False
        elif key == 'integer_data':
            # exact class comparison so that bool (a subclass of int) does not count
            found = False
            dummy_int = 1
            for record_value in record_values:
                if record_value.__class__ == dummy_int.__class__:
                    found = True
                    break
            if value != found:
                return False
        elif key == 'byte_data':
            # qualifier may be declared true or false; outcome must match
            found = False
            for record_value in record_values:
                try:
                    decoded_bytes = b64decode(record_value)
                except:
                    decoded_bytes = ''
                if isinstance(decoded_bytes, bytes):
                    found = True
                    break
            if value != found:
                return False
        elif key == 'must_contain':
            # every regex must be matched by at least one value
            for regex in value:
                regex_pattern = re.compile(regex)
                found = False
                for record_value in record_values:
                    if regex_pattern.findall(record_value):
                        found = True
                        break
                if not found:
                    return False
        elif key == 'must_not_contain':
            # any regex hit on any value fails the criterion
            for regex in value:
                regex_pattern = re.compile(regex)
                found = False
                for record_value in record_values:
                    if regex_pattern.findall(record_value):
                        found = True
                        break
                if found:
                    return False
        elif key == 'contains_either':
            # at least one regex must match at least one value
            found = False
            for regex in value:
                regex_pattern = re.compile(regex)
                for record_value in record_values:
                    if regex_pattern.findall(record_value):
                        found = True
                        break
                if found:
                    break
            if not found:
                return False
        elif key == 'unique_values':
            for record_value in record_values:
                unique_values = True
                if len(record_value) != len(set(record_value)):
                    unique_values = False
                if value != unique_values:
                    return False
    return True
def _validate_dict(self, input_dict, schema_dict, path_to_root, object_title=''):
    ''' a helper method for recursively validating keys in dictionaries

    :param input_dict: dictionary at path_to_root in the input record
    :param schema_dict: dictionary at the same path in the model schema
    :param path_to_root: string with dot path from record root to input_dict
    :param object_title: [optional] string with title for error reporting
    :return: input_dict (with defaults injected for missing optional fields)
    :raises InputValidationError: on any rule violation
    '''
    # reconstruct key path to current dictionary in model
    # (list item designators like [3] collapse to [0], the declared prototype)
    rules_top_level_key = re.sub('\[\d+\]', '[0]', path_to_root)
    map_rules = self.keyMap[rules_top_level_key]
    # construct list error report template
    map_error = {
        'object_title': object_title,
        'model_schema': self.schema,
        'input_criteria': map_rules,
        'failed_test': 'value_datatype',
        'input_path': path_to_root,
        'error_value': 0,
        'error_code': 4001
    }
    # validate map size
    # size measured on a serialized copy; -51 presumably discounts wrapper
    # overhead of the dumps/str envelope — TODO confirm
    if 'min_size' in map_rules.keys():
        input_size = sys.getsizeof(json.dumps(str(input_dict)).replace(' ','')) - 51
        if input_size < map_rules['min_size']:
            map_error['failed_test'] = 'min_size'
            map_error['error_value'] = input_size
            map_error['error_code'] = 4031
            raise InputValidationError(map_error)
    if 'max_size' in map_rules.keys():
        input_size = sys.getsizeof(json.dumps(str(input_dict)).replace(' ','')) - 51
        if input_size > map_rules['max_size']:
            map_error['failed_test'] = 'max_size'
            map_error['error_value'] = input_size
            map_error['error_code'] = 4032
            raise InputValidationError(map_error)
    # construct lists of keys in input dictionary
    input_keys = []
    input_key_list = []
    for key in input_dict.keys():
        error_dict = {
            'object_title': object_title,
            'model_schema': self.schema,
            'input_criteria': self.keyMap[rules_top_level_key],
            'failed_test': 'key_datatype',
            'input_path': path_to_root,
            'error_value': key,
            'error_code': 4004
        }
        # NOTE(review): input_criteria aliases the self.keyMap entry, so this
        # assignment mutates the shared keyMap dictionary — confirm intended
        error_dict['input_criteria']['key_datatype'] = 'string'
        if path_to_root == '.':
            if not isinstance(key, str):
                input_key_name = path_to_root + str(key)
                error_dict['input_path'] = input_key_name
                raise InputValidationError(error_dict)
            input_key_name = path_to_root + key
        else:
            if not isinstance(key, str):
                input_key_name = path_to_root + '.' + str(key)
                error_dict['input_path'] = input_key_name
                raise InputValidationError(error_dict)
            input_key_name = path_to_root + '.' + key
        input_keys.append(input_key_name)
        input_key_list.append(key)
    # TODO: validate top-level key and values against identical to reference
    # TODO: run lambda function and call validation
    # construct lists of keys in schema dictionary
    max_keys = []
    max_key_list = []
    req_keys = []
    req_key_list = []
    for key in schema_dict.keys():
        if path_to_root == '.':
            schema_key_name = path_to_root + key
        else:
            schema_key_name = path_to_root + '.' + key
        max_keys.append(schema_key_name)
        max_key_list.append(key)
        rules_schema_key_name = re.sub('\[\d+\]', '[0]', schema_key_name)
        if self.keyMap[rules_schema_key_name]['required_field']:
            req_keys.append(schema_key_name)
            req_key_list.append(key)
    # validate existence of required fields
    missing_keys = set(req_keys) - set(input_keys)
    if missing_keys:
        error_dict = {
            'object_title': object_title,
            'model_schema': self.schema,
            'input_criteria': self.keyMap[rules_top_level_key],
            'failed_test': 'required_field',
            'input_path': path_to_root,
            'error_value': list(missing_keys),
            'error_code': 4002
        }
        error_dict['input_criteria']['required_keys'] = req_keys
        raise InputValidationError(error_dict)
    # validate existence of extra fields
    extra_keys = set(input_keys) - set(max_keys)
    if extra_keys and not self.keyMap[rules_top_level_key]['extra_fields']:
        extra_key_list = []
        for key in extra_keys:
            pathless_key = re.sub(rules_top_level_key, '', key, count=1)
            extra_key_list.append(pathless_key)
        error_dict = {
            'object_title': object_title,
            'model_schema': self.schema,
            'input_criteria': self.keyMap[rules_top_level_key],
            'failed_test': 'extra_fields',
            'input_path': path_to_root,
            'error_value': extra_key_list,
            'error_code': 4003
        }
        error_dict['input_criteria']['maximum_scope'] = max_key_list
        raise InputValidationError(error_dict)
    # validate datatype of value
    for key, value in input_dict.items():
        if path_to_root == '.':
            input_key_name = path_to_root + key
        else:
            input_key_name = path_to_root + '.' + key
        rules_input_key_name = re.sub('\[\d+\]', '[0]', input_key_name)
        if input_key_name in max_keys:
            input_criteria = self.keyMap[rules_input_key_name]
            error_dict = {
                'object_title': object_title,
                'model_schema': self.schema,
                'input_criteria': input_criteria,
                'failed_test': 'value_datatype',
                'input_path': input_key_name,
                'error_value': value,
                'error_code': 4001
            }
            try:
                value_index = self._datatype_classes.index(value.__class__)
            except:
                # value class is not one of the recognized datatypes
                error_dict['error_value'] = value.__class__.__name__
                raise InputValidationError(error_dict)
            value_type = self._datatype_names[value_index]
            # 'null' schema declarations accept any datatype
            if input_criteria['value_datatype'] == 'null':
                pass
            else:
                if value_type != input_criteria['value_datatype']:
                    raise InputValidationError(error_dict)
            # call appropriate validation sub-routine for datatype of value
            if value_type == 'boolean':
                input_dict[key] = self._validate_boolean(value, input_key_name, object_title)
            elif value_type == 'number':
                input_dict[key] = self._validate_number(value, input_key_name, object_title)
            elif value_type == 'string':
                input_dict[key] = self._validate_string(value, input_key_name, object_title)
            elif value_type == 'map':
                input_dict[key] = self._validate_dict(value, schema_dict[key], input_key_name, object_title)
            elif value_type == 'list':
                input_dict[key] = self._validate_list(value, schema_dict[key], input_key_name, object_title)
    # set default values for empty optional fields
    for key in max_key_list:
        if key not in input_key_list:
            indexed_key = max_keys[max_key_list.index(key)]
            if indexed_key in self.components.keys():
                if 'default_value' in self.components[indexed_key]:
                    input_dict[key] = self.components[indexed_key]['default_value']
    return input_dict
def _validate_list(self, input_list, schema_list, path_to_root, object_title=''):

    '''
    a helper method for recursively validating items in a list

    :param input_list: list with items to validate against the model
    :param schema_list: list with the schema declaration for the items
    :param path_to_root: string with dot-path to the list in the model
    :param object_title: [optional] string with name of input to validate
    :return: input_list (or raises InputValidationError)
    '''

    # construct rules for list and items
    # item indices are normalized to [0] because keyMap stores one rule set per list
    rules_path_to_root = re.sub(r'\[\d+\]', '[0]', path_to_root)
    list_rules = self.keyMap[rules_path_to_root]
    initial_key = rules_path_to_root + '[0]'
    item_rules = self.keyMap[initial_key]

    # construct list error report template
    # failed_test, error_value and error_code are overwritten per failed criterion
    list_error = {
        'object_title': object_title,
        'model_schema': self.schema,
        'input_criteria': list_rules,
        'failed_test': 'value_datatype',
        'input_path': path_to_root,
        'error_value': 0,
        'error_code': 4001
    }

    # validate list size rules
    if 'min_size' in list_rules.keys():
        if len(input_list) < list_rules['min_size']:
            list_error['failed_test'] = 'min_size'
            list_error['error_value'] = len(input_list)
            list_error['error_code'] = 4031
            raise InputValidationError(list_error)
    if 'max_size' in list_rules.keys():
        if len(input_list) > list_rules['max_size']:
            list_error['failed_test'] = 'max_size'
            list_error['error_value'] = len(input_list)
            list_error['error_code'] = 4032
            raise InputValidationError(list_error)

    # construct item error report template (mutated in place for each item)
    item_error = {
        'object_title': object_title,
        'model_schema': self.schema,
        'input_criteria': item_rules,
        'failed_test': 'value_datatype',
        'input_path': initial_key,
        'error_value': None,
        'error_code': 4001
    }

    # validate datatype of items
    for i in range(len(input_list)):
        input_path = path_to_root + '[%s]' % i
        item = input_list[i]
        item_error['input_path'] = input_path
        try:
            item_index = self._datatype_classes.index(item.__class__)
        except ValueError:
            # item class is not a recognized json datatype
            item_error['error_value'] = item.__class__.__name__
            raise InputValidationError(item_error)
        item_type = self._datatype_names[item_index]
        item_error['error_value'] = item
        if item_rules['value_datatype'] == 'null':
            pass
        else:
            if item_type != item_rules['value_datatype']:
                raise InputValidationError(item_error)

        # call appropriate validation sub-routine for datatype of item
        if item_type == 'boolean':
            input_list[i] = self._validate_boolean(item, input_path, object_title)
        elif item_type == 'number':
            input_list[i] = self._validate_number(item, input_path, object_title)
        elif item_type == 'string':
            input_list[i] = self._validate_string(item, input_path, object_title)
        elif item_type == 'map':
            input_list[i] = self._validate_dict(item, schema_list[0], input_path, object_title)
        elif item_type == 'list':
            input_list[i] = self._validate_list(item, schema_list[0], input_path, object_title)

    # validate unique values in list
    # NOTE: set() requires all items to be hashable; unhashable items would raise here
    if 'unique_values' in list_rules.keys():
        if len(set(input_list)) < len(input_list):
            list_error['failed_test'] = 'unique_values'
            list_error['error_value'] = input_list
            list_error['error_code'] = 4033
            raise InputValidationError(list_error)

    # TODO: validate top-level item values against identical to reference
    # TODO: run lambda function and call validation url

    return input_list
def _validate_number(self, input_number, path_to_root, object_title=''):

    '''
    a helper method for validating properties of a number

    :param input_number: integer or float to validate
    :param path_to_root: string with dot-path to the number in the model
    :param object_title: [optional] string with name of input to validate
    :return: input_number (or raises InputValidationError)
    '''

    # retrieve criteria for number (list indices normalized to [0])
    rules_path_to_root = re.sub(r'\[\d+\]', '[0]', path_to_root)
    input_criteria = self.keyMap[rules_path_to_root]

    # error template; failed_test and error_code are updated per failed criterion
    error_dict = {
        'object_title': object_title,
        'model_schema': self.schema,
        'input_criteria': input_criteria,
        'failed_test': 'value_datatype',
        'input_path': path_to_root,
        'error_value': input_number,
        'error_code': 4001
    }

    # validate each declared criterion in order
    if 'integer_data' in input_criteria.keys():
        if input_criteria['integer_data'] and not isinstance(input_number, int):
            error_dict['failed_test'] = 'integer_data'
            error_dict['error_code'] = 4021
            raise InputValidationError(error_dict)
    if 'min_value' in input_criteria.keys():
        if input_number < input_criteria['min_value']:
            error_dict['failed_test'] = 'min_value'
            error_dict['error_code'] = 4022
            raise InputValidationError(error_dict)
    if 'max_value' in input_criteria.keys():
        if input_number > input_criteria['max_value']:
            error_dict['failed_test'] = 'max_value'
            error_dict['error_code'] = 4023
            raise InputValidationError(error_dict)
    if 'greater_than' in input_criteria.keys():
        if input_number <= input_criteria['greater_than']:
            error_dict['failed_test'] = 'greater_than'
            error_dict['error_code'] = 4024
            raise InputValidationError(error_dict)
    if 'less_than' in input_criteria.keys():
        if input_number >= input_criteria['less_than']:
            error_dict['failed_test'] = 'less_than'
            error_dict['error_code'] = 4025
            raise InputValidationError(error_dict)
    if 'equal_to' in input_criteria.keys():
        if input_number != input_criteria['equal_to']:
            error_dict['failed_test'] = 'equal_to'
            error_dict['error_code'] = 4026
            raise InputValidationError(error_dict)
    if 'discrete_values' in input_criteria.keys():
        if input_number not in input_criteria['discrete_values']:
            error_dict['failed_test'] = 'discrete_values'
            error_dict['error_code'] = 4041
            raise InputValidationError(error_dict)
    if 'excluded_values' in input_criteria.keys():
        if input_number in input_criteria['excluded_values']:
            error_dict['failed_test'] = 'excluded_values'
            error_dict['error_code'] = 4042
            raise InputValidationError(error_dict)

    # TODO: validate number against identical to reference
    # TODO: run lambda function and call validation url

    return input_number
def _validate_string(self, input_string, path_to_root, object_title=''):

    '''
    a helper method for validating properties of a string

    :param input_string: string to validate
    :param path_to_root: string with dot-path to the string in the model
    :param object_title: [optional] string with name of input to validate
    :return: input_string (or raises InputValidationError)
    '''

    # retrieve criteria for string (list indices normalized to [0])
    rules_path_to_root = re.sub(r'\[\d+\]', '[0]', path_to_root)
    input_criteria = self.keyMap[rules_path_to_root]

    # error template; failed_test and error_code are updated per failed criterion
    error_dict = {
        'object_title': object_title,
        'model_schema': self.schema,
        'input_criteria': input_criteria,
        'failed_test': 'value_datatype',
        'input_path': path_to_root,
        'error_value': input_string,
        'error_code': 4001
    }

    # validate string is base64-decodable when byte data is required
    if 'byte_data' in input_criteria.keys():
        if input_criteria['byte_data']:
            error_dict['failed_test'] = 'byte_data'
            error_dict['error_code'] = 4011
            try:
                decoded_bytes = b64decode(input_string)
            except Exception:
                # b64decode raises binascii.Error / TypeError on malformed input
                raise InputValidationError(error_dict)
            if not isinstance(decoded_bytes, bytes):
                raise InputValidationError(error_dict)

    # validate value comparisons (lexicographic ordering for strings)
    if 'min_value' in input_criteria.keys():
        if input_string < input_criteria['min_value']:
            error_dict['failed_test'] = 'min_value'
            error_dict['error_code'] = 4022
            raise InputValidationError(error_dict)
    if 'max_value' in input_criteria.keys():
        if input_string > input_criteria['max_value']:
            error_dict['failed_test'] = 'max_value'
            error_dict['error_code'] = 4023
            raise InputValidationError(error_dict)
    if 'greater_than' in input_criteria.keys():
        if input_string <= input_criteria['greater_than']:
            error_dict['failed_test'] = 'greater_than'
            error_dict['error_code'] = 4024
            raise InputValidationError(error_dict)
    if 'less_than' in input_criteria.keys():
        if input_string >= input_criteria['less_than']:
            error_dict['failed_test'] = 'less_than'
            error_dict['error_code'] = 4025
            raise InputValidationError(error_dict)
    if 'equal_to' in input_criteria.keys():
        if input_string != input_criteria['equal_to']:
            error_dict['failed_test'] = 'equal_to'
            error_dict['error_code'] = 4026
            raise InputValidationError(error_dict)

    # validate length constraints
    if 'min_length' in input_criteria.keys():
        if len(input_string) < input_criteria['min_length']:
            error_dict['failed_test'] = 'min_length'
            error_dict['error_code'] = 4012
            raise InputValidationError(error_dict)
    if 'max_length' in input_criteria.keys():
        if len(input_string) > input_criteria['max_length']:
            error_dict['failed_test'] = 'max_length'
            error_dict['error_code'] = 4013
            raise InputValidationError(error_dict)

    # validate regex constraints
    # criteria values are treated as regex patterns and matched anywhere in the string
    if 'must_not_contain' in input_criteria.keys():
        for regex in input_criteria['must_not_contain']:
            regex_pattern = re.compile(regex)
            if regex_pattern.findall(input_string):
                error_dict['failed_test'] = 'must_not_contain'
                error_dict['error_code'] = 4014
                raise InputValidationError(error_dict)
    if 'must_contain' in input_criteria.keys():
        for regex in input_criteria['must_contain']:
            regex_pattern = re.compile(regex)
            if not regex_pattern.findall(input_string):
                error_dict['failed_test'] = 'must_contain'
                error_dict['error_code'] = 4015
                raise InputValidationError(error_dict)
    if 'contains_either' in input_criteria.keys():
        regex_match = False
        for regex in input_criteria['contains_either']:
            regex_pattern = re.compile(regex)
            if regex_pattern.findall(input_string):
                regex_match = True
        if not regex_match:
            error_dict['failed_test'] = 'contains_either'
            error_dict['error_code'] = 4016
            raise InputValidationError(error_dict)

    # validate enumerated value constraints
    if 'discrete_values' in input_criteria.keys():
        if input_string not in input_criteria['discrete_values']:
            error_dict['failed_test'] = 'discrete_values'
            error_dict['error_code'] = 4041
            raise InputValidationError(error_dict)
    if 'excluded_values' in input_criteria.keys():
        if input_string in input_criteria['excluded_values']:
            error_dict['failed_test'] = 'excluded_values'
            error_dict['error_code'] = 4042
            raise InputValidationError(error_dict)

    # TODO: validate string against identical to reference
    # TODO: run lambda function and call validation url

    return input_string
def _validate_boolean(self, input_boolean, path_to_root, object_title=''):

    '''
    a helper method for validating properties of a boolean

    :param input_boolean: boolean to validate
    :param path_to_root: string with dot-path to the boolean in the model
    :param object_title: [optional] string with name of input to validate
    :return: input_boolean (or raises InputValidationError)
    '''

    # retrieve criteria for boolean (list indices normalized to [0])
    rules_path_to_root = re.sub(r'\[\d+\]', '[0]', path_to_root)
    input_criteria = self.keyMap[rules_path_to_root]

    # error template; failed_test and error_code are updated on failure
    error_dict = {
        'object_title': object_title,
        'model_schema': self.schema,
        'input_criteria': input_criteria,
        'failed_test': 'value_datatype',
        'input_path': path_to_root,
        'error_value': input_boolean,
        'error_code': 4001
    }

    # equal_to is the only criterion applicable to booleans
    if 'equal_to' in input_criteria.keys():
        if input_boolean != input_criteria['equal_to']:
            error_dict['failed_test'] = 'equal_to'
            error_dict['error_code'] = 4026
            raise InputValidationError(error_dict)

    # TODO: validate boolean against identical to reference
    # TODO: run lambda function and call validation url

    return input_boolean
def _ingest_dict(self, input_dict, schema_dict, path_to_root):

    '''
    a helper method for ingesting key, value pairs in a dictionary

    for each key declared in the schema, the value is taken from (in priority):
    1. input_dict, if the input value matches the schema datatype
    2. the default_value declared for the key in the model
    3. an empty value appropriate to the datatype of the key

    :param input_dict: dictionary with arbitrary input data
    :param schema_dict: dictionary with schema declaration at path
    :param path_to_root: string with dot-path to the dictionary in the model
    :return: valid_dict
    '''

    valid_dict = {}

    # construct path to root for rules (list indices normalized to [0])
    rules_path_to_root = re.sub(r'\[\d+\]', '[0]', path_to_root)

    # iterate over keys declared in schema dict
    for key, value in schema_dict.items():
        key_path = path_to_root
        if not key_path == '.':
            key_path += '.'
        key_path += key
        rules_key_path = re.sub(r'\[\d+\]', '[0]', key_path)
        value_match = False
        if key in input_dict.keys():
            value_index = self._datatype_classes.index(value.__class__)
            value_type = self._datatype_names[value_index]
            try:
                v_index = self._datatype_classes.index(input_dict[key].__class__)
                v_type = self._datatype_names[v_index]
                if v_type == value_type:
                    value_match = True
            except ValueError:
                # input value class is not a recognized json datatype
                value_match = False
        if value_match:
            # input value has the declared datatype; ingest it with sub-routine
            if value_type == 'null':
                valid_dict[key] = input_dict[key]
            elif value_type == 'boolean':
                valid_dict[key] = self._ingest_boolean(input_dict[key], key_path)
            elif value_type == 'number':
                valid_dict[key] = self._ingest_number(input_dict[key], key_path)
            elif value_type == 'string':
                valid_dict[key] = self._ingest_string(input_dict[key], key_path)
            elif value_type == 'map':
                valid_dict[key] = self._ingest_dict(input_dict[key], schema_dict[key], key_path)
            elif value_type == 'list':
                valid_dict[key] = self._ingest_list(input_dict[key], schema_dict[key], key_path)
        else:
            # no usable input value; fall back to default, else an empty value
            value_type = self.keyMap[rules_key_path]['value_datatype']
            if 'default_value' in self.keyMap[rules_key_path]:
                valid_dict[key] = self.keyMap[rules_key_path]['default_value']
            elif value_type == 'null':
                valid_dict[key] = None
            elif value_type == 'boolean':
                valid_dict[key] = False
            elif value_type == 'number':
                valid_dict[key] = 0.0
                if 'integer_data' in self.keyMap[rules_key_path].keys():
                    if self.keyMap[rules_key_path]['integer_data']:
                        valid_dict[key] = 0
            elif value_type == 'string':
                valid_dict[key] = ''
            elif value_type == 'list':
                valid_dict[key] = []
            elif value_type == 'map':
                valid_dict[key] = self._ingest_dict({}, schema_dict[key], key_path)

    # add undeclared input fields when extra_fields is set to true in the model
    if self.keyMap[rules_path_to_root]['extra_fields']:
        for key, value in input_dict.items():
            if key not in valid_dict.keys():
                valid_dict[key] = value

    return valid_dict
def _ingest_list(self, input_list, schema_list, path_to_root):

    '''
    a helper method for ingesting items in a list

    items which fail validation are silently dropped; ingestion stops
    once the list reaches any max_size declared in the model

    :param input_list: list with arbitrary input items
    :param schema_list: list with schema declaration for the items
    :param path_to_root: string with dot-path to the list in the model
    :return: valid_list
    '''

    valid_list = []

    # construct max list size (list indices normalized to [0])
    max_size = None
    rules_path_to_root = re.sub(r'\[\d+\]', '[0]', path_to_root)
    if 'max_size' in self.keyMap[rules_path_to_root].keys():
        if not self.keyMap[rules_path_to_root]['max_size']:
            # a falsy max_size means no items are ingested
            return valid_list
        else:
            max_size = self.keyMap[rules_path_to_root]['max_size']

    # iterate over items in input list
    if input_list:
        rules_index = self._datatype_classes.index(schema_list[0].__class__)
        rules_type = self._datatype_names[rules_index]
        for i in range(len(input_list)):
            item_path = '%s[%s]' % (path_to_root, i)
            value_match = False
            try:
                item_index = self._datatype_classes.index(input_list[i].__class__)
                item_type = self._datatype_names[item_index]
                if item_type == rules_type:
                    value_match = True
            except ValueError:
                # item class is not a recognized json datatype
                value_match = False
            if value_match:
                try:
                    if item_type == 'boolean':
                        valid_list.append(self._validate_boolean(input_list[i], item_path))
                    elif item_type == 'number':
                        valid_list.append(self._validate_number(input_list[i], item_path))
                    elif item_type == 'string':
                        valid_list.append(self._validate_string(input_list[i], item_path))
                    elif item_type == 'map':
                        valid_list.append(self._ingest_dict(input_list[i], schema_list[0], item_path))
                    elif item_type == 'list':
                        valid_list.append(self._ingest_list(input_list[i], schema_list[0], item_path))
                except Exception:
                    # invalid items are skipped rather than raising
                    pass
            if isinstance(max_size, int):
                if len(valid_list) == max_size:
                    return valid_list

    return valid_list
def _ingest_number(self, input_number, path_to_root):

    '''
    a helper method for ingesting a number

    falls back to the model default (or zero) if validation fails

    :param input_number: number to ingest
    :param path_to_root: string with dot-path to the number in the model
    :return: valid_number
    '''

    valid_number = 0.0
    try:
        valid_number = self._validate_number(input_number, path_to_root)
    except Exception:
        # validation failed; use declared default, else integer zero for integer fields
        rules_path_to_root = re.sub(r'\[\d+\]', '[0]', path_to_root)
        if 'default_value' in self.keyMap[rules_path_to_root]:
            valid_number = self.keyMap[rules_path_to_root]['default_value']
        elif 'integer_data' in self.keyMap[rules_path_to_root].keys():
            if self.keyMap[rules_path_to_root]['integer_data']:
                valid_number = 0
    return valid_number
def _ingest_string(self, input_string, path_to_root):

    '''
    a helper method for ingesting a string

    falls back to the model default (or empty string) if validation fails

    :param input_string: string to ingest
    :param path_to_root: string with dot-path to the string in the model
    :return: valid_string
    '''

    valid_string = ''
    try:
        valid_string = self._validate_string(input_string, path_to_root)
    except Exception:
        # validation failed; use declared default, else empty string
        rules_path_to_root = re.sub(r'\[\d+\]', '[0]', path_to_root)
        if 'default_value' in self.keyMap[rules_path_to_root]:
            valid_string = self.keyMap[rules_path_to_root]['default_value']
    return valid_string
def _ingest_boolean(self, input_boolean, path_to_root):

    '''
    a helper method for ingesting a boolean

    falls back to the model default (or False) if validation fails

    :param input_boolean: boolean to ingest
    :param path_to_root: string with dot-path to the boolean in the model
    :return: valid_boolean
    '''

    valid_boolean = False
    try:
        valid_boolean = self._validate_boolean(input_boolean, path_to_root)
    except Exception:
        # validation failed; use declared default, else False
        rules_path_to_root = re.sub(r'\[\d+\]', '[0]', path_to_root)
        if 'default_value' in self.keyMap[rules_path_to_root]:
            valid_boolean = self.keyMap[rules_path_to_root]['default_value']
    return valid_boolean
def _reconstruct(self, path_to_root):

    '''
    a helper method for finding the schema endpoint from a path to root

    :param path_to_root: string with dot-path to component in model
    :return: list, dict, string, number, or boolean at path to root
    '''

    # split path to root into segments
    # list item markers (e.g. '[0]') become segments matching item_pattern
    item_pattern = re.compile(r'\d+\]')
    dot_pattern = re.compile(r'\.|\[')
    path_segments = dot_pattern.split(path_to_root)

    # construct base schema endpoint
    schema_endpoint = self.schema

    # reconstruct schema endpoint from segments
    # a leading '.' produces an empty first segment, so walking starts at index 1
    if path_segments[1]:
        for i in range(1, len(path_segments)):
            if item_pattern.match(path_segments[i]):
                # descend into the single declared item of a schema list
                schema_endpoint = schema_endpoint[0]
            else:
                schema_endpoint = schema_endpoint[path_segments[i]]

    return schema_endpoint
def _walk(self, path_to_root, record_dict):

    '''
    a helper method for finding the record endpoints from a path to root

    :param path_to_root: string with dot-path from root to a field
    :param record_dict: dictionary with the record to walk
    :return: list of values found at path to root (one per matching list item)
    '''

    # split path to root into segments
    # list item markers (e.g. '[0]') become segments matching item_pattern
    item_pattern = re.compile(r'\d+\]')
    dot_pattern = re.compile(r'\.|\[')
    path_segments = dot_pattern.split(path_to_root)

    # construct empty fields
    record_endpoints = []

    # determine starting position
    # a leading '.' produces an empty first segment which is discarded
    if not path_segments[0]:
        path_segments.pop(0)

    # define internal recursive function
    def _walk_int(path_segments, record_dict):
        record_endpoint = record_dict
        for i in range(0, len(path_segments)):
            if item_pattern.match(path_segments[i]):
                # segment is a list marker: fan out over every item
                for j in range(0, len(record_endpoint)):
                    if len(path_segments) == 2:
                        record_endpoints.append(record_endpoint[j])
                    else:
                        # NOTE(review): stop_chain appears to prevent re-descending
                        # past an earlier list marker already handled by a
                        # recursive call — confirm intent
                        stop_chain = False
                        for x in range(0, i):
                            if item_pattern.match(path_segments[x]):
                                stop_chain = True
                        if not stop_chain:
                            shortened_segments = []
                            for z in range(i + 1, len(path_segments)):
                                shortened_segments.append(path_segments[z])
                            _walk_int(shortened_segments, record_endpoint[j])
            else:
                stop_chain = False
                for y in range(0, i):
                    if item_pattern.match(path_segments[y]):
                        stop_chain = True
                if not stop_chain:
                    if len(path_segments) == i + 1:
                        # final segment: collect the field value
                        record_endpoints.append(record_endpoint[path_segments[i]])
                    else:
                        # intermediate key: step down one level
                        record_endpoint = record_endpoint[path_segments[i]]

    # conduct recursive walk
    _walk_int(path_segments, record_dict)

    return record_endpoints
def validate(self, input_data, path_to_root='', object_title=''):

    '''
    a core method for validating input against the model

    input_data is only returned if all data is valid

    :param input_data: list, dict, string, number, or boolean to validate
    :param path_to_root: [optional] string with dot-path of model component
    :param object_title: [optional] string with name of input to validate
    :return: input_data (or raises InputValidationError)
    '''

    # NOTE: local __name__ shadowing is the file's convention for error labels
    __name__ = '%s.validate' % self.__class__.__name__
    _path_arg = '%s(path_to_root="...")' % __name__
    _title_arg = '%s(object_title="...")' % __name__

    # validate arguments and normalize path to dot-prefixed form
    copy_path = path_to_root
    if path_to_root:
        if not isinstance(path_to_root, str):
            raise ModelValidationError('%s must be a string.' % _path_arg)
        else:
            if path_to_root[0] != '.':
                copy_path = '.%s' % path_to_root
            if not copy_path in self.keyMap.keys():
                raise ModelValidationError('%s does not exist in components %s.' % (_path_arg.replace('...', path_to_root), self.keyMap.keys()))
    else:
        copy_path = '.'
    if object_title:
        if not isinstance(object_title, str):
            raise ModelValidationError('%s must be a string' % _title_arg)

    # construct generic error dictionary
    error_dict = {
        'object_title': object_title,
        'model_schema': self.schema,
        'input_criteria': self.keyMap[copy_path],
        'failed_test': 'value_datatype',
        'input_path': copy_path,
        'error_value': input_data,
        'error_code': 4001
    }

    # determine value type of input data
    try:
        input_index = self._datatype_classes.index(input_data.__class__)
    except ValueError:
        # input class is not a recognized json datatype
        error_dict['error_value'] = input_data.__class__.__name__
        raise InputValidationError(error_dict)
    input_type = self._datatype_names[input_index]

    # validate input data type against model declaration
    if input_type != self.keyMap[copy_path]['value_datatype']:
        raise InputValidationError(error_dict)

    # run helper method appropriate to data type
    if input_type == 'boolean':
        input_data = self._validate_boolean(input_data, copy_path, object_title)
    elif input_type == 'number':
        input_data = self._validate_number(input_data, copy_path, object_title)
    elif input_type == 'string':
        input_data = self._validate_string(input_data, copy_path, object_title)
    elif input_type == 'list':
        schema_list = self._reconstruct(copy_path)
        input_data = self._validate_list(input_data, schema_list, copy_path, object_title)
    elif input_type == 'map':
        schema_dict = self._reconstruct(copy_path)
        input_data = self._validate_dict(input_data, schema_dict, copy_path, object_title)

    return input_data
def ingest(self, **kwargs):

    '''
    a core method to ingest and validate arbitrary keyword data

    **NOTE: data is always returned with this method**

    each key declared in the model receives a value chosen by priority:

    1. the value in kwargs, if that field passes its validation test
    2. the default value declared for the key in the model
    3. an empty value appropriate to the datatype of the key

    **NOTE: as long as a default value is provided for each key-
    value, returned data will be model valid

    **NOTE: if 'extra_fields' is True for a dictionary, the key-
    value pair of all fields in kwargs which are not declared in
    the model will also be added to the corresponding dictionary
    data

    **NOTE: if 'max_size' is declared for a list, method will
    stop adding input to the list once it reaches max size

    :param kwargs: key, value pairs
    :return: dictionary with keys and value
    '''

    # label the call for error reporting, then delegate the entire
    # ingestion to the recursive dictionary helper starting at root
    __name__ = '%s.ingest' % self.__class__.__name__
    return self._ingest_dict(kwargs, self.schema, '.')
def query(self, query_criteria, valid_record=None):

    '''
    a core method for querying model valid data with criteria

    **NOTE: input is only returned if all fields & qualifiers are valid for model

    :param query_criteria: dictionary with model field names and query qualifiers
    :param valid_record: dictionary with model valid record
    :return: boolean (or raises QueryValidationError)

    an example of how to construct the query_criteria argument:

    query_criteria = {
        '.path.to.number': {
            'min_value': 4.5
        },
        '.path.to.string': {
            'must_contain': [ '\\regex' ]
        }
    }

    **NOTE: for a full list of operators for query_criteria based upon field
    datatype, see either the query-rules.json file or REFERENCE file
    '''

    __name__ = '%s.query' % self.__class__.__name__
    _query_arg = '%s(query_criteria={...})' % __name__
    _record_arg = '%s(valid_record={...})' % __name__

    # validate input
    if not isinstance(query_criteria, dict):
        raise ModelValidationError('%s must be a dictionary.' % _query_arg)

    # convert javascript dot_path to class dot_path
    # dot_fields tracks normalized keys so error messages can be rewritten below
    criteria_copy = {}
    equal_fields = []
    dot_fields = []
    for key, value in query_criteria.items():
        copy_key = key
        if not key:
            copy_key = '.'
        else:
            if key[0] != '.':
                copy_key = '.%s' % key
            dot_fields.append(copy_key)
        criteria_copy[copy_key] = value
        # a bare scalar value is shorthand for an equal_to qualifier
        if value.__class__ in self._datatype_classes[0:4]:
            criteria_copy[copy_key] = {
                'equal_to': value
            }
            equal_fields.append(copy_key)

    # validate query criteria against query rules
    query_kwargs = {
        'fields_dict': criteria_copy,
        'fields_rules': self.queryRules,
        'declared_value': False
    }
    try:
        self._validate_fields(**query_kwargs)
    except ModelValidationError as err:
        # rewrite the internal error message to match the caller's field names
        message = err.error['message']
        for field in equal_fields:
            equal_error = 'field %s qualifier equal_to' % field
            if message.find(equal_error) > -1:
                message = message.replace(equal_error, 'field %s' % field)
                break
        field_pattern = re.compile(r'ield\s(\..*?)\s')
        field_name = field_pattern.findall(message)
        if field_name:
            if field_name[0] in dot_fields:
                def _replace_field(x):
                    return 'ield %s ' % x.group(1)[1:]
                message = field_pattern.sub(_replace_field, message)
        raise QueryValidationError(message)

    # query test record: every criterion must evaluate true
    if valid_record:
        if not isinstance(valid_record, dict):
            raise ModelValidationError('%s must be a dictionary.' % _record_arg)
        for key, value in criteria_copy.items():
            eval_outcome = self._evaluate_field(valid_record, key, value)
            if not eval_outcome:
                return False

    return True
|
collectiveacuity/jsonModel
|
jsonmodel/validators.py
|
jsonModel.validate
|
python
|
def validate(self, input_data, path_to_root='', object_title=''):
'''
a core method for validating input against the model
input_data is only returned if all data is valid
:param input_data: list, dict, string, number, or boolean to validate
:param path_to_root: [optional] string with dot-path of model component
:param object_title: [optional] string with name of input to validate
:return: input_data (or InputValidationError)
'''
__name__ = '%s.validate' % self.__class__.__name__
_path_arg = '%s(path_to_root="...")' % __name__
_title_arg = '%s(object_title="...")' % __name__
# validate input
copy_path = path_to_root
if path_to_root:
if not isinstance(path_to_root, str):
raise ModelValidationError('%s must be a string.' % _path_arg)
else:
if path_to_root[0] != '.':
copy_path = '.%s' % path_to_root
if not copy_path in self.keyMap.keys():
raise ModelValidationError('%s does not exist in components %s.' % (_path_arg.replace('...', path_to_root), self.keyMap.keys()))
else:
copy_path = '.'
if object_title:
if not isinstance(object_title, str):
raise ModelValidationError('%s must be a string' % _title_arg)
# construct generic error dictionary
error_dict = {
'object_title': object_title,
'model_schema': self.schema,
'input_criteria': self.keyMap[copy_path],
'failed_test': 'value_datatype',
'input_path': copy_path,
'error_value': input_data,
'error_code': 4001
}
# determine value type of input data
try:
input_index = self._datatype_classes.index(input_data.__class__)
except:
error_dict['error_value'] = input_data.__class__.__name__
raise InputValidationError(error_dict)
input_type = self._datatype_names[input_index]
# validate input data type
if input_type != self.keyMap[copy_path]['value_datatype']:
raise InputValidationError(error_dict)
# run helper method appropriate to data type
if input_type == 'boolean':
input_data = self._validate_boolean(input_data, copy_path, object_title)
elif input_type == 'number':
input_data = self._validate_number(input_data, copy_path, object_title)
elif input_type == 'string':
input_data = self._validate_string(input_data, copy_path, object_title)
elif input_type == 'list':
schema_list = self._reconstruct(copy_path)
input_data = self._validate_list(input_data, schema_list, copy_path, object_title)
elif input_type == 'map':
schema_dict = self._reconstruct(copy_path)
input_data = self._validate_dict(input_data, schema_dict, copy_path, object_title)
return input_data
|
a core method for validating input against the model
input_data is only returned if all data is valid
:param input_data: list, dict, string, number, or boolean to validate
:param path_to_root: [optional] string with dot-path of model component
:param object_title: [optional] string with name of input to validate
:return: input_data (or InputValidationError)
|
train
|
https://github.com/collectiveacuity/jsonModel/blob/1ea64c36d78add3faa7b85ff82c5ec685458c940/jsonmodel/validators.py#L1470-L1540
|
[
"def _validate_dict(self, input_dict, schema_dict, path_to_root, object_title=''):\n\n ''' a helper method for recursively validating keys in dictionaries\n\n :return input_dict\n '''\n\n# reconstruct key path to current dictionary in model\n rules_top_level_key = re.sub('\\[\\d+\\]', '[0]', path_to_root)\n map_rules = self.keyMap[rules_top_level_key]\n\n# construct list error report template\n map_error = {\n 'object_title': object_title,\n 'model_schema': self.schema,\n 'input_criteria': map_rules,\n 'failed_test': 'value_datatype',\n 'input_path': path_to_root,\n 'error_value': 0,\n 'error_code': 4001\n }\n\n# validate map size\n if 'min_size' in map_rules.keys():\n input_size = sys.getsizeof(json.dumps(str(input_dict)).replace(' ','')) - 51\n if input_size < map_rules['min_size']:\n map_error['failed_test'] = 'min_size'\n map_error['error_value'] = input_size\n map_error['error_code'] = 4031\n raise InputValidationError(map_error)\n if 'max_size' in map_rules.keys():\n input_size = sys.getsizeof(json.dumps(str(input_dict)).replace(' ','')) - 51\n if input_size > map_rules['max_size']:\n map_error['failed_test'] = 'max_size'\n map_error['error_value'] = input_size\n map_error['error_code'] = 4032\n raise InputValidationError(map_error)\n\n# construct lists of keys in input dictionary\n input_keys = []\n input_key_list = []\n for key in input_dict.keys():\n error_dict = {\n 'object_title': object_title,\n 'model_schema': self.schema,\n 'input_criteria': self.keyMap[rules_top_level_key],\n 'failed_test': 'key_datatype',\n 'input_path': path_to_root,\n 'error_value': key,\n 'error_code': 4004\n }\n error_dict['input_criteria']['key_datatype'] = 'string'\n if path_to_root == '.':\n if not isinstance(key, str):\n input_key_name = path_to_root + str(key)\n error_dict['input_path'] = input_key_name\n raise InputValidationError(error_dict)\n input_key_name = path_to_root + key\n else:\n if not isinstance(key, str):\n input_key_name = path_to_root + '.' 
+ str(key)\n error_dict['input_path'] = input_key_name\n raise InputValidationError(error_dict)\n input_key_name = path_to_root + '.' + key\n input_keys.append(input_key_name)\n input_key_list.append(key)\n\n# TODO: validate top-level key and values against identical to reference\n\n# TODO: run lambda function and call validation\n\n# construct lists of keys in schema dictionary\n max_keys = []\n max_key_list = []\n req_keys = []\n req_key_list = []\n for key in schema_dict.keys():\n if path_to_root == '.':\n schema_key_name = path_to_root + key\n else:\n schema_key_name = path_to_root + '.' + key\n max_keys.append(schema_key_name)\n max_key_list.append(key)\n rules_schema_key_name = re.sub('\\[\\d+\\]', '[0]', schema_key_name)\n if self.keyMap[rules_schema_key_name]['required_field']:\n req_keys.append(schema_key_name)\n req_key_list.append(key)\n\n# validate existence of required fields\n missing_keys = set(req_keys) - set(input_keys)\n if missing_keys:\n error_dict = {\n 'object_title': object_title,\n 'model_schema': self.schema,\n 'input_criteria': self.keyMap[rules_top_level_key],\n 'failed_test': 'required_field',\n 'input_path': path_to_root,\n 'error_value': list(missing_keys),\n 'error_code': 4002\n }\n error_dict['input_criteria']['required_keys'] = req_keys\n raise InputValidationError(error_dict)\n\n# validate existence of extra fields\n extra_keys = set(input_keys) - set(max_keys)\n if extra_keys and not self.keyMap[rules_top_level_key]['extra_fields']:\n extra_key_list = []\n for key in extra_keys:\n pathless_key = re.sub(rules_top_level_key, '', key, count=1)\n extra_key_list.append(pathless_key)\n error_dict = {\n 'object_title': object_title,\n 'model_schema': self.schema,\n 'input_criteria': self.keyMap[rules_top_level_key],\n 'failed_test': 'extra_fields',\n 'input_path': path_to_root,\n 'error_value': extra_key_list,\n 'error_code': 4003\n }\n error_dict['input_criteria']['maximum_scope'] = max_key_list\n raise 
InputValidationError(error_dict)\n\n# validate datatype of value\n for key, value in input_dict.items():\n if path_to_root == '.':\n input_key_name = path_to_root + key\n else:\n input_key_name = path_to_root + '.' + key\n rules_input_key_name = re.sub('\\[\\d+\\]', '[0]', input_key_name)\n if input_key_name in max_keys:\n input_criteria = self.keyMap[rules_input_key_name]\n error_dict = {\n 'object_title': object_title,\n 'model_schema': self.schema,\n 'input_criteria': input_criteria,\n 'failed_test': 'value_datatype',\n 'input_path': input_key_name,\n 'error_value': value,\n 'error_code': 4001\n }\n try:\n value_index = self._datatype_classes.index(value.__class__)\n except:\n error_dict['error_value'] = value.__class__.__name__\n raise InputValidationError(error_dict)\n value_type = self._datatype_names[value_index]\n if input_criteria['value_datatype'] == 'null':\n pass\n else:\n if value_type != input_criteria['value_datatype']:\n raise InputValidationError(error_dict)\n\n# call appropriate validation sub-routine for datatype of value\n if value_type == 'boolean':\n input_dict[key] = self._validate_boolean(value, input_key_name, object_title)\n elif value_type == 'number':\n input_dict[key] = self._validate_number(value, input_key_name, object_title)\n elif value_type == 'string':\n input_dict[key] = self._validate_string(value, input_key_name, object_title)\n elif value_type == 'map':\n input_dict[key] = self._validate_dict(value, schema_dict[key], input_key_name, object_title)\n elif value_type == 'list':\n input_dict[key] = self._validate_list(value, schema_dict[key], input_key_name, object_title)\n\n# set default values for empty optional fields\n for key in max_key_list:\n if key not in input_key_list:\n indexed_key = max_keys[max_key_list.index(key)]\n if indexed_key in self.components.keys():\n if 'default_value' in self.components[indexed_key]:\n input_dict[key] = self.components[indexed_key]['default_value']\n\n return input_dict\n",
"def _validate_list(self, input_list, schema_list, path_to_root, object_title=''):\n\n '''\n a helper method for recursively validating items in a list\n\n :return: input_list\n '''\n\n# construct rules for list and items\n rules_path_to_root = re.sub('\\[\\d+\\]', '[0]', path_to_root)\n list_rules = self.keyMap[rules_path_to_root]\n initial_key = rules_path_to_root + '[0]'\n item_rules = self.keyMap[initial_key]\n\n# construct list error report template\n list_error = {\n 'object_title': object_title,\n 'model_schema': self.schema,\n 'input_criteria': list_rules,\n 'failed_test': 'value_datatype',\n 'input_path': path_to_root,\n 'error_value': 0,\n 'error_code': 4001\n }\n\n# validate list rules\n if 'min_size' in list_rules.keys():\n if len(input_list) < list_rules['min_size']:\n list_error['failed_test'] = 'min_size'\n list_error['error_value'] = len(input_list)\n list_error['error_code'] = 4031\n raise InputValidationError(list_error)\n if 'max_size' in list_rules.keys():\n if len(input_list) > list_rules['max_size']:\n list_error['failed_test'] = 'max_size'\n list_error['error_value'] = len(input_list)\n list_error['error_code'] = 4032\n raise InputValidationError(list_error)\n\n# construct item error report template\n item_error = {\n 'object_title': object_title,\n 'model_schema': self.schema,\n 'input_criteria': item_rules,\n 'failed_test': 'value_datatype',\n 'input_path': initial_key,\n 'error_value': None,\n 'error_code': 4001\n }\n\n# validate datatype of items\n for i in range(len(input_list)):\n input_path = path_to_root + '[%s]' % i\n item = input_list[i]\n item_error['input_path'] = input_path\n try:\n item_index = self._datatype_classes.index(item.__class__)\n except:\n item_error['error_value'] = item.__class__.__name__\n raise InputValidationError(item_error)\n item_type = self._datatype_names[item_index]\n item_error['error_value'] = item\n if item_rules['value_datatype'] == 'null':\n pass\n else:\n if item_type != 
item_rules['value_datatype']:\n raise InputValidationError(item_error)\n\n# call appropriate validation sub-routine for datatype of item\n if item_type == 'boolean':\n input_list[i] = self._validate_boolean(item, input_path, object_title)\n elif item_type == 'number':\n input_list[i] = self._validate_number(item, input_path, object_title)\n elif item_type == 'string':\n input_list[i] = self._validate_string(item, input_path, object_title)\n elif item_type == 'map':\n input_list[i] = self._validate_dict(item, schema_list[0], input_path, object_title)\n elif item_type == 'list':\n input_list[i] = self._validate_list(item, schema_list[0], input_path, object_title)\n\n# validate unique values in list\n if 'unique_values' in list_rules.keys():\n if len(set(input_list)) < len(input_list):\n list_error['failed_test'] = 'unique_values'\n list_error['error_value'] = input_list\n list_error['error_code'] = 4033\n raise InputValidationError(list_error)\n\n# TODO: validate top-level item values against identical to reference\n\n# TODO: run lambda function and call validation url\n\n return input_list\n",
"def _validate_number(self, input_number, path_to_root, object_title=''):\n\n '''\n a helper method for validating properties of a number\n\n :return: input_number\n '''\n\n rules_path_to_root = re.sub('\\[\\d+\\]', '[0]', path_to_root)\n input_criteria = self.keyMap[rules_path_to_root]\n error_dict = {\n 'object_title': object_title,\n 'model_schema': self.schema,\n 'input_criteria': input_criteria,\n 'failed_test': 'value_datatype',\n 'input_path': path_to_root,\n 'error_value': input_number,\n 'error_code': 4001\n }\n if 'integer_data' in input_criteria.keys():\n if input_criteria['integer_data'] and not isinstance(input_number, int):\n error_dict['failed_test'] = 'integer_data'\n error_dict['error_code'] = 4021\n raise InputValidationError(error_dict)\n if 'min_value' in input_criteria.keys():\n if input_number < input_criteria['min_value']:\n error_dict['failed_test'] = 'min_value'\n error_dict['error_code'] = 4022\n raise InputValidationError(error_dict)\n if 'max_value' in input_criteria.keys():\n if input_number > input_criteria['max_value']:\n error_dict['failed_test'] = 'max_value'\n error_dict['error_code'] = 4023\n raise InputValidationError(error_dict)\n if 'greater_than' in input_criteria.keys():\n if input_number <= input_criteria['greater_than']:\n error_dict['failed_test'] = 'greater_than'\n error_dict['error_code'] = 4024\n raise InputValidationError(error_dict)\n if 'less_than' in input_criteria.keys():\n if input_number >= input_criteria['less_than']:\n error_dict['failed_test'] = 'less_than'\n error_dict['error_code'] = 4025\n raise InputValidationError(error_dict)\n if 'equal_to' in input_criteria.keys():\n if input_number != input_criteria['equal_to']:\n error_dict['failed_test'] = 'equal_to'\n error_dict['error_code'] = 4026\n raise InputValidationError(error_dict)\n if 'discrete_values' in input_criteria.keys():\n if input_number not in input_criteria['discrete_values']:\n error_dict['failed_test'] = 'discrete_values'\n 
error_dict['error_code'] = 4041\n raise InputValidationError(error_dict)\n if 'excluded_values' in input_criteria.keys():\n if input_number in input_criteria['excluded_values']:\n error_dict['failed_test'] = 'excluded_values'\n error_dict['error_code'] = 4042\n raise InputValidationError(error_dict)\n\n# TODO: validate number against identical to reference\n\n# TODO: run lambda function and call validation url\n\n return input_number\n",
"def _validate_string(self, input_string, path_to_root, object_title=''):\n\n '''\n a helper method for validating properties of a string\n\n :return: input_string\n '''\n\n rules_path_to_root = re.sub('\\[\\d+\\]', '[0]', path_to_root)\n input_criteria = self.keyMap[rules_path_to_root]\n error_dict = {\n 'object_title': object_title,\n 'model_schema': self.schema,\n 'input_criteria': input_criteria,\n 'failed_test': 'value_datatype',\n 'input_path': path_to_root,\n 'error_value': input_string,\n 'error_code': 4001\n }\n if 'byte_data' in input_criteria.keys():\n if input_criteria['byte_data']:\n error_dict['failed_test'] = 'byte_data'\n error_dict['error_code'] = 4011\n try:\n decoded_bytes = b64decode(input_string)\n except:\n raise InputValidationError(error_dict)\n if not isinstance(decoded_bytes, bytes):\n raise InputValidationError(error_dict)\n if 'min_value' in input_criteria.keys():\n if input_string < input_criteria['min_value']:\n error_dict['failed_test'] = 'min_value'\n error_dict['error_code'] = 4022\n raise InputValidationError(error_dict)\n if 'max_value' in input_criteria.keys():\n if input_string > input_criteria['max_value']:\n error_dict['failed_test'] = 'max_value'\n error_dict['error_code'] = 4023\n raise InputValidationError(error_dict)\n if 'greater_than' in input_criteria.keys():\n if input_string <= input_criteria['greater_than']:\n error_dict['failed_test'] = 'greater_than'\n error_dict['error_code'] = 4024\n raise InputValidationError(error_dict)\n if 'less_than' in input_criteria.keys():\n if input_string >= input_criteria['less_than']:\n error_dict['failed_test'] = 'less_than'\n error_dict['error_code'] = 4025\n raise InputValidationError(error_dict)\n if 'equal_to' in input_criteria.keys():\n if input_string != input_criteria['equal_to']:\n error_dict['failed_test'] = 'equal_to'\n error_dict['error_code'] = 4026\n raise InputValidationError(error_dict)\n if 'min_length' in input_criteria.keys():\n if len(input_string) < 
input_criteria['min_length']:\n error_dict['failed_test'] = 'min_length'\n error_dict['error_code'] = 4012\n raise InputValidationError(error_dict)\n if 'max_length' in input_criteria.keys():\n if len(input_string) > input_criteria['max_length']:\n error_dict['failed_test'] = 'max_length'\n error_dict['error_code'] = 4013\n raise InputValidationError(error_dict)\n if 'must_not_contain' in input_criteria.keys():\n for regex in input_criteria['must_not_contain']:\n regex_pattern = re.compile(regex)\n if regex_pattern.findall(input_string):\n error_dict['failed_test'] = 'must_not_contain'\n error_dict['error_code'] = 4014\n raise InputValidationError(error_dict)\n if 'must_contain' in input_criteria.keys():\n for regex in input_criteria['must_contain']:\n regex_pattern = re.compile(regex)\n if not regex_pattern.findall(input_string):\n error_dict['failed_test'] = 'must_contain'\n error_dict['error_code'] = 4015\n raise InputValidationError(error_dict)\n if 'contains_either' in input_criteria.keys():\n regex_match = False\n for regex in input_criteria['contains_either']:\n regex_pattern = re.compile(regex)\n if regex_pattern.findall(input_string):\n regex_match = True\n if not regex_match:\n error_dict['failed_test'] = 'contains_either'\n error_dict['error_code'] = 4016\n raise InputValidationError(error_dict)\n if 'discrete_values' in input_criteria.keys():\n if input_string not in input_criteria['discrete_values']:\n error_dict['failed_test'] = 'discrete_values'\n error_dict['error_code'] = 4041\n raise InputValidationError(error_dict)\n if 'excluded_values' in input_criteria.keys():\n if input_string in input_criteria['excluded_values']:\n error_dict['failed_test'] = 'excluded_values'\n error_dict['error_code'] = 4042\n raise InputValidationError(error_dict)\n\n# TODO: validate string against identical to reference\n\n# TODO: run lambda function and call validation url\n\n return input_string\n",
"def _validate_boolean(self, input_boolean, path_to_root, object_title=''):\n\n '''\n a helper method for validating properties of a boolean\n\n :return: input_boolean\n '''\n\n rules_path_to_root = re.sub('\\[\\d+\\]', '[0]', path_to_root)\n input_criteria = self.keyMap[rules_path_to_root]\n error_dict = {\n 'object_title': object_title,\n 'model_schema': self.schema,\n 'input_criteria': input_criteria,\n 'failed_test': 'value_datatype',\n 'input_path': path_to_root,\n 'error_value': input_boolean,\n 'error_code': 4001\n }\n if 'equal_to' in input_criteria.keys():\n if input_boolean != input_criteria['equal_to']:\n error_dict['failed_test'] = 'equal_to'\n error_dict['error_code'] = 4026\n raise InputValidationError(error_dict)\n\n# TODO: validate boolean against identical to reference\n\n# TODO: run lambda function and call validation url\n\n return input_boolean\n",
"def _reconstruct(self, path_to_root):\n\n '''\n a helper method for finding the schema endpoint from a path to root\n\n :param path_to_root: string with dot path to root from\n :return: list, dict, string, number, or boolean at path to root\n '''\n\n# split path to root into segments\n item_pattern = re.compile('\\d+\\\\]')\n dot_pattern = re.compile('\\\\.|\\\\[')\n path_segments = dot_pattern.split(path_to_root)\n\n# construct base schema endpoint\n schema_endpoint = self.schema\n\n# reconstruct schema endpoint from segments\n if path_segments[1]:\n for i in range(1,len(path_segments)):\n if item_pattern.match(path_segments[i]):\n schema_endpoint = schema_endpoint[0]\n else:\n schema_endpoint = schema_endpoint[path_segments[i]]\n\n return schema_endpoint\n"
] |
class jsonModel(object):
__rules__ = jsonLoader('jsonmodel', 'models/model-rules.json')
def __init__(self, data_model, query_rules=None):
'''
a method for testing data model declaration & initializing the class
:param data_model: dictionary with json model architecture
:param query_rules: [optional] dictionary with valid field type qualifiers
:return: object with jsonModel methods
'''
# validate schema input
if not isinstance(data_model, dict):
raise ModelValidationError('Data model must be a dictionary.')
elif 'schema' not in data_model.keys():
raise ModelValidationError('Data model must have a schema key.')
elif not isinstance(data_model['schema'], dict):
raise ModelValidationError('Value for the data model "schema" field must be a dictionary.')
elif not data_model['schema']:
raise ModelValidationError('Data model "schema" field must not be empty.')
# construct base methods
from copy import deepcopy
data_model = deepcopy(data_model)
self.schema = data_model['schema']
model_map = mapModel(self.schema)
self.keyName = model_map.keyName
self.keyCriteria = model_map.keyCriteria
# construct protected type classes
self._datatype_names = mapModel._datatype_names
self._datatype_classes = mapModel._datatype_classes
# validate absence of item designators in keys
item_pattern = re.compile('\[\d+\]')
for i in range(len(self.keyName)):
patterns_found = item_pattern.findall(self.keyName[i])
if patterns_found:
for designator in patterns_found:
if designator != '[0]':
message = 'Key name for schema field %s must not contain the item designator pattern %s' % (self.keyName[i], designator)
raise ModelValidationError(message)
# validate existence of first item in list declarations
key_set = set(self.keyName)
for i in range(len(self.keyName)):
if self.keyCriteria[i]['value_datatype'] == 'list':
item_key = self.keyName[i] + '[0]'
if not item_key in key_set:
message = 'Schema field %s must declare an initial item for the list.' % self.keyName[i]
raise ModelValidationError(message)
# alter list requirement if first item is empty
else:
item_index = self.keyName.index(item_key)
if not self.keyCriteria[item_index]['required_field']:
self.keyCriteria[i]['required_field'] = False
# validate title input & construct title method
self.title = ''
if 'title' in data_model.keys():
if not isinstance(data_model['title'], str):
raise ModelValidationError('Value for model title must be a string.')
self.title = data_model['title']
# validate description input & construct description method
self.description = ''
if 'description' in data_model.keys():
if not isinstance(data_model['description'], str):
raise ModelValidationError('Value for model description must be a string.')
self.description = data_model['description']
# validate url input & construct title method
self.url = ''
if 'url' in data_model.keys():
if not isinstance(data_model['url'], str):
raise ModelValidationError('Value for model url must be a string.')
self.title = data_model['url']
# validate metadata input & construct metadata method
self.metadata = {}
if 'metadata' in data_model.keys():
if not isinstance(data_model['metadata'], dict):
raise ModelValidationError('Value for model metadata must be a dictionary.')
self.metadata = data_model['metadata']
# validate max size input & construct maxSize property
# self.maxSize = None
# if 'max_size' in data_model.keys():
# if not isinstance(data_model['max_size'], int):
# raise ModelValidationError('Value for model max_size must be a positive integer.')
# elif data_model['max_size'] < 0:
# raise ModelValidationError('Value for model max_size must be a positive integer.')
# elif data_model['max_size']:
# self.maxSize = data_model['max_size']
# validate components input & construct component property
self.components = {}
if 'components' in data_model.keys():
if not isinstance(data_model['components'], dict):
raise ModelValidationError('Value for model components must be a dictionary.')
self.components = self._validate_fields(data_model['components'], self.__rules__['components'])
# construct keyMap fields from key names and key criteria
self.keyMap = {}
for i in range(len(self.keyName)):
self.keyMap[self.keyName[i]] = self.keyCriteria[i]
for key, value in self.components.items():
# convert javascript dot_path to class dot_path
dot_key = ''
if not key:
dot_key = '.'
else:
if key[0] != '.':
dot_key = '.%s' % key
# add component declarations to keyMap
if key in self.keyMap.keys():
for k, v in self.components[key].items():
self.keyMap[key][k] = v
elif dot_key and dot_key in self.keyMap.keys():
for k, v in self.components[key].items():
self.keyMap[dot_key][k] = v
# validate default values in lists
self._validate_defaults(self.keyMap)
# construct queryRules property from class model rules
self.queryRules = {}
for key, value in self.__rules__['components'].items():
remove_from_query = [ 'required_field', 'default_value', 'example_values', 'field_title', 'field_position', 'field_description', 'field_metadata', 'extra_fields' ]
field_qualifiers = {
'value_exists': False
}
for k, v in value.items():
if k not in remove_from_query:
field_qualifiers[k] = v
self.queryRules[key] = field_qualifiers
# validate query rules input and replace queryRules property
if query_rules:
if not isinstance(query_rules, dict):
message = 'Value for query rules input must be a dictionary.'
raise ModelValidationError(message)
input_set = set(query_rules.keys())
req_set = set(self.queryRules.keys())
if input_set - req_set:
message = 'Query rules input may only have %s field key names.' % req_set
raise ModelValidationError(message)
elif req_set - input_set:
message = 'Query rules input must have all %s field key names.' % req_set
raise ModelValidationError(message)
for key in req_set:
if not isinstance(query_rules[key], dict):
message = 'Value for query rules %s field must be a dictionary.' % key
raise ModelValidationError(message)
input_qualifier_set = set(query_rules[key].keys())
req_qualifier_set = set(self.queryRules[key].keys())
if input_qualifier_set - req_qualifier_set:
message = 'Query rules field %s may only have qualifiers %s' % (key, req_qualifier_set)
raise ModelValidationError(message)
for k, v in query_rules[key].items():
if v.__class__ != self.queryRules[key][k].__class__:
qualifier_index = self._datatype_classes.index(self.queryRules[key][k].__class__)
qualifier_type = self._datatype_names[qualifier_index]
message = 'Value for query rules field %s qualifier %s must be a "%s" datatype.' % (key, k, qualifier_type)
raise ModelValidationError(message)
self.queryRules = query_rules
def _validate_fields(self, fields_dict, fields_rules, declared_value=True):
# validate key names in fields
for key, value in fields_dict.items():
# convert javascript dot_path to class dot_path
if not key:
key = '.'
else:
if key[0] != '.':
key = '.%s' % key
if key not in self.keyName:
raise ModelValidationError('Field %s is not a field declared in model schema.' % key)
elif not isinstance(value, dict):
raise ModelValidationError('Value for field %s must be a dictionary.' % key)
# validate field criteria are appropriate to field datatype
value_type = self.keyCriteria[self.keyName.index(key)]['value_datatype']
type_dict = {}
if value_type == 'string':
type_dict = fields_rules['.string_fields']
elif value_type == 'number':
type_dict = fields_rules['.number_fields']
elif value_type == 'boolean':
type_dict = fields_rules['.boolean_fields']
elif value_type == 'list':
type_dict = fields_rules['.list_fields']
elif value_type == 'map':
type_dict = fields_rules['.map_fields']
elif value_type == 'null':
type_dict = fields_rules['.null_fields']
if set(value.keys()) - set(type_dict.keys()):
raise ModelValidationError('Field %s may only have datatype %s qualifiers %s.' % (key, value_type, set(type_dict.keys())))
# validate criteria qualifier values are appropriate datatype
for k, v in value.items():
v_index = self._datatype_classes.index(v.__class__)
v_type = self._datatype_names[v_index]
qualifier_index = self._datatype_classes.index(type_dict[k].__class__)
qualifier_type = self._datatype_names[qualifier_index]
if v_type != qualifier_type:
message = 'Value for field %s qualifier %s must be a %s datatype.' % (key, k, qualifier_type)
raise ModelValidationError(message)
if qualifier_type == 'number':
if isinstance(type_dict[k], int):
if not isinstance(v, int):
message = 'Value for field %s qualifier %s must be an integer.' % (key, k)
raise ModelValidationError(message)
# validate internal logic of each qualifier value declaration
if k in ('must_not_contain', 'must_contain', 'contains_either'):
for item in v:
if not isinstance(item, str):
message = 'Each item in list field %s qualifier %s must be a string.' % (key, k)
raise ModelValidationError(message)
if k in ('min_length', 'max_length', 'min_size', 'max_size'):
if v < 0:
message = 'Value for field %s qualifier %s cannot be negative.' % (key, k)
raise ModelValidationError(message)
if k in ('discrete_values', 'excluded_values', 'example_values'):
for item in v:
if value_type == 'number':
if not isinstance(item, int) and not isinstance(item, float):
message = 'Each item in field %s qualifier %s list must be a number.' % (key, k)
raise ModelValidationError(message)
elif not isinstance(item, str):
message = 'Each item in list for field %s qualifier %s must be a string.' % (key, k)
raise ModelValidationError(message)
if k == 'identical_to':
if not v in self.keyName:
message = 'Value "%s" for field %s qualifier %s not found in components keys.' % (v, key, k)
raise ModelValidationError(message)
if k == 'unique_values':
if v:
item_name = key + '[0]'
item_type = self.keyCriteria[self.keyName.index(item_name)]['value_datatype']
if not item_type in ('number', 'string'):
message = 'Field %s[0] must be either a string or number if qualifier "unique_values": true' % key
raise ModelValidationError(message)
# validate lack of other qualifiers if value exist is false
if 'value_exists' in value.keys():
if not value['value_exists']:
if set(value.keys()) - {'value_exists'}:
message = 'If field %s qualifier value_exists: false, field may not have other qualifiers.' % key
raise ModelValidationError(message)
# validate size qualifiers against each other
size_qualifiers = ['min_size', 'max_size']
for qualifier in size_qualifiers:
if qualifier in value.keys():
test_value = value[qualifier]
value_path = 'field %s qualifier %s' % (key, qualifier)
header = 'Value %s for %s' % (test_value, value_path)
if 'min_size' in value.keys():
if test_value < value['min_size']:
message = '%s must not be less than "min_size": %s' % (header, value['min_size'])
raise ModelValidationError(message)
if 'max_size' in value.keys():
if test_value > value['max_size']:
message = '%s must not be greater than "max_size": %s' % (header, value['max_size'])
raise ModelValidationError(message)
# validate length qualifiers against each other
length_qualifiers = ['min_length', 'max_length']
for qualifier in length_qualifiers:
if qualifier in value.keys():
test_value = value[qualifier]
value_path = 'field %s qualifier %s' % (key, qualifier)
header = 'Value %s for %s' % (test_value, value_path)
if 'min_length' in value.keys():
if test_value < value['min_length']:
message = '%s must be at least "min_length": %s' % (header, value['min_length'])
raise ModelValidationError(message)
if 'max_length' in value.keys():
if test_value > value['max_length']:
message = '%s cannot be more than "max_length": %s' % (header, value['max_length'])
raise ModelValidationError(message)
# validate range qualifiers against each other & length qualifiers
range_qualifiers = ['min_value', 'max_value', 'greater_than', 'less_than', 'equal_to']
for qualifier in range_qualifiers:
if qualifier in value.keys():
test_value = value[qualifier]
value_path = 'field %s qualifier %s' % (key, qualifier)
quote_text = ''
if isinstance(test_value, str):
quote_text = '"'
header = 'Value %s%s%s for %s' % (quote_text, test_value, quote_text, value_path)
if 'min_value' in value.keys():
if test_value < value['min_value']:
message = '%s must not be less than "min_value": %s' % (header, value['min_value'])
raise ModelValidationError(message)
if 'max_value' in value.keys():
if test_value > value['max_value']:
message = '%s must not be greater than "max_value": %s' % (header, value['max_value'])
raise ModelValidationError(message)
if 'greater_than' in value.keys():
if test_value <= value['greater_than'] and not qualifier == 'greater_than':
message = '%s must be "greater_than": %s' % (header, value['greater_than'])
raise ModelValidationError(message)
if 'less_than' in value.keys():
if test_value >= value['less_than'] and not qualifier == 'less_than':
message = '%s must be "less_than": %s' % (header, value['less_than'])
raise ModelValidationError(message)
if 'min_length' in value.keys():
if len(test_value) < value['min_length']:
message = '%s must be at least "min_length": %s' % (header, value['min_length'])
raise ModelValidationError(message)
if 'max_length' in value.keys():
if len(test_value) > value['max_length']:
message = '%s cannot be more than "max_length": %s' % (header, value['max_length'])
raise ModelValidationError(message)
if 'integer_data' in value.keys():
if value['integer_data']:
if not isinstance(test_value, int):
message = '%s must be an "integer_data".' % header
raise ModelValidationError(message)
if 'must_not_contain' in value.keys():
for regex in value['must_not_contain']:
regex_pattern = re.compile(regex)
if regex_pattern.findall(test_value):
message = '%s matches regex pattern in "must_not_contain": ["%s"]' % (header, regex)
raise ModelValidationError(message)
if 'must_contain' in value.keys():
for regex in value['must_contain']:
regex_pattern = re.compile(regex)
if not regex_pattern.findall(test_value):
message = '%s does not match regex pattern in "must_contain": ["%s"].' % (header, regex)
raise ModelValidationError(message)
if 'contains_either' in value.keys():
regex_match = False
regex_patterns = []
for regex in value['contains_either']:
regex_patterns.append(regex)
regex_pattern = re.compile(regex)
if regex_pattern.findall(test_value):
regex_match = True
if not regex_match:
message = '%s does not match any regex patterns in "contains_either": %s' % (header, regex_patterns)
raise ModelValidationError(message)
if 'byte_data' in value.keys():
if value['byte_data']:
if qualifier != 'equal_to':
message = '%s cannot be used with base64 encoded "byte_data".' % header
raise ModelValidationError(message)
# validate discrete value qualifiers against other criteria
schema_field = self.keyCriteria[self.keyName.index(key)]
discrete_qualifiers = ['declared_value', 'default_value', 'excluded_values', 'discrete_values', 'example_values']
for qualifier in discrete_qualifiers:
test_qualifier = False
if qualifier in schema_field:
test_qualifier = True
if qualifier == 'declared_value' and not schema_field[qualifier]:
test_qualifier = False
if qualifier in value.keys() or (test_qualifier and declared_value):
multiple_values = False
if qualifier in value.keys():
if isinstance(value[qualifier], list):
test_list = value[qualifier]
multiple_values = True
else:
test_list = [value[qualifier]]
else:
test_list = [schema_field[qualifier]]
value_path = 'field %s qualifier %s' % (key, qualifier)
for i in range(len(test_list)):
test_value = test_list[i]
quote_text = ''
if isinstance(test_value, str):
quote_text = '"'
item_text = ''
if multiple_values:
item_text = '[%s]' % i
header = 'Value %s%s%s for %s%s' % (quote_text, test_value, quote_text, value_path, item_text)
if 'min_value' in value.keys():
if test_value < value['min_value']:
message = '%s must not be less than "min_value": %s' % (header, value['min_value'])
raise ModelValidationError(message)
if 'max_value' in value.keys():
if test_value > value['max_value']:
message = '%s must not be greater than "max_value": %s' % (header, value['max_value'])
raise ModelValidationError(message)
if 'equal_to' in value.keys():
if test_value != value['equal_to']:
if qualifier != 'declared_value' and isinstance(test_value, bool):
message = '%s must be "equal_to": %s' % (header, value['equal_to'])
raise ModelValidationError(message)
if 'greater_than' in value.keys():
if test_value <= value['greater_than']:
message = '%s must be "greater_than": %s' % (header, value['greater_than'])
raise ModelValidationError(message)
if 'less_than' in value.keys():
if test_value >= value['less_than']:
message = '%s must be "less_than": %s' % (header, value['less_than'])
raise ModelValidationError(message)
if 'integer_data' in value.keys():
if value['integer_data']:
if not isinstance(test_value, int):
message = '%s must be an "integer_data".' % header
raise ModelValidationError(message)
if 'min_length' in value.keys():
if len(test_value) < value['min_length']:
message = '%s must be at least "min_length": %s' % (header, value['min_length'])
raise ModelValidationError(message)
if 'max_length' in value.keys():
if len(test_value) > value['max_length']:
message = '%s cannot be more than "max_length": %s' % (header, value['max_length'])
raise ModelValidationError(message)
if 'must_not_contain' in value.keys():
for regex in value['must_not_contain']:
regex_pattern = re.compile(regex)
if regex_pattern.findall(test_value):
message = '%s matches regex pattern in "must_not_contain": ["%s"]' % (header, regex)
raise ModelValidationError(message)
if 'must_contain' in value.keys():
for regex in value['must_contain']:
regex_pattern = re.compile(regex)
if not regex_pattern.findall(test_value):
message = '%s does not match regex pattern in "must_contain": ["%s"]' % (header, regex)
raise ModelValidationError(message)
if 'contains_either' in value.keys():
regex_match = False
regex_patterns = []
for regex in value['contains_either']:
regex_patterns.append(regex)
regex_pattern = re.compile(regex)
if regex_pattern.findall(test_value):
regex_match = True
if not regex_match:
message = '%s does not match any regex patterns in "contains_either": %s' % (header, regex_patterns)
raise ModelValidationError(message)
if 'byte_data' in value.keys():
message = '%s cannot be base64 decoded to "byte_data".' % header
try:
decoded_bytes = b64decode(test_value)
except:
raise ModelValidationError(message)
if not isinstance(decoded_bytes, bytes):
raise ModelValidationError(message)
# validate discrete value qualifiers against each other
for qualifier in discrete_qualifiers:
test_qualifier = False
if qualifier in schema_field:
test_qualifier = True
if qualifier == 'declared_value' and not schema_field[qualifier]:
test_qualifier = False
if qualifier in value.keys() or (test_qualifier and declared_value):
multiple_values = False
if qualifier in value.keys():
if isinstance(value[qualifier], list):
test_list = value[qualifier]
multiple_values = True
else:
test_list = [value[qualifier]]
else:
test_list = [schema_field[qualifier]]
value_path = 'field %s qualifier %s' % (key, qualifier)
for i in range(len(test_list)):
test_value = test_list[i]
quote_text = ''
if isinstance(test_value, str):
quote_text = '"'
item_text = ''
if multiple_values:
item_text = '[%s]' % i
header = 'Value %s%s%s for %s%s' % (quote_text, test_value, quote_text, value_path, item_text)
if 'excluded_values' in value.keys():
if not qualifier == 'excluded_values':
if test_value in value['excluded_values']:
message = '%s cannot be one of "excluded_values": %s.' % (header, value['excluded_values'])
raise ModelValidationError(message)
if 'discrete_values' in value.keys():
if not qualifier == 'excluded_values':
if test_value not in value['discrete_values']:
message = '%s must be one of "discrete_values": %s' % (header, value['discrete_values'])
raise ModelValidationError(message)
return fields_dict
def _validate_defaults(self, fields_dict):
# validate key names in fields
for key, value in fields_dict.items():
# retrieve value type and type dict
value_type = self.keyCriteria[self.keyName.index(key)]['value_datatype']
# validate discrete value qualifiers against other criteria
qualifier = 'default_value'
if qualifier in value.keys():
multiple_values = False
if isinstance(value[qualifier], list):
test_list = value[qualifier]
multiple_values = True
else:
test_list = [value[qualifier]]
value_path = 'field %s qualifier %s' % (key, qualifier)
for i in range(len(test_list)):
test_value = test_list[i]
quote_text = ''
if isinstance(test_value, str):
quote_text = '"'
item_text = ''
if multiple_values:
item_text = '[%s]' % i
qualifier_text = value_path + item_text
header = 'Value %s%s%s for %s' % (quote_text, test_value, quote_text, qualifier_text)
if value_type == 'list':
default_item_key = '%s[0]' % key
try:
self.validate(test_value, default_item_key, object_title=header)
except Exception as err:
raise ModelValidationError(str(err).strip().replace('field %s' % default_item_key, qualifier_text))
return fields_dict
def _evaluate_field(self, record_dict, field_name, field_criteria):
    ''' a helper method for evaluating record values based upon query criteria

    :param record_dict: dictionary with model valid data to evaluate
    :param field_name: string with path to root of query field
    :param field_criteria: dictionary with query operators and qualifiers
    :return: boolean (True if no field_criteria evaluate to false)
    '''
    # determine value existence criteria
    value_exists = True
    if 'value_exists' in field_criteria.keys():
        if not field_criteria['value_exists']:
            value_exists = False
    # validate existence of field
    # _walk raises when the path cannot be resolved inside the record
    field_exists = True
    try:
        record_values = self._walk(field_name, record_dict)
    except:
        field_exists = False
    # evaluate existence query criteria
    if value_exists != field_exists:
        return False
    elif not value_exists:
        return True
    # convert javascript dot_path to class dot_path
    field_key = field_name
    if not field_name:
        field_key = '.'
    else:
        if field_name[0] != '.':
            field_key = '.%s' % field_name
    # evaluate other query criteria
    # NOTE: _walk returns a list of endpoints (several when the path crosses a
    # list); most operators pass if ANY endpoint satisfies them, whereas
    # excluded_values and must_not_contain fail if ANY endpoint matches
    for key, value in field_criteria.items():
        if key in ('min_size', 'min_length'):
            found = False
            if self.keyMap[field_key]['value_datatype'] == 'map':
                for record_value in record_values:
                    # serialized size of the map; the -51 offset presumably
                    # compensates for json/str wrapper overhead -- TODO confirm
                    record_size = sys.getsizeof(json.dumps(str(record_value)).replace(' ','')) - 51
                    if record_size >= value:
                        found = True
                        break
            else:
                for record_value in record_values:
                    if len(record_value) >= value:
                        found = True
                        break
            if not found:
                return False
        elif key in ('max_size', 'max_length'):
            found = False
            if self.keyMap[field_key]['value_datatype'] == 'map':
                for record_value in record_values:
                    record_size = sys.getsizeof(json.dumps(str(record_value)).replace(' ','')) - 51
                    if record_size <= value:
                        found = True
                        break
            else:
                for record_value in record_values:
                    if len(record_value) <= value:
                        found = True
                        break
            if not found:
                return False
        elif key == 'min_value':
            found = False
            for record_value in record_values:
                if record_value >= value:
                    found = True
                    break
            if not found:
                return False
        elif key == 'max_value':
            found = False
            for record_value in record_values:
                if record_value <= value:
                    found = True
                    break
            if not found:
                return False
        elif key == 'equal_to':
            found = False
            for record_value in record_values:
                if record_value == value:
                    found = True
                    break
            if not found:
                return False
        elif key == 'greater_than':
            found = False
            for record_value in record_values:
                if record_value > value:
                    found = True
                    break
            if not found:
                return False
        elif key == 'less_than':
            found = False
            for record_value in record_values:
                if record_value < value:
                    found = True
                    break
            if not found:
                return False
        elif key == 'excluded_values':
            # fails if ANY endpoint holds an excluded value
            for record_value in record_values:
                if record_value in value:
                    return False
        elif key == 'discrete_values':
            found = False
            for record_value in record_values:
                if record_value in value:
                    found = True
                    break
            if not found:
                return False
        elif key == 'integer_data':
            found = False
            dummy_int = 1
            for record_value in record_values:
                # direct class comparison (not isinstance) so booleans
                # do not count as integers
                if record_value.__class__ == dummy_int.__class__:
                    found = True
                    break
            if value != found:
                return False
        elif key == 'byte_data':
            found = False
            for record_value in record_values:
                try:
                    decoded_bytes = b64decode(record_value)
                except:
                    decoded_bytes = ''
                if isinstance(decoded_bytes, bytes):
                    found = True
                    break
            if value != found:
                return False
        elif key == 'must_contain':
            # every regex must match at least one endpoint
            for regex in value:
                regex_pattern = re.compile(regex)
                found = False
                for record_value in record_values:
                    if regex_pattern.findall(record_value):
                        found = True
                        break
                if not found:
                    return False
        elif key == 'must_not_contain':
            # no regex may match any endpoint
            for regex in value:
                regex_pattern = re.compile(regex)
                found = False
                for record_value in record_values:
                    if regex_pattern.findall(record_value):
                        found = True
                        break
                if found:
                    return False
        elif key == 'contains_either':
            # at least one regex must match at least one endpoint
            found = False
            for regex in value:
                regex_pattern = re.compile(regex)
                for record_value in record_values:
                    if regex_pattern.findall(record_value):
                        found = True
                        break
                if found:
                    break
            if not found:
                return False
        elif key == 'unique_values':
            # every endpoint list must match the requested uniqueness flag
            for record_value in record_values:
                unique_values = True
                if len(record_value) != len(set(record_value)):
                    unique_values = False
                if value != unique_values:
                    return False
    return True
def _validate_dict(self, input_dict, schema_dict, path_to_root, object_title=''):
    ''' a helper method for recursively validating keys in dictionaries

    validates map size, key datatypes, required/extra fields and each value's
    datatype, recursing into nested maps and lists; mutates and returns
    input_dict (defaults for missing optional fields are injected in place)

    :return input_dict (or raises InputValidationError)
    '''
    # reconstruct key path to current dictionary in model
    # item designators are normalized to [0] to look up rules in keyMap
    rules_top_level_key = re.sub('\[\d+\]', '[0]', path_to_root)
    map_rules = self.keyMap[rules_top_level_key]
    # construct list error report template
    map_error = {
        'object_title': object_title,
        'model_schema': self.schema,
        'input_criteria': map_rules,
        'failed_test': 'value_datatype',
        'input_path': path_to_root,
        'error_value': 0,
        'error_code': 4001
    }
    # validate map size
    # NOTE(review): size is measured on the serialized string; the -51 offset
    # presumably compensates for json/str wrapper overhead -- TODO confirm
    if 'min_size' in map_rules.keys():
        input_size = sys.getsizeof(json.dumps(str(input_dict)).replace(' ','')) - 51
        if input_size < map_rules['min_size']:
            map_error['failed_test'] = 'min_size'
            map_error['error_value'] = input_size
            map_error['error_code'] = 4031
            raise InputValidationError(map_error)
    if 'max_size' in map_rules.keys():
        input_size = sys.getsizeof(json.dumps(str(input_dict)).replace(' ','')) - 51
        if input_size > map_rules['max_size']:
            map_error['failed_test'] = 'max_size'
            map_error['error_value'] = input_size
            map_error['error_code'] = 4032
            raise InputValidationError(map_error)
    # construct lists of keys in input dictionary
    # every key must be a string; non-string keys raise immediately
    input_keys = []
    input_key_list = []
    for key in input_dict.keys():
        error_dict = {
            'object_title': object_title,
            'model_schema': self.schema,
            'input_criteria': self.keyMap[rules_top_level_key],
            'failed_test': 'key_datatype',
            'input_path': path_to_root,
            'error_value': key,
            'error_code': 4004
        }
        error_dict['input_criteria']['key_datatype'] = 'string'
        if path_to_root == '.':
            if not isinstance(key, str):
                input_key_name = path_to_root + str(key)
                error_dict['input_path'] = input_key_name
                raise InputValidationError(error_dict)
            input_key_name = path_to_root + key
        else:
            if not isinstance(key, str):
                input_key_name = path_to_root + '.' + str(key)
                error_dict['input_path'] = input_key_name
                raise InputValidationError(error_dict)
            input_key_name = path_to_root + '.' + key
        input_keys.append(input_key_name)
        input_key_list.append(key)
    # TODO: validate top-level key and values against identical to reference
    # TODO: run lambda function and call validation
    # construct lists of keys in schema dictionary
    max_keys = []
    max_key_list = []
    req_keys = []
    req_key_list = []
    for key in schema_dict.keys():
        if path_to_root == '.':
            schema_key_name = path_to_root + key
        else:
            schema_key_name = path_to_root + '.' + key
        max_keys.append(schema_key_name)
        max_key_list.append(key)
        rules_schema_key_name = re.sub('\[\d+\]', '[0]', schema_key_name)
        if self.keyMap[rules_schema_key_name]['required_field']:
            req_keys.append(schema_key_name)
            req_key_list.append(key)
    # validate existence of required fields
    missing_keys = set(req_keys) - set(input_keys)
    if missing_keys:
        error_dict = {
            'object_title': object_title,
            'model_schema': self.schema,
            'input_criteria': self.keyMap[rules_top_level_key],
            'failed_test': 'required_field',
            'input_path': path_to_root,
            'error_value': list(missing_keys),
            'error_code': 4002
        }
        error_dict['input_criteria']['required_keys'] = req_keys
        raise InputValidationError(error_dict)
    # validate existence of extra fields
    # extra fields only raise when the map does not declare extra_fields
    extra_keys = set(input_keys) - set(max_keys)
    if extra_keys and not self.keyMap[rules_top_level_key]['extra_fields']:
        extra_key_list = []
        for key in extra_keys:
            pathless_key = re.sub(rules_top_level_key, '', key, count=1)
            extra_key_list.append(pathless_key)
        error_dict = {
            'object_title': object_title,
            'model_schema': self.schema,
            'input_criteria': self.keyMap[rules_top_level_key],
            'failed_test': 'extra_fields',
            'input_path': path_to_root,
            'error_value': extra_key_list,
            'error_code': 4003
        }
        error_dict['input_criteria']['maximum_scope'] = max_key_list
        raise InputValidationError(error_dict)
    # validate datatype of value
    for key, value in input_dict.items():
        if path_to_root == '.':
            input_key_name = path_to_root + key
        else:
            input_key_name = path_to_root + '.' + key
        rules_input_key_name = re.sub('\[\d+\]', '[0]', input_key_name)
        # keys outside the schema (permitted extra fields) are left unvalidated
        if input_key_name in max_keys:
            input_criteria = self.keyMap[rules_input_key_name]
            error_dict = {
                'object_title': object_title,
                'model_schema': self.schema,
                'input_criteria': input_criteria,
                'failed_test': 'value_datatype',
                'input_path': input_key_name,
                'error_value': value,
                'error_code': 4001
            }
            try:
                value_index = self._datatype_classes.index(value.__class__)
            except:
                # values of a class outside the supported datatypes fail here
                error_dict['error_value'] = value.__class__.__name__
                raise InputValidationError(error_dict)
            value_type = self._datatype_names[value_index]
            # a declared 'null' datatype accepts any value
            if input_criteria['value_datatype'] == 'null':
                pass
            else:
                if value_type != input_criteria['value_datatype']:
                    raise InputValidationError(error_dict)
            # call appropriate validation sub-routine for datatype of value
            if value_type == 'boolean':
                input_dict[key] = self._validate_boolean(value, input_key_name, object_title)
            elif value_type == 'number':
                input_dict[key] = self._validate_number(value, input_key_name, object_title)
            elif value_type == 'string':
                input_dict[key] = self._validate_string(value, input_key_name, object_title)
            elif value_type == 'map':
                input_dict[key] = self._validate_dict(value, schema_dict[key], input_key_name, object_title)
            elif value_type == 'list':
                input_dict[key] = self._validate_list(value, schema_dict[key], input_key_name, object_title)
    # set default values for empty optional fields
    for key in max_key_list:
        if key not in input_key_list:
            indexed_key = max_keys[max_key_list.index(key)]
            if indexed_key in self.components.keys():
                if 'default_value' in self.components[indexed_key]:
                    input_dict[key] = self.components[indexed_key]['default_value']
    return input_dict
def _validate_list(self, input_list, schema_list, path_to_root, object_title=''):
    '''
    a helper method for recursively validating items in a list

    validates list size, each item's datatype (against the rules declared for
    the list's first item) and item uniqueness; mutates and returns input_list

    :return: input_list (or raises InputValidationError)
    '''
    # construct rules for list and items
    # item designators are normalized to [0] to look up rules in keyMap
    rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
    list_rules = self.keyMap[rules_path_to_root]
    initial_key = rules_path_to_root + '[0]'
    item_rules = self.keyMap[initial_key]
    # construct list error report template
    list_error = {
        'object_title': object_title,
        'model_schema': self.schema,
        'input_criteria': list_rules,
        'failed_test': 'value_datatype',
        'input_path': path_to_root,
        'error_value': 0,
        'error_code': 4001
    }
    # validate list rules
    if 'min_size' in list_rules.keys():
        if len(input_list) < list_rules['min_size']:
            list_error['failed_test'] = 'min_size'
            list_error['error_value'] = len(input_list)
            list_error['error_code'] = 4031
            raise InputValidationError(list_error)
    if 'max_size' in list_rules.keys():
        if len(input_list) > list_rules['max_size']:
            list_error['failed_test'] = 'max_size'
            list_error['error_value'] = len(input_list)
            list_error['error_code'] = 4032
            raise InputValidationError(list_error)
    # construct item error report template
    item_error = {
        'object_title': object_title,
        'model_schema': self.schema,
        'input_criteria': item_rules,
        'failed_test': 'value_datatype',
        'input_path': initial_key,
        'error_value': None,
        'error_code': 4001
    }
    # validate datatype of items
    for i in range(len(input_list)):
        input_path = path_to_root + '[%s]' % i
        item = input_list[i]
        item_error['input_path'] = input_path
        try:
            item_index = self._datatype_classes.index(item.__class__)
        except:
            # items of a class outside the supported datatypes fail here
            item_error['error_value'] = item.__class__.__name__
            raise InputValidationError(item_error)
        item_type = self._datatype_names[item_index]
        item_error['error_value'] = item
        # a declared 'null' datatype accepts any item
        if item_rules['value_datatype'] == 'null':
            pass
        else:
            if item_type != item_rules['value_datatype']:
                raise InputValidationError(item_error)
        # call appropriate validation sub-routine for datatype of item
        if item_type == 'boolean':
            input_list[i] = self._validate_boolean(item, input_path, object_title)
        elif item_type == 'number':
            input_list[i] = self._validate_number(item, input_path, object_title)
        elif item_type == 'string':
            input_list[i] = self._validate_string(item, input_path, object_title)
        elif item_type == 'map':
            input_list[i] = self._validate_dict(item, schema_list[0], input_path, object_title)
        elif item_type == 'list':
            input_list[i] = self._validate_list(item, schema_list[0], input_path, object_title)
    # validate unique values in list
    # NOTE(review): set() requires hashable items, so this qualifier
    # presumably only applies to lists of scalars -- confirm against model rules
    if 'unique_values' in list_rules.keys():
        if len(set(input_list)) < len(input_list):
            list_error['failed_test'] = 'unique_values'
            list_error['error_value'] = input_list
            list_error['error_code'] = 4033
            raise InputValidationError(list_error)
    # TODO: validate top-level item values against identical to reference
    # TODO: run lambda function and call validation url
    return input_list
def _validate_number(self, input_number, path_to_root, object_title=''):
'''
a helper method for validating properties of a number
:return: input_number
'''
rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
input_criteria = self.keyMap[rules_path_to_root]
error_dict = {
'object_title': object_title,
'model_schema': self.schema,
'input_criteria': input_criteria,
'failed_test': 'value_datatype',
'input_path': path_to_root,
'error_value': input_number,
'error_code': 4001
}
if 'integer_data' in input_criteria.keys():
if input_criteria['integer_data'] and not isinstance(input_number, int):
error_dict['failed_test'] = 'integer_data'
error_dict['error_code'] = 4021
raise InputValidationError(error_dict)
if 'min_value' in input_criteria.keys():
if input_number < input_criteria['min_value']:
error_dict['failed_test'] = 'min_value'
error_dict['error_code'] = 4022
raise InputValidationError(error_dict)
if 'max_value' in input_criteria.keys():
if input_number > input_criteria['max_value']:
error_dict['failed_test'] = 'max_value'
error_dict['error_code'] = 4023
raise InputValidationError(error_dict)
if 'greater_than' in input_criteria.keys():
if input_number <= input_criteria['greater_than']:
error_dict['failed_test'] = 'greater_than'
error_dict['error_code'] = 4024
raise InputValidationError(error_dict)
if 'less_than' in input_criteria.keys():
if input_number >= input_criteria['less_than']:
error_dict['failed_test'] = 'less_than'
error_dict['error_code'] = 4025
raise InputValidationError(error_dict)
if 'equal_to' in input_criteria.keys():
if input_number != input_criteria['equal_to']:
error_dict['failed_test'] = 'equal_to'
error_dict['error_code'] = 4026
raise InputValidationError(error_dict)
if 'discrete_values' in input_criteria.keys():
if input_number not in input_criteria['discrete_values']:
error_dict['failed_test'] = 'discrete_values'
error_dict['error_code'] = 4041
raise InputValidationError(error_dict)
if 'excluded_values' in input_criteria.keys():
if input_number in input_criteria['excluded_values']:
error_dict['failed_test'] = 'excluded_values'
error_dict['error_code'] = 4042
raise InputValidationError(error_dict)
# TODO: validate number against identical to reference
# TODO: run lambda function and call validation url
return input_number
def _validate_string(self, input_string, path_to_root, object_title=''):
    '''
    a helper method for validating properties of a string

    each qualifier declared for the field is tested in order; the first
    failure raises an InputValidationError with the matching error code

    :return: input_string
    '''
    rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
    input_criteria = self.keyMap[rules_path_to_root]
    error_dict = {
        'object_title': object_title,
        'model_schema': self.schema,
        'input_criteria': input_criteria,
        'failed_test': 'value_datatype',
        'input_path': path_to_root,
        'error_value': input_string,
        'error_code': 4001
    }
    if 'byte_data' in input_criteria.keys():
        if input_criteria['byte_data']:
            error_dict['failed_test'] = 'byte_data'
            error_dict['error_code'] = 4011
            # the string must be decodable base64 to count as byte data
            try:
                decoded_bytes = b64decode(input_string)
            except:
                raise InputValidationError(error_dict)
            if not isinstance(decoded_bytes, bytes):
                raise InputValidationError(error_dict)
    # min/max/greater/less/equal comparisons use lexicographic string ordering
    if 'min_value' in input_criteria.keys():
        if input_string < input_criteria['min_value']:
            error_dict['failed_test'] = 'min_value'
            error_dict['error_code'] = 4022
            raise InputValidationError(error_dict)
    if 'max_value' in input_criteria.keys():
        if input_string > input_criteria['max_value']:
            error_dict['failed_test'] = 'max_value'
            error_dict['error_code'] = 4023
            raise InputValidationError(error_dict)
    if 'greater_than' in input_criteria.keys():
        if input_string <= input_criteria['greater_than']:
            error_dict['failed_test'] = 'greater_than'
            error_dict['error_code'] = 4024
            raise InputValidationError(error_dict)
    if 'less_than' in input_criteria.keys():
        if input_string >= input_criteria['less_than']:
            error_dict['failed_test'] = 'less_than'
            error_dict['error_code'] = 4025
            raise InputValidationError(error_dict)
    if 'equal_to' in input_criteria.keys():
        if input_string != input_criteria['equal_to']:
            error_dict['failed_test'] = 'equal_to'
            error_dict['error_code'] = 4026
            raise InputValidationError(error_dict)
    if 'min_length' in input_criteria.keys():
        if len(input_string) < input_criteria['min_length']:
            error_dict['failed_test'] = 'min_length'
            error_dict['error_code'] = 4012
            raise InputValidationError(error_dict)
    if 'max_length' in input_criteria.keys():
        if len(input_string) > input_criteria['max_length']:
            error_dict['failed_test'] = 'max_length'
            error_dict['error_code'] = 4013
            raise InputValidationError(error_dict)
    # regex qualifiers: every must_not_contain pattern must miss, every
    # must_contain pattern must hit, and at least one contains_either must hit
    if 'must_not_contain' in input_criteria.keys():
        for regex in input_criteria['must_not_contain']:
            regex_pattern = re.compile(regex)
            if regex_pattern.findall(input_string):
                error_dict['failed_test'] = 'must_not_contain'
                error_dict['error_code'] = 4014
                raise InputValidationError(error_dict)
    if 'must_contain' in input_criteria.keys():
        for regex in input_criteria['must_contain']:
            regex_pattern = re.compile(regex)
            if not regex_pattern.findall(input_string):
                error_dict['failed_test'] = 'must_contain'
                error_dict['error_code'] = 4015
                raise InputValidationError(error_dict)
    if 'contains_either' in input_criteria.keys():
        regex_match = False
        for regex in input_criteria['contains_either']:
            regex_pattern = re.compile(regex)
            if regex_pattern.findall(input_string):
                regex_match = True
        if not regex_match:
            error_dict['failed_test'] = 'contains_either'
            error_dict['error_code'] = 4016
            raise InputValidationError(error_dict)
    if 'discrete_values' in input_criteria.keys():
        if input_string not in input_criteria['discrete_values']:
            error_dict['failed_test'] = 'discrete_values'
            error_dict['error_code'] = 4041
            raise InputValidationError(error_dict)
    if 'excluded_values' in input_criteria.keys():
        if input_string in input_criteria['excluded_values']:
            error_dict['failed_test'] = 'excluded_values'
            error_dict['error_code'] = 4042
            raise InputValidationError(error_dict)
    # TODO: validate string against identical to reference
    # TODO: run lambda function and call validation url
    return input_string
def _validate_boolean(self, input_boolean, path_to_root, object_title=''):
'''
a helper method for validating properties of a boolean
:return: input_boolean
'''
rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
input_criteria = self.keyMap[rules_path_to_root]
error_dict = {
'object_title': object_title,
'model_schema': self.schema,
'input_criteria': input_criteria,
'failed_test': 'value_datatype',
'input_path': path_to_root,
'error_value': input_boolean,
'error_code': 4001
}
if 'equal_to' in input_criteria.keys():
if input_boolean != input_criteria['equal_to']:
error_dict['failed_test'] = 'equal_to'
error_dict['error_code'] = 4026
raise InputValidationError(error_dict)
# TODO: validate boolean against identical to reference
# TODO: run lambda function and call validation url
return input_boolean
def _ingest_dict(self, input_dict, schema_dict, path_to_root):
    '''
    a helper method for ingesting keys, value pairs in a dictionary

    for each key declared in the schema, the input value is kept when its
    datatype matches; otherwise the declared default or an empty value of
    the declared datatype is substituted, so the result is always complete

    :return: valid_dict
    '''
    valid_dict = {}
    # construct path to root for rules
    rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
    # iterate over keys in schema dict
    for key, value in schema_dict.items():
        key_path = path_to_root
        if not key_path == '.':
            key_path += '.'
        key_path += key
        rules_key_path = re.sub('\[\d+\]', '[0]', key_path)
        # the input value is only considered when its class matches the
        # class of the value declared for this key in the schema
        value_match = False
        if key in input_dict.keys():
            value_index = self._datatype_classes.index(value.__class__)
            value_type = self._datatype_names[value_index]
            try:
                v_index = self._datatype_classes.index(input_dict[key].__class__)
                v_type = self._datatype_names[v_index]
                if v_type == value_type:
                    value_match = True
            except:
                value_match = False
        if value_match:
            # delegate to the datatype-specific ingest sub-routine
            if value_type == 'null':
                valid_dict[key] = input_dict[key]
            elif value_type == 'boolean':
                valid_dict[key] = self._ingest_boolean(input_dict[key], key_path)
            elif value_type == 'number':
                valid_dict[key] = self._ingest_number(input_dict[key], key_path)
            elif value_type == 'string':
                valid_dict[key] = self._ingest_string(input_dict[key], key_path)
            elif value_type == 'map':
                valid_dict[key] = self._ingest_dict(input_dict[key], schema_dict[key], key_path)
            elif value_type == 'list':
                valid_dict[key] = self._ingest_list(input_dict[key], schema_dict[key], key_path)
        else:
            # no usable input: fall back to the declared default or an
            # empty value appropriate to the declared datatype
            value_type = self.keyMap[rules_key_path]['value_datatype']
            if 'default_value' in self.keyMap[rules_key_path]:
                valid_dict[key] = self.keyMap[rules_key_path]['default_value']
            elif value_type == 'null':
                valid_dict[key] = None
            elif value_type == 'boolean':
                valid_dict[key] = False
            elif value_type == 'number':
                valid_dict[key] = 0.0
                if 'integer_data' in self.keyMap[rules_key_path].keys():
                    if self.keyMap[rules_key_path]['integer_data']:
                        valid_dict[key] = 0
            elif value_type == 'string':
                valid_dict[key] = ''
            elif value_type == 'list':
                valid_dict[key] = []
            elif value_type == 'map':
                # recurse with an empty input so nested defaults are filled
                valid_dict[key] = self._ingest_dict({}, schema_dict[key], key_path)
    # add extra fields if set to True
    if self.keyMap[rules_path_to_root]['extra_fields']:
        for key, value in input_dict.items():
            if key not in valid_dict.keys():
                valid_dict[key] = value
    return valid_dict
def _ingest_list(self, input_list, schema_list, path_to_root):
    '''
    a helper method for ingesting items in a list

    items whose datatype matches the schema's first declared item and which
    pass validation are kept; all other items are silently dropped; the
    list is truncated once a declared max_size is reached

    :return: valid_list
    '''
    valid_list = []
    # construct max list size
    max_size = None
    rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
    if 'max_size' in self.keyMap[rules_path_to_root].keys():
        # a falsy max_size (e.g. 0) means the list stays empty
        if not self.keyMap[rules_path_to_root]['max_size']:
            return valid_list
        else:
            max_size = self.keyMap[rules_path_to_root]['max_size']
    # iterate over items in input list
    if input_list:
        rules_index = self._datatype_classes.index(schema_list[0].__class__)
        rules_type = self._datatype_names[rules_index]
        for i in range(len(input_list)):
            item_path = '%s[%s]' % (path_to_root, i)
            # the item is only considered when its class matches the class
            # of the first item declared for the list in the schema
            value_match = False
            try:
                item_index = self._datatype_classes.index(input_list[i].__class__)
                item_type = self._datatype_names[item_index]
                if item_type == rules_type:
                    value_match = True
            except:
                value_match = False
            if value_match:
                # items that fail validation are silently skipped
                try:
                    if item_type == 'boolean':
                        valid_list.append(self._validate_boolean(input_list[i], item_path))
                    elif item_type == 'number':
                        valid_list.append(self._validate_number(input_list[i], item_path))
                    elif item_type == 'string':
                        valid_list.append(self._validate_string(input_list[i], item_path))
                    elif item_type == 'map':
                        valid_list.append(self._ingest_dict(input_list[i], schema_list[0], item_path))
                    elif item_type == 'list':
                        valid_list.append(self._ingest_list(input_list[i], schema_list[0], item_path))
                except:
                    pass
            # stop ingesting once the declared max size is reached
            if isinstance(max_size, int):
                if len(valid_list) == max_size:
                    return valid_list
    return valid_list
def _ingest_number(self, input_number, path_to_root):
'''
a helper method for ingesting a number
:return: valid_number
'''
valid_number = 0.0
try:
valid_number = self._validate_number(input_number, path_to_root)
except:
rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
if 'default_value' in self.keyMap[rules_path_to_root]:
valid_number = self.keyMap[rules_path_to_root]['default_value']
elif 'integer_data' in self.keyMap[rules_path_to_root].keys():
if self.keyMap[rules_path_to_root]['integer_data']:
valid_number = 0
return valid_number
def _ingest_string(self, input_string, path_to_root):
'''
a helper method for ingesting a string
:return: valid_string
'''
valid_string = ''
try:
valid_string = self._validate_string(input_string, path_to_root)
except:
rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
if 'default_value' in self.keyMap[rules_path_to_root]:
valid_string = self.keyMap[rules_path_to_root]['default_value']
return valid_string
def _ingest_boolean(self, input_boolean, path_to_root):
'''
a helper method for ingesting a boolean
:return: valid_boolean
'''
valid_boolean = False
try:
valid_boolean = self._validate_boolean(input_boolean, path_to_root)
except:
rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
if 'default_value' in self.keyMap[rules_path_to_root]:
valid_boolean = self.keyMap[rules_path_to_root]['default_value']
return valid_boolean
def _reconstruct(self, path_to_root):
'''
a helper method for finding the schema endpoint from a path to root
:param path_to_root: string with dot path to root from
:return: list, dict, string, number, or boolean at path to root
'''
# split path to root into segments
item_pattern = re.compile('\d+\\]')
dot_pattern = re.compile('\\.|\\[')
path_segments = dot_pattern.split(path_to_root)
# construct base schema endpoint
schema_endpoint = self.schema
# reconstruct schema endpoint from segments
if path_segments[1]:
for i in range(1,len(path_segments)):
if item_pattern.match(path_segments[i]):
schema_endpoint = schema_endpoint[0]
else:
schema_endpoint = schema_endpoint[path_segments[i]]
return schema_endpoint
def _walk(self, path_to_root, record_dict):
    '''
    a helper method for finding the record endpoint from a path to root

    when the path crosses a list designator, the walk fans out across every
    item of that list, so the return value may contain several endpoints

    :param path_to_root: string with dot path to root from
    :param record_dict: dictionary with record to walk
    :return: list of endpoints (list, dict, string, number, or boolean) at path to root
    '''
    # split path to root into segments
    item_pattern = re.compile('\d+\\]')
    dot_pattern = re.compile('\\.|\\[')
    path_segments = dot_pattern.split(path_to_root)
    # construct empty fields
    record_endpoints = []
    # determine starting position
    # a leading dot produces an empty first segment which is discarded
    if not path_segments[0]:
        path_segments.pop(0)
    # define internal recursive function
    # appends each resolved endpoint to the enclosing record_endpoints list
    def _walk_int(path_segments, record_dict):
        record_endpoint = record_dict
        for i in range(0, len(path_segments)):
            if item_pattern.match(path_segments[i]):
                # fan out over every item of the list at this segment
                for j in range(0, len(record_endpoint)):
                    if len(path_segments) == 2:
                        record_endpoints.append(record_endpoint[j])
                    else:
                        # only recurse from the FIRST item designator in the
                        # segment chain; deeper ones are handled by recursion
                        stop_chain = False
                        for x in range(0, i):
                            if item_pattern.match(path_segments[x]):
                                stop_chain = True
                        if not stop_chain:
                            shortened_segments = []
                            for z in range(i + 1, len(path_segments)):
                                shortened_segments.append(path_segments[z])
                            _walk_int(shortened_segments, record_endpoint[j])
            else:
                stop_chain = False
                for y in range(0, i):
                    if item_pattern.match(path_segments[y]):
                        stop_chain = True
                if not stop_chain:
                    if len(path_segments) == i + 1:
                        # final segment: collect the endpoint
                        record_endpoints.append(record_endpoint[path_segments[i]])
                    else:
                        record_endpoint = record_endpoint[path_segments[i]]
    # conduct recursive walk
    _walk_int(path_segments, record_dict)
    return record_endpoints
def ingest(self, **kwargs):
    '''
    a core method to ingest and validate arbitrary keyword data

    **NOTE: data is always returned with this method**

    for each key in the model, a value is returned according
    to the following priority:

    1. value in kwargs if field passes validation test
    2. default value declared for the key in the model
    3. empty value appropriate to datatype of key in the model

    **NOTE: as long as a default value is provided for each key-
    value, returned data will be model valid

    **NOTE: if 'extra_fields' is True for a dictionary, the key-
    value pair of all fields in kwargs which are not declared in
    the model will also be added to the corresponding dictionary
    data

    **NOTE: if 'max_size' is declared for a list, method will
    stop adding input to the list once it reaches max size

    :param kwargs: key, value pairs
    :return: dictionary with keys and value
    '''
    __name__ = '%s.ingest' % self.__class__.__name__
    # delegate the recursive work to the dictionary ingestion helper,
    # starting from the schema root
    return self._ingest_dict(kwargs, self.schema, '.')
def query(self, query_criteria, valid_record=None):
    '''
    a core method for querying model valid data with criteria

    **NOTE: input is only returned if all fields & qualifiers are valid for model

    :param query_criteria: dictionary with model field names and query qualifiers
    :param valid_record: dictionary with model valid record
    :return: boolean (or QueryValidationError)

    an example of how to construct the query_criteria argument:

    query_criteria = {
        '.path.to.number': {
            'min_value': 4.5
        },
        '.path.to.string': {
            'must_contain': [ '\\regex' ]
        }
    }

    **NOTE: for a full list of operators for query_criteria based upon field
    datatype, see either the query-rules.json file or REFERENCE file
    '''
    __name__ = '%s.query' % self.__class__.__name__
    _query_arg = '%s(query_criteria={...})' % __name__
    _record_arg = '%s(valid_record={...})' % __name__
    # validate input
    if not isinstance(query_criteria, dict):
        raise ModelValidationError('%s must be a dictionary.' % _query_arg)
    # convert javascript dot_path to class dot_path
    # scalar criteria values are shorthand for an 'equal_to' qualifier
    criteria_copy = {}
    equal_fields = []
    dot_fields = []
    for key, value in query_criteria.items():
        copy_key = key
        if not key:
            copy_key = '.'
        else:
            if key[0] != '.':
                copy_key = '.%s' % key
                dot_fields.append(copy_key)
        criteria_copy[copy_key] = value
        if value.__class__ in self._datatype_classes[0:4]:
            criteria_copy[copy_key] = {
                'equal_to': value
            }
            equal_fields.append(copy_key)
    # validate query criteria against query rules
    query_kwargs = {
        'fields_dict': criteria_copy,
        'fields_rules': self.queryRules,
        'declared_value': False
    }
    try:
        self._validate_fields(**query_kwargs)
    except ModelValidationError as err:
        # rewrite the internal error message so it reflects the caller's
        # original field spelling (shorthand equal_to and dot-less paths)
        message = err.error['message']
        for field in equal_fields:
            equal_error = 'field %s qualifier equal_to' % field
            if message.find(equal_error) > -1:
                message = message.replace(equal_error, 'field %s' % field)
                break
        field_pattern = re.compile('ield\s(\..*?)\s')
        field_name = field_pattern.findall(message)
        if field_name:
            if field_name[0] in dot_fields:
                # strip the leading dot that was added during normalization
                def _replace_field(x):
                    return 'ield %s ' % x.group(1)[1:]
                message = field_pattern.sub(_replace_field, message)
        raise QueryValidationError(message)
    # query test record
    # the record matches only if every field's criteria evaluate truthy
    if valid_record:
        if not isinstance(valid_record, dict):
            raise ModelValidationError('%s must be a dictionary.' % _record_arg)
        for key, value in criteria_copy.items():
            eval_outcome = self._evaluate_field(valid_record, key, value)
            if not eval_outcome:
                return False
    return True
|
collectiveacuity/jsonModel
|
jsonmodel/validators.py
|
jsonModel.ingest
|
python
|
def ingest(self, **kwargs):
'''
a core method to ingest and validate arbitrary keyword data
**NOTE: data is always returned with this method**
for each key in the model, a value is returned according
to the following priority:
1. value in kwargs if field passes validation test
2. default value declared for the key in the model
3. empty value appropriate to datatype of key in the model
**NOTE: as long as a default value is provided for each key-
value, returned data will be model valid
**NOTE: if 'extra_fields' is True for a dictionary, the key-
value pair of all fields in kwargs which are not declared in
the model will also be added to the corresponding dictionary
data
**NOTE: if 'max_size' is declared for a list, method will
stop adding input to the list once it reaches max size
:param kwargs: key, value pairs
:return: dictionary with keys and value
'''
__name__ = '%s.ingest' % self.__class__.__name__
schema_dict = self.schema
path_to_root = '.'
valid_data = self._ingest_dict(kwargs, schema_dict, path_to_root)
return valid_data
|
a core method to ingest and validate arbitrary keyword data
**NOTE: data is always returned with this method**
for each key in the model, a value is returned according
to the following priority:
1. value in kwargs if field passes validation test
2. default value declared for the key in the model
3. empty value appropriate to datatype of key in the model
**NOTE: as long as a default value is provided for each key-
value, returned data will be model valid
**NOTE: if 'extra_fields' is True for a dictionary, the key-
value pair of all fields in kwargs which are not declared in
the model will also be added to the corresponding dictionary
data
**NOTE: if 'max_size' is declared for a list, method will
stop adding input to the list once it reaches max size
:param kwargs: key, value pairs
:return: dictionary with keys and value
|
train
|
https://github.com/collectiveacuity/jsonModel/blob/1ea64c36d78add3faa7b85ff82c5ec685458c940/jsonmodel/validators.py#L1542-L1578
|
[
"def _ingest_dict(self, input_dict, schema_dict, path_to_root):\n\n '''\n a helper method for ingesting keys, value pairs in a dictionary\n\n :return: valid_dict\n '''\n\n valid_dict = {}\n\n# construct path to root for rules\n rules_path_to_root = re.sub('\\[\\d+\\]', '[0]', path_to_root)\n\n# iterate over keys in schema dict\n for key, value in schema_dict.items():\n key_path = path_to_root\n if not key_path == '.':\n key_path += '.'\n key_path += key\n rules_key_path = re.sub('\\[\\d+\\]', '[0]', key_path)\n value_match = False\n if key in input_dict.keys():\n value_index = self._datatype_classes.index(value.__class__)\n value_type = self._datatype_names[value_index]\n try:\n v_index = self._datatype_classes.index(input_dict[key].__class__)\n v_type = self._datatype_names[v_index]\n if v_type == value_type:\n value_match = True\n except:\n value_match = False\n if value_match:\n if value_type == 'null':\n valid_dict[key] = input_dict[key]\n elif value_type == 'boolean':\n valid_dict[key] = self._ingest_boolean(input_dict[key], key_path)\n elif value_type == 'number':\n valid_dict[key] = self._ingest_number(input_dict[key], key_path)\n elif value_type == 'string':\n valid_dict[key] = self._ingest_string(input_dict[key], key_path)\n elif value_type == 'map':\n valid_dict[key] = self._ingest_dict(input_dict[key], schema_dict[key], key_path)\n elif value_type == 'list':\n valid_dict[key] = self._ingest_list(input_dict[key], schema_dict[key], key_path)\n else:\n value_type = self.keyMap[rules_key_path]['value_datatype']\n if 'default_value' in self.keyMap[rules_key_path]:\n valid_dict[key] = self.keyMap[rules_key_path]['default_value']\n elif value_type == 'null':\n valid_dict[key] = None\n elif value_type == 'boolean':\n valid_dict[key] = False\n elif value_type == 'number':\n valid_dict[key] = 0.0\n if 'integer_data' in self.keyMap[rules_key_path].keys():\n if self.keyMap[rules_key_path]['integer_data']:\n valid_dict[key] = 0\n elif value_type == 'string':\n 
valid_dict[key] = ''\n elif value_type == 'list':\n valid_dict[key] = []\n elif value_type == 'map':\n valid_dict[key] = self._ingest_dict({}, schema_dict[key], key_path)\n\n# add extra fields if set to True\n if self.keyMap[rules_path_to_root]['extra_fields']:\n for key, value in input_dict.items():\n if key not in valid_dict.keys():\n valid_dict[key] = value\n\n return valid_dict\n"
] |
class jsonModel(object):
__rules__ = jsonLoader('jsonmodel', 'models/model-rules.json')
def __init__(self, data_model, query_rules=None):
'''
a method for testing data model declaration & initializing the class
:param data_model: dictionary with json model architecture
:param query_rules: [optional] dictionary with valid field type qualifiers
:return: object with jsonModel methods
'''
# validate schema input
if not isinstance(data_model, dict):
raise ModelValidationError('Data model must be a dictionary.')
elif 'schema' not in data_model.keys():
raise ModelValidationError('Data model must have a schema key.')
elif not isinstance(data_model['schema'], dict):
raise ModelValidationError('Value for the data model "schema" field must be a dictionary.')
elif not data_model['schema']:
raise ModelValidationError('Data model "schema" field must not be empty.')
# construct base methods
from copy import deepcopy
data_model = deepcopy(data_model)
self.schema = data_model['schema']
model_map = mapModel(self.schema)
self.keyName = model_map.keyName
self.keyCriteria = model_map.keyCriteria
# construct protected type classes
self._datatype_names = mapModel._datatype_names
self._datatype_classes = mapModel._datatype_classes
# validate absence of item designators in keys
item_pattern = re.compile('\[\d+\]')
for i in range(len(self.keyName)):
patterns_found = item_pattern.findall(self.keyName[i])
if patterns_found:
for designator in patterns_found:
if designator != '[0]':
message = 'Key name for schema field %s must not contain the item designator pattern %s' % (self.keyName[i], designator)
raise ModelValidationError(message)
# validate existence of first item in list declarations
key_set = set(self.keyName)
for i in range(len(self.keyName)):
if self.keyCriteria[i]['value_datatype'] == 'list':
item_key = self.keyName[i] + '[0]'
if not item_key in key_set:
message = 'Schema field %s must declare an initial item for the list.' % self.keyName[i]
raise ModelValidationError(message)
# alter list requirement if first item is empty
else:
item_index = self.keyName.index(item_key)
if not self.keyCriteria[item_index]['required_field']:
self.keyCriteria[i]['required_field'] = False
# validate title input & construct title method
self.title = ''
if 'title' in data_model.keys():
if not isinstance(data_model['title'], str):
raise ModelValidationError('Value for model title must be a string.')
self.title = data_model['title']
# validate description input & construct description method
self.description = ''
if 'description' in data_model.keys():
if not isinstance(data_model['description'], str):
raise ModelValidationError('Value for model description must be a string.')
self.description = data_model['description']
# validate url input & construct title method
self.url = ''
if 'url' in data_model.keys():
if not isinstance(data_model['url'], str):
raise ModelValidationError('Value for model url must be a string.')
self.title = data_model['url']
# validate metadata input & construct metadata method
self.metadata = {}
if 'metadata' in data_model.keys():
if not isinstance(data_model['metadata'], dict):
raise ModelValidationError('Value for model metadata must be a dictionary.')
self.metadata = data_model['metadata']
# validate max size input & construct maxSize property
# self.maxSize = None
# if 'max_size' in data_model.keys():
# if not isinstance(data_model['max_size'], int):
# raise ModelValidationError('Value for model max_size must be a positive integer.')
# elif data_model['max_size'] < 0:
# raise ModelValidationError('Value for model max_size must be a positive integer.')
# elif data_model['max_size']:
# self.maxSize = data_model['max_size']
# validate components input & construct component property
self.components = {}
if 'components' in data_model.keys():
if not isinstance(data_model['components'], dict):
raise ModelValidationError('Value for model components must be a dictionary.')
self.components = self._validate_fields(data_model['components'], self.__rules__['components'])
# construct keyMap fields from key names and key criteria
self.keyMap = {}
for i in range(len(self.keyName)):
self.keyMap[self.keyName[i]] = self.keyCriteria[i]
for key, value in self.components.items():
# convert javascript dot_path to class dot_path
dot_key = ''
if not key:
dot_key = '.'
else:
if key[0] != '.':
dot_key = '.%s' % key
# add component declarations to keyMap
if key in self.keyMap.keys():
for k, v in self.components[key].items():
self.keyMap[key][k] = v
elif dot_key and dot_key in self.keyMap.keys():
for k, v in self.components[key].items():
self.keyMap[dot_key][k] = v
# validate default values in lists
self._validate_defaults(self.keyMap)
# construct queryRules property from class model rules
self.queryRules = {}
for key, value in self.__rules__['components'].items():
remove_from_query = [ 'required_field', 'default_value', 'example_values', 'field_title', 'field_position', 'field_description', 'field_metadata', 'extra_fields' ]
field_qualifiers = {
'value_exists': False
}
for k, v in value.items():
if k not in remove_from_query:
field_qualifiers[k] = v
self.queryRules[key] = field_qualifiers
# validate query rules input and replace queryRules property
if query_rules:
if not isinstance(query_rules, dict):
message = 'Value for query rules input must be a dictionary.'
raise ModelValidationError(message)
input_set = set(query_rules.keys())
req_set = set(self.queryRules.keys())
if input_set - req_set:
message = 'Query rules input may only have %s field key names.' % req_set
raise ModelValidationError(message)
elif req_set - input_set:
message = 'Query rules input must have all %s field key names.' % req_set
raise ModelValidationError(message)
for key in req_set:
if not isinstance(query_rules[key], dict):
message = 'Value for query rules %s field must be a dictionary.' % key
raise ModelValidationError(message)
input_qualifier_set = set(query_rules[key].keys())
req_qualifier_set = set(self.queryRules[key].keys())
if input_qualifier_set - req_qualifier_set:
message = 'Query rules field %s may only have qualifiers %s' % (key, req_qualifier_set)
raise ModelValidationError(message)
for k, v in query_rules[key].items():
if v.__class__ != self.queryRules[key][k].__class__:
qualifier_index = self._datatype_classes.index(self.queryRules[key][k].__class__)
qualifier_type = self._datatype_names[qualifier_index]
message = 'Value for query rules field %s qualifier %s must be a "%s" datatype.' % (key, k, qualifier_type)
raise ModelValidationError(message)
self.queryRules = query_rules
    def _validate_fields(self, fields_dict, fields_rules, declared_value=True):

        '''
            a helper method for validating the field criteria declared in components

        :param fields_dict: dictionary mapping dot-path field names to criteria dicts
        :param fields_rules: dictionary of permitted qualifiers per field datatype
            (keyed '.string_fields', '.number_fields', etc.)
        :param declared_value: boolean to also cross-check each field's declared
            schema value against the component qualifiers
        :return: fields_dict (unmodified when every declaration is internally valid)

        :raises ModelValidationError: on the first invalid qualifier found
        '''

    # validate key names in fields
        for key, value in fields_dict.items():

        # convert javascript dot_path to class dot_path
            if not key:
                key = '.'
            else:
                if key[0] != '.':
                    key = '.%s' % key
            if key not in self.keyName:
                raise ModelValidationError('Field %s is not a field declared in model schema.' % key)
            elif not isinstance(value, dict):
                raise ModelValidationError('Value for field %s must be a dictionary.' % key)

        # validate field criteria are appropriate to field datatype
            value_type = self.keyCriteria[self.keyName.index(key)]['value_datatype']
            type_dict = {}
            if value_type == 'string':
                type_dict = fields_rules['.string_fields']
            elif value_type == 'number':
                type_dict = fields_rules['.number_fields']
            elif value_type == 'boolean':
                type_dict = fields_rules['.boolean_fields']
            elif value_type == 'list':
                type_dict = fields_rules['.list_fields']
            elif value_type == 'map':
                type_dict = fields_rules['.map_fields']
            elif value_type == 'null':
                type_dict = fields_rules['.null_fields']
            if set(value.keys()) - set(type_dict.keys()):
                raise ModelValidationError('Field %s may only have datatype %s qualifiers %s.' % (key, value_type, set(type_dict.keys())))

        # validate criteria qualifier values are appropriate datatype
        # (each qualifier's datatype must match the archetype in the rules model)
            for k, v in value.items():
                v_index = self._datatype_classes.index(v.__class__)
                v_type = self._datatype_names[v_index]
                qualifier_index = self._datatype_classes.index(type_dict[k].__class__)
                qualifier_type = self._datatype_names[qualifier_index]
                if v_type != qualifier_type:
                    message = 'Value for field %s qualifier %s must be a %s datatype.' % (key, k, qualifier_type)
                    raise ModelValidationError(message)
            # an integer archetype in the rules additionally forbids float input
                if qualifier_type == 'number':
                    if isinstance(type_dict[k], int):
                        if not isinstance(v, int):
                            message = 'Value for field %s qualifier %s must be an integer.' % (key, k)
                            raise ModelValidationError(message)
            # validate internal logic of each qualifier value declaration
                if k in ('must_not_contain', 'must_contain', 'contains_either'):
                    for item in v:
                        if not isinstance(item, str):
                            message = 'Each item in list field %s qualifier %s must be a string.' % (key, k)
                            raise ModelValidationError(message)
                if k in ('min_length', 'max_length', 'min_size', 'max_size'):
                    if v < 0:
                        message = 'Value for field %s qualifier %s cannot be negative.' % (key, k)
                        raise ModelValidationError(message)
                if k in ('discrete_values', 'excluded_values', 'example_values'):
                    for item in v:
                        if value_type == 'number':
                            if not isinstance(item, int) and not isinstance(item, float):
                                message = 'Each item in field %s qualifier %s list must be a number.' % (key, k)
                                raise ModelValidationError(message)
                        elif not isinstance(item, str):
                            message = 'Each item in list for field %s qualifier %s must be a string.' % (key, k)
                            raise ModelValidationError(message)
                if k == 'identical_to':
                    if not v in self.keyName:
                        message = 'Value "%s" for field %s qualifier %s not found in components keys.' % (v, key, k)
                        raise ModelValidationError(message)
            # unique_values: true requires hashable (string or number) list items
                if k == 'unique_values':
                    if v:
                        item_name = key + '[0]'
                        item_type = self.keyCriteria[self.keyName.index(item_name)]['value_datatype']
                        if not item_type in ('number', 'string'):
                            message = 'Field %s[0] must be either a string or number if qualifier "unique_values": true' % key
                            raise ModelValidationError(message)

        # validate lack of other qualifiers if value exist is false
            if 'value_exists' in value.keys():
                if not value['value_exists']:
                    if set(value.keys()) - {'value_exists'}:
                        message = 'If field %s qualifier value_exists: false, field may not have other qualifiers.' % key
                        raise ModelValidationError(message)

        # validate size qualifiers against each other
            size_qualifiers = ['min_size', 'max_size']
            for qualifier in size_qualifiers:
                if qualifier in value.keys():
                    test_value = value[qualifier]
                    value_path = 'field %s qualifier %s' % (key, qualifier)
                    header = 'Value %s for %s' % (test_value, value_path)
                    if 'min_size' in value.keys():
                        if test_value < value['min_size']:
                            message = '%s must not be less than "min_size": %s' % (header, value['min_size'])
                            raise ModelValidationError(message)
                    if 'max_size' in value.keys():
                        if test_value > value['max_size']:
                            message = '%s must not be greater than "max_size": %s' % (header, value['max_size'])
                            raise ModelValidationError(message)

        # validate length qualifiers against each other
            length_qualifiers = ['min_length', 'max_length']
            for qualifier in length_qualifiers:
                if qualifier in value.keys():
                    test_value = value[qualifier]
                    value_path = 'field %s qualifier %s' % (key, qualifier)
                    header = 'Value %s for %s' % (test_value, value_path)
                    if 'min_length' in value.keys():
                        if test_value < value['min_length']:
                            message = '%s must be at least "min_length": %s' % (header, value['min_length'])
                            raise ModelValidationError(message)
                    if 'max_length' in value.keys():
                        if test_value > value['max_length']:
                            message = '%s cannot be more than "max_length": %s' % (header, value['max_length'])
                            raise ModelValidationError(message)

        # validate range qualifiers against each other & length qualifiers
            range_qualifiers = ['min_value', 'max_value', 'greater_than', 'less_than', 'equal_to']
            for qualifier in range_qualifiers:
                if qualifier in value.keys():
                    test_value = value[qualifier]
                    value_path = 'field %s qualifier %s' % (key, qualifier)
                    quote_text = ''
                    if isinstance(test_value, str):
                        quote_text = '"'
                    header = 'Value %s%s%s for %s' % (quote_text, test_value, quote_text, value_path)
                    if 'min_value' in value.keys():
                        if test_value < value['min_value']:
                            message = '%s must not be less than "min_value": %s' % (header, value['min_value'])
                            raise ModelValidationError(message)
                    if 'max_value' in value.keys():
                        if test_value > value['max_value']:
                            message = '%s must not be greater than "max_value": %s' % (header, value['max_value'])
                            raise ModelValidationError(message)
                # a qualifier is never tested against itself for strict inequalities
                    if 'greater_than' in value.keys():
                        if test_value <= value['greater_than'] and not qualifier == 'greater_than':
                            message = '%s must be "greater_than": %s' % (header, value['greater_than'])
                            raise ModelValidationError(message)
                    if 'less_than' in value.keys():
                        if test_value >= value['less_than'] and not qualifier == 'less_than':
                            message = '%s must be "less_than": %s' % (header, value['less_than'])
                            raise ModelValidationError(message)
                    if 'min_length' in value.keys():
                        if len(test_value) < value['min_length']:
                            message = '%s must be at least "min_length": %s' % (header, value['min_length'])
                            raise ModelValidationError(message)
                    if 'max_length' in value.keys():
                        if len(test_value) > value['max_length']:
                            message = '%s cannot be more than "max_length": %s' % (header, value['max_length'])
                            raise ModelValidationError(message)
                    if 'integer_data' in value.keys():
                        if value['integer_data']:
                            if not isinstance(test_value, int):
                                message = '%s must be an "integer_data".' % header
                                raise ModelValidationError(message)
                    if 'must_not_contain' in value.keys():
                        for regex in value['must_not_contain']:
                            regex_pattern = re.compile(regex)
                            if regex_pattern.findall(test_value):
                                message = '%s matches regex pattern in "must_not_contain": ["%s"]' % (header, regex)
                                raise ModelValidationError(message)
                    if 'must_contain' in value.keys():
                        for regex in value['must_contain']:
                            regex_pattern = re.compile(regex)
                            if not regex_pattern.findall(test_value):
                                message = '%s does not match regex pattern in "must_contain": ["%s"].' % (header, regex)
                                raise ModelValidationError(message)
                    if 'contains_either' in value.keys():
                        regex_match = False
                        regex_patterns = []
                        for regex in value['contains_either']:
                            regex_patterns.append(regex)
                            regex_pattern = re.compile(regex)
                            if regex_pattern.findall(test_value):
                                regex_match = True
                        if not regex_match:
                            message = '%s does not match any regex patterns in "contains_either": %s' % (header, regex_patterns)
                            raise ModelValidationError(message)
                # range comparisons (other than equal_to) are incompatible with byte data
                    if 'byte_data' in value.keys():
                        if value['byte_data']:
                            if qualifier != 'equal_to':
                                message = '%s cannot be used with base64 encoded "byte_data".' % header
                                raise ModelValidationError(message)

        # validate discrete value qualifiers against other criteria
        # each declared discrete/default/example value (and optionally the value
        # declared in the schema itself) must satisfy the field's own qualifiers
            schema_field = self.keyCriteria[self.keyName.index(key)]
            discrete_qualifiers = ['declared_value', 'default_value', 'excluded_values', 'discrete_values', 'example_values']
            for qualifier in discrete_qualifiers:
                test_qualifier = False
                if qualifier in schema_field:
                    test_qualifier = True
                # an empty declared_value in the schema is not worth testing
                    if qualifier == 'declared_value' and not schema_field[qualifier]:
                        test_qualifier = False
                if qualifier in value.keys() or (test_qualifier and declared_value):
                    multiple_values = False
                    if qualifier in value.keys():
                        if isinstance(value[qualifier], list):
                            test_list = value[qualifier]
                            multiple_values = True
                        else:
                            test_list = [value[qualifier]]
                    else:
                        test_list = [schema_field[qualifier]]
                    value_path = 'field %s qualifier %s' % (key, qualifier)
                    for i in range(len(test_list)):
                        test_value = test_list[i]
                        quote_text = ''
                        if isinstance(test_value, str):
                            quote_text = '"'
                        item_text = ''
                        if multiple_values:
                            item_text = '[%s]' % i
                        header = 'Value %s%s%s for %s%s' % (quote_text, test_value, quote_text, value_path, item_text)
                        if 'min_value' in value.keys():
                            if test_value < value['min_value']:
                                message = '%s must not be less than "min_value": %s' % (header, value['min_value'])
                                raise ModelValidationError(message)
                        if 'max_value' in value.keys():
                            if test_value > value['max_value']:
                                message = '%s must not be greater than "max_value": %s' % (header, value['max_value'])
                                raise ModelValidationError(message)
                        if 'equal_to' in value.keys():
                            if test_value != value['equal_to']:
                            # NOTE(review): only raises when test_value is a boolean
                            # and the qualifier is not declared_value -- guard looks
                            # narrower than intended; confirm against library intent
                                if qualifier != 'declared_value' and isinstance(test_value, bool):
                                    message = '%s must be "equal_to": %s' % (header, value['equal_to'])
                                    raise ModelValidationError(message)
                        if 'greater_than' in value.keys():
                            if test_value <= value['greater_than']:
                                message = '%s must be "greater_than": %s' % (header, value['greater_than'])
                                raise ModelValidationError(message)
                        if 'less_than' in value.keys():
                            if test_value >= value['less_than']:
                                message = '%s must be "less_than": %s' % (header, value['less_than'])
                                raise ModelValidationError(message)
                        if 'integer_data' in value.keys():
                            if value['integer_data']:
                                if not isinstance(test_value, int):
                                    message = '%s must be an "integer_data".' % header
                                    raise ModelValidationError(message)
                        if 'min_length' in value.keys():
                            if len(test_value) < value['min_length']:
                                message = '%s must be at least "min_length": %s' % (header, value['min_length'])
                                raise ModelValidationError(message)
                        if 'max_length' in value.keys():
                            if len(test_value) > value['max_length']:
                                message = '%s cannot be more than "max_length": %s' % (header, value['max_length'])
                                raise ModelValidationError(message)
                        if 'must_not_contain' in value.keys():
                            for regex in value['must_not_contain']:
                                regex_pattern = re.compile(regex)
                                if regex_pattern.findall(test_value):
                                    message = '%s matches regex pattern in "must_not_contain": ["%s"]' % (header, regex)
                                    raise ModelValidationError(message)
                        if 'must_contain' in value.keys():
                            for regex in value['must_contain']:
                                regex_pattern = re.compile(regex)
                                if not regex_pattern.findall(test_value):
                                    message = '%s does not match regex pattern in "must_contain": ["%s"]' % (header, regex)
                                    raise ModelValidationError(message)
                        if 'contains_either' in value.keys():
                            regex_match = False
                            regex_patterns = []
                            for regex in value['contains_either']:
                                regex_patterns.append(regex)
                                regex_pattern = re.compile(regex)
                                if regex_pattern.findall(test_value):
                                    regex_match = True
                            if not regex_match:
                                message = '%s does not match any regex patterns in "contains_either": %s' % (header, regex_patterns)
                                raise ModelValidationError(message)
                    # NOTE(review): decode is attempted whenever 'byte_data' is
                    # declared, without checking its truthiness -- presumably
                    # "byte_data": false should skip this test; confirm intent
                        if 'byte_data' in value.keys():
                            message = '%s cannot be base64 decoded to "byte_data".' % header
                            try:
                                decoded_bytes = b64decode(test_value)
                            except:
                                raise ModelValidationError(message)
                            if not isinstance(decoded_bytes, bytes):
                                raise ModelValidationError(message)

        # validate discrete value qualifiers against each other
            for qualifier in discrete_qualifiers:
                test_qualifier = False
                if qualifier in schema_field:
                    test_qualifier = True
                    if qualifier == 'declared_value' and not schema_field[qualifier]:
                        test_qualifier = False
                if qualifier in value.keys() or (test_qualifier and declared_value):
                    multiple_values = False
                    if qualifier in value.keys():
                        if isinstance(value[qualifier], list):
                            test_list = value[qualifier]
                            multiple_values = True
                        else:
                            test_list = [value[qualifier]]
                    else:
                        test_list = [schema_field[qualifier]]
                    value_path = 'field %s qualifier %s' % (key, qualifier)
                    for i in range(len(test_list)):
                        test_value = test_list[i]
                        quote_text = ''
                        if isinstance(test_value, str):
                            quote_text = '"'
                        item_text = ''
                        if multiple_values:
                            item_text = '[%s]' % i
                        header = 'Value %s%s%s for %s%s' % (quote_text, test_value, quote_text, value_path, item_text)
                        if 'excluded_values' in value.keys():
                            if not qualifier == 'excluded_values':
                                if test_value in value['excluded_values']:
                                    message = '%s cannot be one of "excluded_values": %s.' % (header, value['excluded_values'])
                                    raise ModelValidationError(message)
                    # NOTE(review): skip condition reuses 'excluded_values' --
                    # presumably should skip when qualifier == 'discrete_values'
                    # to avoid testing the list against itself; confirm intent
                        if 'discrete_values' in value.keys():
                            if not qualifier == 'excluded_values':
                                if test_value not in value['discrete_values']:
                                    message = '%s must be one of "discrete_values": %s' % (header, value['discrete_values'])
                                    raise ModelValidationError(message)

        return fields_dict
def _validate_defaults(self, fields_dict):
# validate key names in fields
for key, value in fields_dict.items():
# retrieve value type and type dict
value_type = self.keyCriteria[self.keyName.index(key)]['value_datatype']
# validate discrete value qualifiers against other criteria
qualifier = 'default_value'
if qualifier in value.keys():
multiple_values = False
if isinstance(value[qualifier], list):
test_list = value[qualifier]
multiple_values = True
else:
test_list = [value[qualifier]]
value_path = 'field %s qualifier %s' % (key, qualifier)
for i in range(len(test_list)):
test_value = test_list[i]
quote_text = ''
if isinstance(test_value, str):
quote_text = '"'
item_text = ''
if multiple_values:
item_text = '[%s]' % i
qualifier_text = value_path + item_text
header = 'Value %s%s%s for %s' % (quote_text, test_value, quote_text, qualifier_text)
if value_type == 'list':
default_item_key = '%s[0]' % key
try:
self.validate(test_value, default_item_key, object_title=header)
except Exception as err:
raise ModelValidationError(str(err).strip().replace('field %s' % default_item_key, qualifier_text))
return fields_dict
def _evaluate_field(self, record_dict, field_name, field_criteria):
''' a helper method for evaluating record values based upon query criteria
:param record_dict: dictionary with model valid data to evaluate
:param field_name: string with path to root of query field
:param field_criteria: dictionary with query operators and qualifiers
:return: boolean (True if no field_criteria evaluate to false)
'''
# determine value existence criteria
value_exists = True
if 'value_exists' in field_criteria.keys():
if not field_criteria['value_exists']:
value_exists = False
# validate existence of field
field_exists = True
try:
record_values = self._walk(field_name, record_dict)
except:
field_exists = False
# evaluate existence query criteria
if value_exists != field_exists:
return False
elif not value_exists:
return True
# convert javascript dot_path to class dot_path
field_key = field_name
if not field_name:
field_key = '.'
else:
if field_name[0] != '.':
field_key = '.%s' % field_name
# evaluate other query criteria
for key, value in field_criteria.items():
if key in ('min_size', 'min_length'):
found = False
if self.keyMap[field_key]['value_datatype'] == 'map':
for record_value in record_values:
record_size = sys.getsizeof(json.dumps(str(record_value)).replace(' ','')) - 51
if record_size >= value:
found = True
break
else:
for record_value in record_values:
if len(record_value) >= value:
found = True
break
if not found:
return False
elif key in ('max_size', 'max_length'):
found = False
if self.keyMap[field_key]['value_datatype'] == 'map':
for record_value in record_values:
record_size = sys.getsizeof(json.dumps(str(record_value)).replace(' ','')) - 51
if record_size <= value:
found = True
break
else:
for record_value in record_values:
if len(record_value) <= value:
found = True
break
if not found:
return False
elif key == 'min_value':
found = False
for record_value in record_values:
if record_value >= value:
found = True
break
if not found:
return False
elif key == 'max_value':
found = False
for record_value in record_values:
if record_value <= value:
found = True
break
if not found:
return False
elif key == 'equal_to':
found = False
for record_value in record_values:
if record_value == value:
found = True
break
if not found:
return False
elif key == 'greater_than':
found = False
for record_value in record_values:
if record_value > value:
found = True
break
if not found:
return False
elif key == 'less_than':
found = False
for record_value in record_values:
if record_value < value:
found = True
break
if not found:
return False
elif key == 'excluded_values':
for record_value in record_values:
if record_value in value:
return False
elif key == 'discrete_values':
found = False
for record_value in record_values:
if record_value in value:
found = True
break
if not found:
return False
elif key == 'integer_data':
found = False
dummy_int = 1
for record_value in record_values:
if record_value.__class__ == dummy_int.__class__:
found = True
break
if value != found:
return False
elif key == 'byte_data':
found = False
for record_value in record_values:
try:
decoded_bytes = b64decode(record_value)
except:
decoded_bytes = ''
if isinstance(decoded_bytes, bytes):
found = True
break
if value != found:
return False
elif key == 'must_contain':
for regex in value:
regex_pattern = re.compile(regex)
found = False
for record_value in record_values:
if regex_pattern.findall(record_value):
found = True
break
if not found:
return False
elif key == 'must_not_contain':
for regex in value:
regex_pattern = re.compile(regex)
found = False
for record_value in record_values:
if regex_pattern.findall(record_value):
found = True
break
if found:
return False
elif key == 'contains_either':
found = False
for regex in value:
regex_pattern = re.compile(regex)
for record_value in record_values:
if regex_pattern.findall(record_value):
found = True
break
if found:
break
if not found:
return False
elif key == 'unique_values':
for record_value in record_values:
unique_values = True
if len(record_value) != len(set(record_value)):
unique_values = False
if value != unique_values:
return False
return True
    def _validate_dict(self, input_dict, schema_dict, path_to_root, object_title=''):

        '''
            a helper method for recursively validating keys in dictionaries

        :param input_dict: dictionary from input to validate
        :param schema_dict: dictionary from model schema at the same path
        :param path_to_root: string with dot-path of this dictionary in the model
        :param object_title: [optional] string with title of input for error reports
        :return: input_dict (values may be coerced by the datatype sub-validators,
            and missing optional fields are populated with their default values)

        :raises InputValidationError: on the first failed validation test
        '''

    # reconstruct key path to current dictionary in model
    # (item designators are normalized to '[0]' so all list items share one rule set)
        rules_top_level_key = re.sub('\[\d+\]', '[0]', path_to_root)
        map_rules = self.keyMap[rules_top_level_key]

    # construct list error report template
        map_error = {
            'object_title': object_title,
            'model_schema': self.schema,
            'input_criteria': map_rules,
            'failed_test': 'value_datatype',
            'input_path': path_to_root,
            'error_value': 0,
            'error_code': 4001
        }

    # validate map size
    # size is measured on the compacted string serialization of the dict
        if 'min_size' in map_rules.keys():
            input_size = sys.getsizeof(json.dumps(str(input_dict)).replace(' ','')) - 51
            if input_size < map_rules['min_size']:
                map_error['failed_test'] = 'min_size'
                map_error['error_value'] = input_size
                map_error['error_code'] = 4031
                raise InputValidationError(map_error)
        if 'max_size' in map_rules.keys():
            input_size = sys.getsizeof(json.dumps(str(input_dict)).replace(' ','')) - 51
            if input_size > map_rules['max_size']:
                map_error['failed_test'] = 'max_size'
                map_error['error_value'] = input_size
                map_error['error_code'] = 4032
                raise InputValidationError(map_error)

    # construct lists of keys in input dictionary
        input_keys = []
        input_key_list = []
        for key in input_dict.keys():
            error_dict = {
                'object_title': object_title,
                'model_schema': self.schema,
                'input_criteria': self.keyMap[rules_top_level_key],
                'failed_test': 'key_datatype',
                'input_path': path_to_root,
                'error_value': key,
                'error_code': 4004
            }
        # NOTE: 'input_criteria' aliases self.keyMap[rules_top_level_key], so this
        # assignment writes 'key_datatype' through into the keyMap entry itself
            error_dict['input_criteria']['key_datatype'] = 'string'
            if path_to_root == '.':
                if not isinstance(key, str):
                    input_key_name = path_to_root + str(key)
                    error_dict['input_path'] = input_key_name
                    raise InputValidationError(error_dict)
                input_key_name = path_to_root + key
            else:
                if not isinstance(key, str):
                    input_key_name = path_to_root + '.' + str(key)
                    error_dict['input_path'] = input_key_name
                    raise InputValidationError(error_dict)
                input_key_name = path_to_root + '.' + key
            input_keys.append(input_key_name)
            input_key_list.append(key)

    # TODO: validate top-level key and values against identical to reference
    # TODO: run lambda function and call validation

    # construct lists of keys in schema dictionary
    # max_keys = every declared key; req_keys = subset marked required_field
        max_keys = []
        max_key_list = []
        req_keys = []
        req_key_list = []
        for key in schema_dict.keys():
            if path_to_root == '.':
                schema_key_name = path_to_root + key
            else:
                schema_key_name = path_to_root + '.' + key
            max_keys.append(schema_key_name)
            max_key_list.append(key)
            rules_schema_key_name = re.sub('\[\d+\]', '[0]', schema_key_name)
            if self.keyMap[rules_schema_key_name]['required_field']:
                req_keys.append(schema_key_name)
                req_key_list.append(key)

    # validate existence of required fields
        missing_keys = set(req_keys) - set(input_keys)
        if missing_keys:
            error_dict = {
                'object_title': object_title,
                'model_schema': self.schema,
                'input_criteria': self.keyMap[rules_top_level_key],
                'failed_test': 'required_field',
                'input_path': path_to_root,
                'error_value': list(missing_keys),
                'error_code': 4002
            }
        # NOTE: writes through into self.keyMap (see aliasing note above)
            error_dict['input_criteria']['required_keys'] = req_keys
            raise InputValidationError(error_dict)

    # validate existence of extra fields
    # extra keys are allowed only when the map declares extra_fields: true
        extra_keys = set(input_keys) - set(max_keys)
        if extra_keys and not self.keyMap[rules_top_level_key]['extra_fields']:
            extra_key_list = []
            for key in extra_keys:
                pathless_key = re.sub(rules_top_level_key, '', key, count=1)
                extra_key_list.append(pathless_key)
            error_dict = {
                'object_title': object_title,
                'model_schema': self.schema,
                'input_criteria': self.keyMap[rules_top_level_key],
                'failed_test': 'extra_fields',
                'input_path': path_to_root,
                'error_value': extra_key_list,
                'error_code': 4003
            }
            error_dict['input_criteria']['maximum_scope'] = max_key_list
            raise InputValidationError(error_dict)

    # validate datatype of value
        for key, value in input_dict.items():
            if path_to_root == '.':
                input_key_name = path_to_root + key
            else:
                input_key_name = path_to_root + '.' + key
            rules_input_key_name = re.sub('\[\d+\]', '[0]', input_key_name)
        # extra fields (not in max_keys) are passed through without validation
            if input_key_name in max_keys:
                input_criteria = self.keyMap[rules_input_key_name]
                error_dict = {
                    'object_title': object_title,
                    'model_schema': self.schema,
                    'input_criteria': input_criteria,
                    'failed_test': 'value_datatype',
                    'input_path': input_key_name,
                    'error_value': value,
                    'error_code': 4001
                }
            # a value whose class is not a recognized json datatype fails here
                try:
                    value_index = self._datatype_classes.index(value.__class__)
                except:
                    error_dict['error_value'] = value.__class__.__name__
                    raise InputValidationError(error_dict)
                value_type = self._datatype_names[value_index]
            # a 'null' declaration in the schema accepts any datatype
                if input_criteria['value_datatype'] == 'null':
                    pass
                else:
                    if value_type != input_criteria['value_datatype']:
                        raise InputValidationError(error_dict)
            # call appropriate validation sub-routine for datatype of value
                if value_type == 'boolean':
                    input_dict[key] = self._validate_boolean(value, input_key_name, object_title)
                elif value_type == 'number':
                    input_dict[key] = self._validate_number(value, input_key_name, object_title)
                elif value_type == 'string':
                    input_dict[key] = self._validate_string(value, input_key_name, object_title)
                elif value_type == 'map':
                    input_dict[key] = self._validate_dict(value, schema_dict[key], input_key_name, object_title)
                elif value_type == 'list':
                    input_dict[key] = self._validate_list(value, schema_dict[key], input_key_name, object_title)

    # set default values for empty optional fields
        for key in max_key_list:
            if key not in input_key_list:
                indexed_key = max_keys[max_key_list.index(key)]
                if indexed_key in self.components.keys():
                    if 'default_value' in self.components[indexed_key]:
                        input_dict[key] = self.components[indexed_key]['default_value']

        return input_dict
def _validate_list(self, input_list, schema_list, path_to_root, object_title=''):
'''
a helper method for recursively validating items in a list
:return: input_list
'''
# construct rules for list and items
rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
list_rules = self.keyMap[rules_path_to_root]
initial_key = rules_path_to_root + '[0]'
item_rules = self.keyMap[initial_key]
# construct list error report template
list_error = {
'object_title': object_title,
'model_schema': self.schema,
'input_criteria': list_rules,
'failed_test': 'value_datatype',
'input_path': path_to_root,
'error_value': 0,
'error_code': 4001
}
# validate list rules
if 'min_size' in list_rules.keys():
if len(input_list) < list_rules['min_size']:
list_error['failed_test'] = 'min_size'
list_error['error_value'] = len(input_list)
list_error['error_code'] = 4031
raise InputValidationError(list_error)
if 'max_size' in list_rules.keys():
if len(input_list) > list_rules['max_size']:
list_error['failed_test'] = 'max_size'
list_error['error_value'] = len(input_list)
list_error['error_code'] = 4032
raise InputValidationError(list_error)
# construct item error report template
item_error = {
'object_title': object_title,
'model_schema': self.schema,
'input_criteria': item_rules,
'failed_test': 'value_datatype',
'input_path': initial_key,
'error_value': None,
'error_code': 4001
}
# validate datatype of items
for i in range(len(input_list)):
input_path = path_to_root + '[%s]' % i
item = input_list[i]
item_error['input_path'] = input_path
try:
item_index = self._datatype_classes.index(item.__class__)
except:
item_error['error_value'] = item.__class__.__name__
raise InputValidationError(item_error)
item_type = self._datatype_names[item_index]
item_error['error_value'] = item
if item_rules['value_datatype'] == 'null':
pass
else:
if item_type != item_rules['value_datatype']:
raise InputValidationError(item_error)
# call appropriate validation sub-routine for datatype of item
if item_type == 'boolean':
input_list[i] = self._validate_boolean(item, input_path, object_title)
elif item_type == 'number':
input_list[i] = self._validate_number(item, input_path, object_title)
elif item_type == 'string':
input_list[i] = self._validate_string(item, input_path, object_title)
elif item_type == 'map':
input_list[i] = self._validate_dict(item, schema_list[0], input_path, object_title)
elif item_type == 'list':
input_list[i] = self._validate_list(item, schema_list[0], input_path, object_title)
# validate unique values in list
if 'unique_values' in list_rules.keys():
if len(set(input_list)) < len(input_list):
list_error['failed_test'] = 'unique_values'
list_error['error_value'] = input_list
list_error['error_code'] = 4033
raise InputValidationError(list_error)
# TODO: validate top-level item values against identical to reference
# TODO: run lambda function and call validation url
return input_list
def _validate_number(self, input_number, path_to_root, object_title=''):
    '''
    a helper method for validating properties of a number

    :param input_number: integer or float to validate against field criteria
    :param path_to_root: string with dot-path of field in model
    :param object_title: [optional] string with name of input for error reporting
    :return: input_number
    :raises InputValidationError: if any qualifier in the model criteria fails
    '''
    # normalize item indices (e.g. [3]) to the schema's canonical [0] path
    rules_path_to_root = re.sub(r'\[\d+\]', '[0]', path_to_root)
    input_criteria = self.keyMap[rules_path_to_root]
    # error report template; failed_test & error_code are updated per check
    error_dict = {
        'object_title': object_title,
        'model_schema': self.schema,
        'input_criteria': input_criteria,
        'failed_test': 'value_datatype',
        'input_path': path_to_root,
        'error_value': input_number,
        'error_code': 4001
    }
    if 'integer_data' in input_criteria.keys():
        if input_criteria['integer_data'] and not isinstance(input_number, int):
            error_dict['failed_test'] = 'integer_data'
            error_dict['error_code'] = 4021
            raise InputValidationError(error_dict)
    if 'min_value' in input_criteria.keys():
        if input_number < input_criteria['min_value']:
            error_dict['failed_test'] = 'min_value'
            error_dict['error_code'] = 4022
            raise InputValidationError(error_dict)
    if 'max_value' in input_criteria.keys():
        if input_number > input_criteria['max_value']:
            error_dict['failed_test'] = 'max_value'
            error_dict['error_code'] = 4023
            raise InputValidationError(error_dict)
    if 'greater_than' in input_criteria.keys():
        if input_number <= input_criteria['greater_than']:
            error_dict['failed_test'] = 'greater_than'
            error_dict['error_code'] = 4024
            raise InputValidationError(error_dict)
    if 'less_than' in input_criteria.keys():
        if input_number >= input_criteria['less_than']:
            error_dict['failed_test'] = 'less_than'
            error_dict['error_code'] = 4025
            raise InputValidationError(error_dict)
    if 'equal_to' in input_criteria.keys():
        if input_number != input_criteria['equal_to']:
            error_dict['failed_test'] = 'equal_to'
            error_dict['error_code'] = 4026
            raise InputValidationError(error_dict)
    if 'discrete_values' in input_criteria.keys():
        if input_number not in input_criteria['discrete_values']:
            error_dict['failed_test'] = 'discrete_values'
            error_dict['error_code'] = 4041
            raise InputValidationError(error_dict)
    if 'excluded_values' in input_criteria.keys():
        if input_number in input_criteria['excluded_values']:
            error_dict['failed_test'] = 'excluded_values'
            error_dict['error_code'] = 4042
            raise InputValidationError(error_dict)
    # TODO: validate number against identical to reference
    # TODO: run lambda function and call validation url
    return input_number
def _validate_string(self, input_string, path_to_root, object_title=''):
    '''
    a helper method for validating properties of a string

    :param input_string: string to validate against field criteria
    :param path_to_root: string with dot-path of field in model
    :param object_title: [optional] string with name of input for error reporting
    :return: input_string
    :raises InputValidationError: if any qualifier in the model criteria fails
    '''
    # normalize item indices (e.g. [3]) to the schema's canonical [0] path
    rules_path_to_root = re.sub(r'\[\d+\]', '[0]', path_to_root)
    input_criteria = self.keyMap[rules_path_to_root]
    # error report template; failed_test & error_code are updated per check
    error_dict = {
        'object_title': object_title,
        'model_schema': self.schema,
        'input_criteria': input_criteria,
        'failed_test': 'value_datatype',
        'input_path': path_to_root,
        'error_value': input_string,
        'error_code': 4001
    }
    if 'byte_data' in input_criteria.keys():
        if input_criteria['byte_data']:
            # string must be base64 decodable into bytes
            error_dict['failed_test'] = 'byte_data'
            error_dict['error_code'] = 4011
            try:
                decoded_bytes = b64decode(input_string)
            except:
                raise InputValidationError(error_dict)
            if not isinstance(decoded_bytes, bytes):
                raise InputValidationError(error_dict)
    # range qualifiers use lexicographical string comparison
    if 'min_value' in input_criteria.keys():
        if input_string < input_criteria['min_value']:
            error_dict['failed_test'] = 'min_value'
            error_dict['error_code'] = 4022
            raise InputValidationError(error_dict)
    if 'max_value' in input_criteria.keys():
        if input_string > input_criteria['max_value']:
            error_dict['failed_test'] = 'max_value'
            error_dict['error_code'] = 4023
            raise InputValidationError(error_dict)
    if 'greater_than' in input_criteria.keys():
        if input_string <= input_criteria['greater_than']:
            error_dict['failed_test'] = 'greater_than'
            error_dict['error_code'] = 4024
            raise InputValidationError(error_dict)
    if 'less_than' in input_criteria.keys():
        if input_string >= input_criteria['less_than']:
            error_dict['failed_test'] = 'less_than'
            error_dict['error_code'] = 4025
            raise InputValidationError(error_dict)
    if 'equal_to' in input_criteria.keys():
        if input_string != input_criteria['equal_to']:
            error_dict['failed_test'] = 'equal_to'
            error_dict['error_code'] = 4026
            raise InputValidationError(error_dict)
    if 'min_length' in input_criteria.keys():
        if len(input_string) < input_criteria['min_length']:
            error_dict['failed_test'] = 'min_length'
            error_dict['error_code'] = 4012
            raise InputValidationError(error_dict)
    if 'max_length' in input_criteria.keys():
        if len(input_string) > input_criteria['max_length']:
            error_dict['failed_test'] = 'max_length'
            error_dict['error_code'] = 4013
            raise InputValidationError(error_dict)
    # regex qualifiers: patterns come from the model declaration
    if 'must_not_contain' in input_criteria.keys():
        for regex in input_criteria['must_not_contain']:
            regex_pattern = re.compile(regex)
            if regex_pattern.findall(input_string):
                error_dict['failed_test'] = 'must_not_contain'
                error_dict['error_code'] = 4014
                raise InputValidationError(error_dict)
    if 'must_contain' in input_criteria.keys():
        for regex in input_criteria['must_contain']:
            regex_pattern = re.compile(regex)
            if not regex_pattern.findall(input_string):
                error_dict['failed_test'] = 'must_contain'
                error_dict['error_code'] = 4015
                raise InputValidationError(error_dict)
    if 'contains_either' in input_criteria.keys():
        # at least one of the declared patterns must match
        regex_match = False
        for regex in input_criteria['contains_either']:
            regex_pattern = re.compile(regex)
            if regex_pattern.findall(input_string):
                regex_match = True
        if not regex_match:
            error_dict['failed_test'] = 'contains_either'
            error_dict['error_code'] = 4016
            raise InputValidationError(error_dict)
    if 'discrete_values' in input_criteria.keys():
        if input_string not in input_criteria['discrete_values']:
            error_dict['failed_test'] = 'discrete_values'
            error_dict['error_code'] = 4041
            raise InputValidationError(error_dict)
    if 'excluded_values' in input_criteria.keys():
        if input_string in input_criteria['excluded_values']:
            error_dict['failed_test'] = 'excluded_values'
            error_dict['error_code'] = 4042
            raise InputValidationError(error_dict)
    # TODO: validate string against identical to reference
    # TODO: run lambda function and call validation url
    return input_string
def _validate_boolean(self, input_boolean, path_to_root, object_title=''):
    '''
    a helper method for validating properties of a boolean

    :param input_boolean: boolean to validate against field criteria
    :param path_to_root: string with dot-path of field in model
    :param object_title: [optional] string with name of input for error reporting
    :return: input_boolean
    :raises InputValidationError: if the equal_to qualifier fails
    '''
    # normalize item indices (e.g. [3]) to the schema's canonical [0] path
    rules_path_to_root = re.sub(r'\[\d+\]', '[0]', path_to_root)
    input_criteria = self.keyMap[rules_path_to_root]
    # error report template; failed_test & error_code are updated per check
    error_dict = {
        'object_title': object_title,
        'model_schema': self.schema,
        'input_criteria': input_criteria,
        'failed_test': 'value_datatype',
        'input_path': path_to_root,
        'error_value': input_boolean,
        'error_code': 4001
    }
    if 'equal_to' in input_criteria.keys():
        if input_boolean != input_criteria['equal_to']:
            error_dict['failed_test'] = 'equal_to'
            error_dict['error_code'] = 4026
            raise InputValidationError(error_dict)
    # TODO: validate boolean against identical to reference
    # TODO: run lambda function and call validation url
    return input_boolean
def _ingest_dict(self, input_dict, schema_dict, path_to_root):
    '''
    a helper method for ingesting keys, value pairs in a dictionary

    keys declared in the schema are kept when the input value's datatype
    matches; otherwise they are replaced with the model's default value or
    an empty value for the declared datatype. undeclared keys are kept only
    when the model allows extra_fields at this path.

    :param input_dict: dictionary to ingest
    :param schema_dict: dictionary with model schema at path_to_root
    :param path_to_root: string with dot-path of dict in model
    :return: valid_dict
    '''
    valid_dict = {}
    # normalize item indices (e.g. [3]) to the schema's canonical [0] path
    rules_path_to_root = re.sub(r'\[\d+\]', '[0]', path_to_root)
    # iterate over keys in schema dict
    for key, value in schema_dict.items():
        key_path = path_to_root
        if not key_path == '.':
            key_path += '.'
        key_path += key
        rules_key_path = re.sub(r'\[\d+\]', '[0]', key_path)
        value_match = False
        if key in input_dict.keys():
            # compare the declared datatype with the input value's datatype
            value_index = self._datatype_classes.index(value.__class__)
            value_type = self._datatype_names[value_index]
            try:
                v_index = self._datatype_classes.index(input_dict[key].__class__)
                v_type = self._datatype_names[v_index]
                if v_type == value_type:
                    value_match = True
            except ValueError:
                # input value's class is not a recognized datatype
                value_match = False
        if value_match:
            # recurse/validate with the sub-routine appropriate to the datatype
            if value_type == 'null':
                valid_dict[key] = input_dict[key]
            elif value_type == 'boolean':
                valid_dict[key] = self._ingest_boolean(input_dict[key], key_path)
            elif value_type == 'number':
                valid_dict[key] = self._ingest_number(input_dict[key], key_path)
            elif value_type == 'string':
                valid_dict[key] = self._ingest_string(input_dict[key], key_path)
            elif value_type == 'map':
                valid_dict[key] = self._ingest_dict(input_dict[key], schema_dict[key], key_path)
            elif value_type == 'list':
                valid_dict[key] = self._ingest_list(input_dict[key], schema_dict[key], key_path)
        else:
            # missing or mismatched key: fall back to default or empty value
            value_type = self.keyMap[rules_key_path]['value_datatype']
            if 'default_value' in self.keyMap[rules_key_path]:
                valid_dict[key] = self.keyMap[rules_key_path]['default_value']
            elif value_type == 'null':
                valid_dict[key] = None
            elif value_type == 'boolean':
                valid_dict[key] = False
            elif value_type == 'number':
                valid_dict[key] = 0.0
                if 'integer_data' in self.keyMap[rules_key_path].keys():
                    if self.keyMap[rules_key_path]['integer_data']:
                        valid_dict[key] = 0
            elif value_type == 'string':
                valid_dict[key] = ''
            elif value_type == 'list':
                valid_dict[key] = []
            elif value_type == 'map':
                valid_dict[key] = self._ingest_dict({}, schema_dict[key], key_path)
    # add extra fields if set to True
    if self.keyMap[rules_path_to_root]['extra_fields']:
        for key, value in input_dict.items():
            if key not in valid_dict.keys():
                valid_dict[key] = value
    return valid_dict
def _ingest_list(self, input_list, schema_list, path_to_root):
    '''
    a helper method for ingesting items in a list

    items whose datatype matches the schema declaration and which pass
    validation are kept; all other items are silently dropped. ingestion
    stops once the model's max_size (if declared) is reached.

    :param input_list: list to ingest
    :param schema_list: list with model schema at path_to_root
    :param path_to_root: string with dot-path of list in model
    :return: valid_list
    '''
    valid_list = []
    # construct max list size
    max_size = None
    # normalize item indices (e.g. [3]) to the schema's canonical [0] path
    rules_path_to_root = re.sub(r'\[\d+\]', '[0]', path_to_root)
    if 'max_size' in self.keyMap[rules_path_to_root].keys():
        if not self.keyMap[rules_path_to_root]['max_size']:
            # max_size of 0 (or falsy) means the list is always empty
            return valid_list
        else:
            max_size = self.keyMap[rules_path_to_root]['max_size']
    # iterate over items in input list
    if input_list:
        rules_index = self._datatype_classes.index(schema_list[0].__class__)
        rules_type = self._datatype_names[rules_index]
        for i in range(len(input_list)):
            item_path = '%s[%s]' % (path_to_root, i)
            value_match = False
            try:
                item_index = self._datatype_classes.index(input_list[i].__class__)
                item_type = self._datatype_names[item_index]
                if item_type == rules_type:
                    value_match = True
            except ValueError:
                # item's class is not a recognized datatype
                value_match = False
            if value_match:
                try:
                    if item_type == 'boolean':
                        valid_list.append(self._validate_boolean(input_list[i], item_path))
                    elif item_type == 'number':
                        valid_list.append(self._validate_number(input_list[i], item_path))
                    elif item_type == 'string':
                        valid_list.append(self._validate_string(input_list[i], item_path))
                    elif item_type == 'map':
                        valid_list.append(self._ingest_dict(input_list[i], schema_list[0], item_path))
                    elif item_type == 'list':
                        valid_list.append(self._ingest_list(input_list[i], schema_list[0], item_path))
                except Exception:
                    # skip items that fail validation rather than abort ingestion
                    pass
            if isinstance(max_size, int):
                if len(valid_list) == max_size:
                    return valid_list
    return valid_list
def _ingest_number(self, input_number, path_to_root):
    '''
    a helper method for ingesting a number

    falls back to the model's default value (or 0.0 / 0 for integer_data
    fields) if the input fails validation

    :param input_number: number to ingest
    :param path_to_root: string with dot-path of field in model
    :return: valid_number
    '''
    valid_number = 0.0
    try:
        valid_number = self._validate_number(input_number, path_to_root)
    except Exception:
        # invalid input: substitute the declared default or an empty value
        rules_path_to_root = re.sub(r'\[\d+\]', '[0]', path_to_root)
        if 'default_value' in self.keyMap[rules_path_to_root]:
            valid_number = self.keyMap[rules_path_to_root]['default_value']
        elif 'integer_data' in self.keyMap[rules_path_to_root].keys():
            if self.keyMap[rules_path_to_root]['integer_data']:
                valid_number = 0
    return valid_number
def _ingest_string(self, input_string, path_to_root):
    '''
    a helper method for ingesting a string

    falls back to the model's default value (or '') if the input fails
    validation

    :param input_string: string to ingest
    :param path_to_root: string with dot-path of field in model
    :return: valid_string
    '''
    valid_string = ''
    try:
        valid_string = self._validate_string(input_string, path_to_root)
    except Exception:
        # invalid input: substitute the declared default or an empty string
        rules_path_to_root = re.sub(r'\[\d+\]', '[0]', path_to_root)
        if 'default_value' in self.keyMap[rules_path_to_root]:
            valid_string = self.keyMap[rules_path_to_root]['default_value']
    return valid_string
def _ingest_boolean(self, input_boolean, path_to_root):
    '''
    a helper method for ingesting a boolean

    falls back to the model's default value (or False) if the input fails
    validation

    :param input_boolean: boolean to ingest
    :param path_to_root: string with dot-path of field in model
    :return: valid_boolean
    '''
    valid_boolean = False
    try:
        valid_boolean = self._validate_boolean(input_boolean, path_to_root)
    except Exception:
        # invalid input: substitute the declared default or False
        rules_path_to_root = re.sub(r'\[\d+\]', '[0]', path_to_root)
        if 'default_value' in self.keyMap[rules_path_to_root]:
            valid_boolean = self.keyMap[rules_path_to_root]['default_value']
    return valid_boolean
def _reconstruct(self, path_to_root):
    '''
    a helper method for finding the schema endpoint from a path to root

    :param path_to_root: string with dot path to root from model schema
    :return: list, dict, string, number, or boolean at path to root
    '''
    # split path to root into segments
    # item_pattern recognizes the trailing part of an index segment
    # (splitting on '[' leaves e.g. '0]'); dot_pattern splits on '.' or '['
    item_pattern = re.compile(r'\d+\]')
    dot_pattern = re.compile(r'\.|\[')
    path_segments = dot_pattern.split(path_to_root)
    # construct base schema endpoint
    schema_endpoint = self.schema
    # reconstruct schema endpoint from segments
    # (segment 0 is the empty string before the leading dot)
    if path_segments[1]:
        for i in range(1, len(path_segments)):
            if item_pattern.match(path_segments[i]):
                # index segment: schemas declare lists by their first item
                schema_endpoint = schema_endpoint[0]
            else:
                schema_endpoint = schema_endpoint[path_segments[i]]
    return schema_endpoint
def _walk(self, path_to_root, record_dict):
    '''
    a helper method for finding the record endpoint(s) from a path to root

    list segments (e.g. [0]) fan out over every item in the record's list,
    so the result is a list of all endpoints reached by the path

    :param path_to_root: string with dot path to root from record
    :param record_dict: dictionary with record to walk
    :return: list of list, dict, string, number, or boolean values at path
    '''
    # split path to root into segments
    # item_pattern recognizes the trailing part of an index segment
    # (splitting on '[' leaves e.g. '0]'); dot_pattern splits on '.' or '['
    item_pattern = re.compile(r'\d+\]')
    dot_pattern = re.compile(r'\.|\[')
    path_segments = dot_pattern.split(path_to_root)
    # construct empty fields
    record_endpoints = []
    # determine starting position (drop empty segment before leading dot)
    if not path_segments[0]:
        path_segments.pop(0)
    # define internal recursive function
    def _walk_int(path_segments, record_dict):
        record_endpoint = record_dict
        for i in range(0, len(path_segments)):
            if item_pattern.match(path_segments[i]):
                # list segment: fan out over every item in the list
                for j in range(0, len(record_endpoint)):
                    if len(path_segments) == 2:
                        record_endpoints.append(record_endpoint[j])
                    else:
                        # only recurse from the first list segment in the chain
                        stop_chain = False
                        for x in range(0, i):
                            if item_pattern.match(path_segments[x]):
                                stop_chain = True
                        if not stop_chain:
                            shortened_segments = []
                            for z in range(i + 1, len(path_segments)):
                                shortened_segments.append(path_segments[z])
                            _walk_int(shortened_segments, record_endpoint[j])
            else:
                # only follow key segments before the first list segment
                stop_chain = False
                for y in range(0, i):
                    if item_pattern.match(path_segments[y]):
                        stop_chain = True
                if not stop_chain:
                    if len(path_segments) == i + 1:
                        record_endpoints.append(record_endpoint[path_segments[i]])
                    else:
                        record_endpoint = record_endpoint[path_segments[i]]
    # conduct recursive walk
    _walk_int(path_segments, record_dict)
    return record_endpoints
def validate(self, input_data, path_to_root='', object_title=''):
    '''
    a core method for validating input against the model

    input_data is only returned if all data is valid

    :param input_data: list, dict, string, number, or boolean to validate
    :param path_to_root: [optional] string with dot-path of model component
    :param object_title: [optional] string with name of input to validate
    :return: input_data (or InputValidationError)
    :raises ModelValidationError: if path_to_root or object_title are invalid
    :raises InputValidationError: if input_data fails model criteria
    '''
    __name__ = '%s.validate' % self.__class__.__name__
    _path_arg = '%s(path_to_root="...")' % __name__
    _title_arg = '%s(object_title="...")' % __name__
    # validate input arguments; normalize path to the internal leading-dot form
    copy_path = path_to_root
    if path_to_root:
        if not isinstance(path_to_root, str):
            raise ModelValidationError('%s must be a string.' % _path_arg)
        else:
            if path_to_root[0] != '.':
                copy_path = '.%s' % path_to_root
        if not copy_path in self.keyMap.keys():
            raise ModelValidationError('%s does not exist in components %s.' % (_path_arg.replace('...', path_to_root), self.keyMap.keys()))
    else:
        copy_path = '.'
    if object_title:
        if not isinstance(object_title, str):
            raise ModelValidationError('%s must be a string' % _title_arg)
    # construct generic error dictionary
    error_dict = {
        'object_title': object_title,
        'model_schema': self.schema,
        'input_criteria': self.keyMap[copy_path],
        'failed_test': 'value_datatype',
        'input_path': copy_path,
        'error_value': input_data,
        'error_code': 4001
    }
    # determine value type of input data
    try:
        input_index = self._datatype_classes.index(input_data.__class__)
    except ValueError:
        # input's class is not one of the recognized datatypes
        error_dict['error_value'] = input_data.__class__.__name__
        raise InputValidationError(error_dict)
    input_type = self._datatype_names[input_index]
    # validate input data type
    if input_type != self.keyMap[copy_path]['value_datatype']:
        raise InputValidationError(error_dict)
    # run helper method appropriate to data type
    if input_type == 'boolean':
        input_data = self._validate_boolean(input_data, copy_path, object_title)
    elif input_type == 'number':
        input_data = self._validate_number(input_data, copy_path, object_title)
    elif input_type == 'string':
        input_data = self._validate_string(input_data, copy_path, object_title)
    elif input_type == 'list':
        schema_list = self._reconstruct(copy_path)
        input_data = self._validate_list(input_data, schema_list, copy_path, object_title)
    elif input_type == 'map':
        schema_dict = self._reconstruct(copy_path)
        input_data = self._validate_dict(input_data, schema_dict, copy_path, object_title)
    return input_data
def query(self, query_criteria, valid_record=None):
    '''
    a core method for querying model valid data with criteria

    **NOTE: input is only returned if all fields & qualifiers are valid for model

    :param query_criteria: dictionary with model field names and query qualifiers
    :param valid_record: dictionary with model valid record
    :return: boolean (or QueryValidationError)

    an example of how to construct the query_criteria argument:

    query_criteria = {
        '.path.to.number': {
            'min_value': 4.5
        },
        '.path.to.string': {
            'must_contain': [ '\\regex' ]
        }
    }

    **NOTE: for a full list of operators for query_criteria based upon field
            datatype, see either the query-rules.json file or REFERENCE file
    '''
    __name__ = '%s.query' % self.__class__.__name__
    _query_arg = '%s(query_criteria={...})' % __name__
    _record_arg = '%s(valid_record={...})' % __name__
    # validate input
    if not isinstance(query_criteria, dict):
        raise ModelValidationError('%s must be a dictionary.' % _query_arg)
    # convert javascript dot_path to class dot_path and expand shorthand
    # (a bare scalar value is shorthand for an equal_to qualifier)
    criteria_copy = {}
    equal_fields = []
    dot_fields = []
    for key, value in query_criteria.items():
        copy_key = key
        if not key:
            copy_key = '.'
        else:
            if key[0] != '.':
                copy_key = '.%s' % key
                dot_fields.append(copy_key)
        criteria_copy[copy_key] = value
        if value.__class__ in self._datatype_classes[0:4]:
            criteria_copy[copy_key] = {
                'equal_to': value
            }
            equal_fields.append(copy_key)
    # validate query criteria against query rules
    query_kwargs = {
        'fields_dict': criteria_copy,
        'fields_rules': self.queryRules,
        'declared_value': False
    }
    try:
        self._validate_fields(**query_kwargs)
    except ModelValidationError as err:
        # rewrite error message to match the caller's original field names
        message = err.error['message']
        for field in equal_fields:
            equal_error = 'field %s qualifier equal_to' % field
            if message.find(equal_error) > -1:
                message = message.replace(equal_error, 'field %s' % field)
                break
        field_pattern = re.compile(r'ield\s(\..*?)\s')
        field_name = field_pattern.findall(message)
        if field_name:
            if field_name[0] in dot_fields:
                # strip the leading dot that was added during normalization
                def _replace_field(x):
                    return 'ield %s ' % x.group(1)[1:]
                message = field_pattern.sub(_replace_field, message)
        raise QueryValidationError(message)
    # query test record
    if valid_record:
        if not isinstance(valid_record, dict):
            raise ModelValidationError('%s must be a dictionary.' % _record_arg)
        for key, value in criteria_copy.items():
            eval_outcome = self._evaluate_field(valid_record, key, value)
            if not eval_outcome:
                return False
    return True
|
collectiveacuity/jsonModel
|
jsonmodel/validators.py
|
jsonModel.query
|
python
|
def query(self, query_criteria, valid_record=None):
'''
a core method for querying model valid data with criteria
**NOTE: input is only returned if all fields & qualifiers are valid for model
:param query_criteria: dictionary with model field names and query qualifiers
:param valid_record: dictionary with model valid record
:return: boolean (or QueryValidationError)
an example of how to construct the query_criteria argument:
query_criteria = {
'.path.to.number': {
'min_value': 4.5
},
'.path.to.string': {
'must_contain': [ '\\regex' ]
}
}
**NOTE: for a full list of operators for query_criteria based upon field
datatype, see either the query-rules.json file or REFERENCE file
'''
__name__ = '%s.query' % self.__class__.__name__
_query_arg = '%s(query_criteria={...})' % __name__
_record_arg = '%s(valid_record={...})' % __name__
# validate input
if not isinstance(query_criteria, dict):
raise ModelValidationError('%s must be a dictionary.' % _query_arg)
# convert javascript dot_path to class dot_path
criteria_copy = {}
equal_fields = []
dot_fields = []
for key, value in query_criteria.items():
copy_key = key
if not key:
copy_key = '.'
else:
if key[0] != '.':
copy_key = '.%s' % key
dot_fields.append(copy_key)
criteria_copy[copy_key] = value
if value.__class__ in self._datatype_classes[0:4]:
criteria_copy[copy_key] = {
'equal_to': value
}
equal_fields.append(copy_key)
# validate query criteria against query rules
query_kwargs = {
'fields_dict': criteria_copy,
'fields_rules': self.queryRules,
'declared_value': False
}
try:
self._validate_fields(**query_kwargs)
except ModelValidationError as err:
message = err.error['message']
for field in equal_fields:
equal_error = 'field %s qualifier equal_to' % field
if message.find(equal_error) > -1:
message = message.replace(equal_error, 'field %s' % field)
break
field_pattern = re.compile('ield\s(\..*?)\s')
field_name = field_pattern.findall(message)
if field_name:
if field_name[0] in dot_fields:
def _replace_field(x):
return 'ield %s ' % x.group(1)[1:]
message = field_pattern.sub(_replace_field, message)
raise QueryValidationError(message)
# query test record
if valid_record:
if not isinstance(valid_record, dict):
raise ModelValidationError('%s must be a dictionary.' % _record_arg)
for key, value in criteria_copy.items():
eval_outcome = self._evaluate_field(valid_record, key, value)
if not eval_outcome:
return False
return True
|
a core method for querying model valid data with criteria
**NOTE: input is only returned if all fields & qualifiers are valid for model
:param query_criteria: dictionary with model field names and query qualifiers
:param valid_record: dictionary with model valid record
:return: boolean (or QueryValidationError)
an example of how to construct the query_criteria argument:
query_criteria = {
'.path.to.number': {
'min_value': 4.5
},
'.path.to.string': {
'must_contain': [ '\\regex' ]
}
}
**NOTE: for a full list of operators for query_criteria based upon field
datatype, see either the query-rules.json file or REFERENCE file
|
train
|
https://github.com/collectiveacuity/jsonModel/blob/1ea64c36d78add3faa7b85ff82c5ec685458c940/jsonmodel/validators.py#L1580-L1666
|
[
"def _validate_fields(self, fields_dict, fields_rules, declared_value=True):\n\n# validate key names in fields\n for key, value in fields_dict.items():\n\n # convert javascript dot_path to class dot_path\n if not key:\n key = '.'\n else:\n if key[0] != '.':\n key = '.%s' % key\n\n if key not in self.keyName:\n raise ModelValidationError('Field %s is not a field declared in model schema.' % key)\n elif not isinstance(value, dict):\n raise ModelValidationError('Value for field %s must be a dictionary.' % key)\n\n# validate field criteria are appropriate to field datatype\n value_type = self.keyCriteria[self.keyName.index(key)]['value_datatype']\n type_dict = {}\n if value_type == 'string':\n type_dict = fields_rules['.string_fields']\n elif value_type == 'number':\n type_dict = fields_rules['.number_fields']\n elif value_type == 'boolean':\n type_dict = fields_rules['.boolean_fields']\n elif value_type == 'list':\n type_dict = fields_rules['.list_fields']\n elif value_type == 'map':\n type_dict = fields_rules['.map_fields']\n elif value_type == 'null':\n type_dict = fields_rules['.null_fields']\n if set(value.keys()) - set(type_dict.keys()):\n raise ModelValidationError('Field %s may only have datatype %s qualifiers %s.' % (key, value_type, set(type_dict.keys())))\n\n# validate criteria qualifier values are appropriate datatype\n for k, v in value.items():\n v_index = self._datatype_classes.index(v.__class__)\n v_type = self._datatype_names[v_index]\n qualifier_index = self._datatype_classes.index(type_dict[k].__class__)\n qualifier_type = self._datatype_names[qualifier_index]\n if v_type != qualifier_type:\n message = 'Value for field %s qualifier %s must be a %s datatype.' % (key, k, qualifier_type)\n raise ModelValidationError(message)\n if qualifier_type == 'number':\n if isinstance(type_dict[k], int):\n if not isinstance(v, int):\n message = 'Value for field %s qualifier %s must be an integer.' 
% (key, k)\n raise ModelValidationError(message)\n\n# validate internal logic of each qualifier value declaration\n if k in ('must_not_contain', 'must_contain', 'contains_either'):\n for item in v:\n if not isinstance(item, str):\n message = 'Each item in list field %s qualifier %s must be a string.' % (key, k)\n raise ModelValidationError(message)\n if k in ('min_length', 'max_length', 'min_size', 'max_size'):\n if v < 0:\n message = 'Value for field %s qualifier %s cannot be negative.' % (key, k)\n raise ModelValidationError(message)\n if k in ('discrete_values', 'excluded_values', 'example_values'):\n for item in v:\n if value_type == 'number':\n if not isinstance(item, int) and not isinstance(item, float):\n message = 'Each item in field %s qualifier %s list must be a number.' % (key, k)\n raise ModelValidationError(message)\n elif not isinstance(item, str):\n message = 'Each item in list for field %s qualifier %s must be a string.' % (key, k)\n raise ModelValidationError(message)\n if k == 'identical_to':\n if not v in self.keyName:\n message = 'Value \"%s\" for field %s qualifier %s not found in components keys.' % (v, key, k)\n raise ModelValidationError(message)\n if k == 'unique_values':\n if v:\n item_name = key + '[0]'\n item_type = self.keyCriteria[self.keyName.index(item_name)]['value_datatype']\n if not item_type in ('number', 'string'):\n message = 'Field %s[0] must be either a string or number if qualifier \"unique_values\": true' % key\n raise ModelValidationError(message)\n\n# validate lack of other qualifiers if value exist is false\n if 'value_exists' in value.keys():\n if not value['value_exists']:\n if set(value.keys()) - {'value_exists'}:\n message = 'If field %s qualifier value_exists: false, field may not have other qualifiers.' 
% key\n raise ModelValidationError(message)\n\n# validate size qualifiers against each other\n size_qualifiers = ['min_size', 'max_size']\n for qualifier in size_qualifiers:\n if qualifier in value.keys():\n test_value = value[qualifier]\n value_path = 'field %s qualifier %s' % (key, qualifier)\n header = 'Value %s for %s' % (test_value, value_path)\n if 'min_size' in value.keys():\n if test_value < value['min_size']:\n message = '%s must not be less than \"min_size\": %s' % (header, value['min_size'])\n raise ModelValidationError(message)\n if 'max_size' in value.keys():\n if test_value > value['max_size']:\n message = '%s must not be greater than \"max_size\": %s' % (header, value['max_size'])\n raise ModelValidationError(message)\n\n# validate length qualifiers against each other\n length_qualifiers = ['min_length', 'max_length']\n for qualifier in length_qualifiers:\n if qualifier in value.keys():\n test_value = value[qualifier]\n value_path = 'field %s qualifier %s' % (key, qualifier)\n header = 'Value %s for %s' % (test_value, value_path)\n if 'min_length' in value.keys():\n if test_value < value['min_length']:\n message = '%s must be at least \"min_length\": %s' % (header, value['min_length'])\n raise ModelValidationError(message)\n if 'max_length' in value.keys():\n if test_value > value['max_length']:\n message = '%s cannot be more than \"max_length\": %s' % (header, value['max_length'])\n raise ModelValidationError(message)\n\n# validate range qualifiers against each other & length qualifiers\n range_qualifiers = ['min_value', 'max_value', 'greater_than', 'less_than', 'equal_to']\n for qualifier in range_qualifiers:\n if qualifier in value.keys():\n test_value = value[qualifier]\n value_path = 'field %s qualifier %s' % (key, qualifier)\n quote_text = ''\n if isinstance(test_value, str):\n quote_text = '\"'\n header = 'Value %s%s%s for %s' % (quote_text, test_value, quote_text, value_path)\n if 'min_value' in value.keys():\n if test_value < 
value['min_value']:\n message = '%s must not be less than \"min_value\": %s' % (header, value['min_value'])\n raise ModelValidationError(message)\n if 'max_value' in value.keys():\n if test_value > value['max_value']:\n message = '%s must not be greater than \"max_value\": %s' % (header, value['max_value'])\n raise ModelValidationError(message)\n if 'greater_than' in value.keys():\n if test_value <= value['greater_than'] and not qualifier == 'greater_than':\n message = '%s must be \"greater_than\": %s' % (header, value['greater_than'])\n raise ModelValidationError(message)\n if 'less_than' in value.keys():\n if test_value >= value['less_than'] and not qualifier == 'less_than':\n message = '%s must be \"less_than\": %s' % (header, value['less_than'])\n raise ModelValidationError(message)\n if 'min_length' in value.keys():\n if len(test_value) < value['min_length']:\n message = '%s must be at least \"min_length\": %s' % (header, value['min_length'])\n raise ModelValidationError(message)\n if 'max_length' in value.keys():\n if len(test_value) > value['max_length']:\n message = '%s cannot be more than \"max_length\": %s' % (header, value['max_length'])\n raise ModelValidationError(message)\n if 'integer_data' in value.keys():\n if value['integer_data']:\n if not isinstance(test_value, int):\n message = '%s must be an \"integer_data\".' % header\n raise ModelValidationError(message)\n if 'must_not_contain' in value.keys():\n for regex in value['must_not_contain']:\n regex_pattern = re.compile(regex)\n if regex_pattern.findall(test_value):\n message = '%s matches regex pattern in \"must_not_contain\": [\"%s\"]' % (header, regex)\n raise ModelValidationError(message)\n if 'must_contain' in value.keys():\n for regex in value['must_contain']:\n regex_pattern = re.compile(regex)\n if not regex_pattern.findall(test_value):\n message = '%s does not match regex pattern in \"must_contain\": [\"%s\"].' 
% (header, regex)\n raise ModelValidationError(message)\n if 'contains_either' in value.keys():\n regex_match = False\n regex_patterns = []\n for regex in value['contains_either']:\n regex_patterns.append(regex)\n regex_pattern = re.compile(regex)\n if regex_pattern.findall(test_value):\n regex_match = True\n if not regex_match:\n message = '%s does not match any regex patterns in \"contains_either\": %s' % (header, regex_patterns)\n raise ModelValidationError(message)\n if 'byte_data' in value.keys():\n if value['byte_data']:\n if qualifier != 'equal_to':\n message = '%s cannot be used with base64 encoded \"byte_data\".' % header\n raise ModelValidationError(message)\n\n# validate discrete value qualifiers against other criteria\n schema_field = self.keyCriteria[self.keyName.index(key)]\n discrete_qualifiers = ['declared_value', 'default_value', 'excluded_values', 'discrete_values', 'example_values']\n for qualifier in discrete_qualifiers:\n test_qualifier = False\n if qualifier in schema_field:\n test_qualifier = True\n if qualifier == 'declared_value' and not schema_field[qualifier]:\n test_qualifier = False\n if qualifier in value.keys() or (test_qualifier and declared_value):\n multiple_values = False\n if qualifier in value.keys():\n if isinstance(value[qualifier], list):\n test_list = value[qualifier]\n multiple_values = True\n else:\n test_list = [value[qualifier]]\n else:\n test_list = [schema_field[qualifier]]\n value_path = 'field %s qualifier %s' % (key, qualifier)\n for i in range(len(test_list)):\n test_value = test_list[i]\n quote_text = ''\n if isinstance(test_value, str):\n quote_text = '\"'\n item_text = ''\n if multiple_values:\n item_text = '[%s]' % i\n header = 'Value %s%s%s for %s%s' % (quote_text, test_value, quote_text, value_path, item_text)\n if 'min_value' in value.keys():\n if test_value < value['min_value']:\n message = '%s must not be less than \"min_value\": %s' % (header, value['min_value'])\n raise ModelValidationError(message)\n if 
'max_value' in value.keys():\n if test_value > value['max_value']:\n message = '%s must not be greater than \"max_value\": %s' % (header, value['max_value'])\n raise ModelValidationError(message)\n if 'equal_to' in value.keys():\n if test_value != value['equal_to']:\n if qualifier != 'declared_value' and isinstance(test_value, bool):\n message = '%s must be \"equal_to\": %s' % (header, value['equal_to'])\n raise ModelValidationError(message)\n if 'greater_than' in value.keys():\n if test_value <= value['greater_than']:\n message = '%s must be \"greater_than\": %s' % (header, value['greater_than'])\n raise ModelValidationError(message)\n if 'less_than' in value.keys():\n if test_value >= value['less_than']:\n message = '%s must be \"less_than\": %s' % (header, value['less_than'])\n raise ModelValidationError(message)\n if 'integer_data' in value.keys():\n if value['integer_data']:\n if not isinstance(test_value, int):\n message = '%s must be an \"integer_data\".' % header\n raise ModelValidationError(message)\n if 'min_length' in value.keys():\n if len(test_value) < value['min_length']:\n message = '%s must be at least \"min_length\": %s' % (header, value['min_length'])\n raise ModelValidationError(message)\n if 'max_length' in value.keys():\n if len(test_value) > value['max_length']:\n message = '%s cannot be more than \"max_length\": %s' % (header, value['max_length'])\n raise ModelValidationError(message)\n if 'must_not_contain' in value.keys():\n for regex in value['must_not_contain']:\n regex_pattern = re.compile(regex)\n if regex_pattern.findall(test_value):\n message = '%s matches regex pattern in \"must_not_contain\": [\"%s\"]' % (header, regex)\n raise ModelValidationError(message)\n if 'must_contain' in value.keys():\n for regex in value['must_contain']:\n regex_pattern = re.compile(regex)\n if not regex_pattern.findall(test_value):\n message = '%s does not match regex pattern in \"must_contain\": [\"%s\"]' % (header, regex)\n raise 
ModelValidationError(message)\n if 'contains_either' in value.keys():\n regex_match = False\n regex_patterns = []\n for regex in value['contains_either']:\n regex_patterns.append(regex)\n regex_pattern = re.compile(regex)\n if regex_pattern.findall(test_value):\n regex_match = True\n if not regex_match:\n message = '%s does not match any regex patterns in \"contains_either\": %s' % (header, regex_patterns)\n raise ModelValidationError(message)\n if 'byte_data' in value.keys():\n message = '%s cannot be base64 decoded to \"byte_data\".' % header\n try:\n decoded_bytes = b64decode(test_value)\n except:\n raise ModelValidationError(message)\n if not isinstance(decoded_bytes, bytes):\n raise ModelValidationError(message)\n\n# validate discrete value qualifiers against each other\n for qualifier in discrete_qualifiers:\n test_qualifier = False\n if qualifier in schema_field:\n test_qualifier = True\n if qualifier == 'declared_value' and not schema_field[qualifier]:\n test_qualifier = False\n if qualifier in value.keys() or (test_qualifier and declared_value):\n multiple_values = False\n if qualifier in value.keys():\n if isinstance(value[qualifier], list):\n test_list = value[qualifier]\n multiple_values = True\n else:\n test_list = [value[qualifier]]\n else:\n test_list = [schema_field[qualifier]]\n value_path = 'field %s qualifier %s' % (key, qualifier)\n for i in range(len(test_list)):\n test_value = test_list[i]\n quote_text = ''\n if isinstance(test_value, str):\n quote_text = '\"'\n item_text = ''\n if multiple_values:\n item_text = '[%s]' % i\n header = 'Value %s%s%s for %s%s' % (quote_text, test_value, quote_text, value_path, item_text)\n if 'excluded_values' in value.keys():\n if not qualifier == 'excluded_values':\n if test_value in value['excluded_values']:\n message = '%s cannot be one of \"excluded_values\": %s.' 
% (header, value['excluded_values'])\n raise ModelValidationError(message)\n if 'discrete_values' in value.keys():\n if not qualifier == 'excluded_values':\n if test_value not in value['discrete_values']:\n message = '%s must be one of \"discrete_values\": %s' % (header, value['discrete_values'])\n raise ModelValidationError(message)\n\n return fields_dict\n",
"def _evaluate_field(self, record_dict, field_name, field_criteria):\n\n ''' a helper method for evaluating record values based upon query criteria\n\n :param record_dict: dictionary with model valid data to evaluate\n :param field_name: string with path to root of query field\n :param field_criteria: dictionary with query operators and qualifiers\n :return: boolean (True if no field_criteria evaluate to false)\n '''\n\n# determine value existence criteria\n value_exists = True\n if 'value_exists' in field_criteria.keys():\n if not field_criteria['value_exists']:\n value_exists = False\n\n# validate existence of field\n field_exists = True\n try:\n record_values = self._walk(field_name, record_dict)\n except:\n field_exists = False\n\n# evaluate existence query criteria\n if value_exists != field_exists:\n return False\n elif not value_exists:\n return True\n\n# convert javascript dot_path to class dot_path\n field_key = field_name\n if not field_name:\n field_key = '.'\n else:\n if field_name[0] != '.':\n field_key = '.%s' % field_name\n\n# evaluate other query criteria\n for key, value in field_criteria.items():\n if key in ('min_size', 'min_length'):\n found = False\n if self.keyMap[field_key]['value_datatype'] == 'map':\n for record_value in record_values:\n record_size = sys.getsizeof(json.dumps(str(record_value)).replace(' ','')) - 51\n if record_size >= value:\n found = True\n break\n else:\n for record_value in record_values:\n if len(record_value) >= value:\n found = True\n break\n if not found:\n return False\n elif key in ('max_size', 'max_length'):\n found = False\n if self.keyMap[field_key]['value_datatype'] == 'map':\n for record_value in record_values:\n record_size = sys.getsizeof(json.dumps(str(record_value)).replace(' ','')) - 51\n if record_size <= value:\n found = True\n break\n else:\n for record_value in record_values:\n if len(record_value) <= value:\n found = True\n break\n if not found:\n return False\n elif key == 'min_value':\n found = 
False\n for record_value in record_values:\n if record_value >= value:\n found = True\n break\n if not found:\n return False\n elif key == 'max_value':\n found = False\n for record_value in record_values:\n if record_value <= value:\n found = True\n break\n if not found:\n return False\n elif key == 'equal_to':\n found = False\n for record_value in record_values:\n if record_value == value:\n found = True\n break\n if not found:\n return False\n elif key == 'greater_than':\n found = False\n for record_value in record_values:\n if record_value > value:\n found = True\n break\n if not found:\n return False\n elif key == 'less_than':\n found = False\n for record_value in record_values:\n if record_value < value:\n found = True\n break\n if not found:\n return False\n elif key == 'excluded_values':\n for record_value in record_values:\n if record_value in value:\n return False\n elif key == 'discrete_values':\n found = False\n for record_value in record_values:\n if record_value in value:\n found = True\n break\n if not found:\n return False\n elif key == 'integer_data':\n found = False\n dummy_int = 1\n for record_value in record_values:\n if record_value.__class__ == dummy_int.__class__:\n found = True\n break\n if value != found:\n return False\n elif key == 'byte_data':\n found = False\n for record_value in record_values:\n try:\n decoded_bytes = b64decode(record_value)\n except:\n decoded_bytes = ''\n if isinstance(decoded_bytes, bytes):\n found = True\n break\n if value != found:\n return False\n elif key == 'must_contain':\n for regex in value:\n regex_pattern = re.compile(regex)\n found = False\n for record_value in record_values:\n if regex_pattern.findall(record_value):\n found = True\n break\n if not found:\n return False\n elif key == 'must_not_contain':\n for regex in value:\n regex_pattern = re.compile(regex)\n found = False\n for record_value in record_values:\n if regex_pattern.findall(record_value):\n found = True\n break\n if found:\n return False\n 
elif key == 'contains_either':\n found = False\n for regex in value:\n regex_pattern = re.compile(regex)\n for record_value in record_values:\n if regex_pattern.findall(record_value):\n found = True\n break\n if found:\n break\n if not found:\n return False\n elif key == 'unique_values':\n for record_value in record_values:\n unique_values = True\n if len(record_value) != len(set(record_value)):\n unique_values = False\n if value != unique_values:\n return False\n\n return True\n"
] |
class jsonModel(object):
__rules__ = jsonLoader('jsonmodel', 'models/model-rules.json')
def __init__(self, data_model, query_rules=None):
'''
a method for testing data model declaration & initializing the class
:param data_model: dictionary with json model architecture
:param query_rules: [optional] dictionary with valid field type qualifiers
:return: object with jsonModel methods
'''
# validate schema input
if not isinstance(data_model, dict):
raise ModelValidationError('Data model must be a dictionary.')
elif 'schema' not in data_model.keys():
raise ModelValidationError('Data model must have a schema key.')
elif not isinstance(data_model['schema'], dict):
raise ModelValidationError('Value for the data model "schema" field must be a dictionary.')
elif not data_model['schema']:
raise ModelValidationError('Data model "schema" field must not be empty.')
# construct base methods
from copy import deepcopy
data_model = deepcopy(data_model)
self.schema = data_model['schema']
model_map = mapModel(self.schema)
self.keyName = model_map.keyName
self.keyCriteria = model_map.keyCriteria
# construct protected type classes
self._datatype_names = mapModel._datatype_names
self._datatype_classes = mapModel._datatype_classes
# validate absence of item designators in keys
item_pattern = re.compile('\[\d+\]')
for i in range(len(self.keyName)):
patterns_found = item_pattern.findall(self.keyName[i])
if patterns_found:
for designator in patterns_found:
if designator != '[0]':
message = 'Key name for schema field %s must not contain the item designator pattern %s' % (self.keyName[i], designator)
raise ModelValidationError(message)
# validate existence of first item in list declarations
key_set = set(self.keyName)
for i in range(len(self.keyName)):
if self.keyCriteria[i]['value_datatype'] == 'list':
item_key = self.keyName[i] + '[0]'
if not item_key in key_set:
message = 'Schema field %s must declare an initial item for the list.' % self.keyName[i]
raise ModelValidationError(message)
# alter list requirement if first item is empty
else:
item_index = self.keyName.index(item_key)
if not self.keyCriteria[item_index]['required_field']:
self.keyCriteria[i]['required_field'] = False
# validate title input & construct title method
self.title = ''
if 'title' in data_model.keys():
if not isinstance(data_model['title'], str):
raise ModelValidationError('Value for model title must be a string.')
self.title = data_model['title']
# validate description input & construct description method
self.description = ''
if 'description' in data_model.keys():
if not isinstance(data_model['description'], str):
raise ModelValidationError('Value for model description must be a string.')
self.description = data_model['description']
# validate url input & construct title method
self.url = ''
if 'url' in data_model.keys():
if not isinstance(data_model['url'], str):
raise ModelValidationError('Value for model url must be a string.')
self.title = data_model['url']
# validate metadata input & construct metadata method
self.metadata = {}
if 'metadata' in data_model.keys():
if not isinstance(data_model['metadata'], dict):
raise ModelValidationError('Value for model metadata must be a dictionary.')
self.metadata = data_model['metadata']
# validate max size input & construct maxSize property
# self.maxSize = None
# if 'max_size' in data_model.keys():
# if not isinstance(data_model['max_size'], int):
# raise ModelValidationError('Value for model max_size must be a positive integer.')
# elif data_model['max_size'] < 0:
# raise ModelValidationError('Value for model max_size must be a positive integer.')
# elif data_model['max_size']:
# self.maxSize = data_model['max_size']
# validate components input & construct component property
self.components = {}
if 'components' in data_model.keys():
if not isinstance(data_model['components'], dict):
raise ModelValidationError('Value for model components must be a dictionary.')
self.components = self._validate_fields(data_model['components'], self.__rules__['components'])
# construct keyMap fields from key names and key criteria
self.keyMap = {}
for i in range(len(self.keyName)):
self.keyMap[self.keyName[i]] = self.keyCriteria[i]
for key, value in self.components.items():
# convert javascript dot_path to class dot_path
dot_key = ''
if not key:
dot_key = '.'
else:
if key[0] != '.':
dot_key = '.%s' % key
# add component declarations to keyMap
if key in self.keyMap.keys():
for k, v in self.components[key].items():
self.keyMap[key][k] = v
elif dot_key and dot_key in self.keyMap.keys():
for k, v in self.components[key].items():
self.keyMap[dot_key][k] = v
# validate default values in lists
self._validate_defaults(self.keyMap)
# construct queryRules property from class model rules
self.queryRules = {}
for key, value in self.__rules__['components'].items():
remove_from_query = [ 'required_field', 'default_value', 'example_values', 'field_title', 'field_position', 'field_description', 'field_metadata', 'extra_fields' ]
field_qualifiers = {
'value_exists': False
}
for k, v in value.items():
if k not in remove_from_query:
field_qualifiers[k] = v
self.queryRules[key] = field_qualifiers
# validate query rules input and replace queryRules property
if query_rules:
if not isinstance(query_rules, dict):
message = 'Value for query rules input must be a dictionary.'
raise ModelValidationError(message)
input_set = set(query_rules.keys())
req_set = set(self.queryRules.keys())
if input_set - req_set:
message = 'Query rules input may only have %s field key names.' % req_set
raise ModelValidationError(message)
elif req_set - input_set:
message = 'Query rules input must have all %s field key names.' % req_set
raise ModelValidationError(message)
for key in req_set:
if not isinstance(query_rules[key], dict):
message = 'Value for query rules %s field must be a dictionary.' % key
raise ModelValidationError(message)
input_qualifier_set = set(query_rules[key].keys())
req_qualifier_set = set(self.queryRules[key].keys())
if input_qualifier_set - req_qualifier_set:
message = 'Query rules field %s may only have qualifiers %s' % (key, req_qualifier_set)
raise ModelValidationError(message)
for k, v in query_rules[key].items():
if v.__class__ != self.queryRules[key][k].__class__:
qualifier_index = self._datatype_classes.index(self.queryRules[key][k].__class__)
qualifier_type = self._datatype_names[qualifier_index]
message = 'Value for query rules field %s qualifier %s must be a "%s" datatype.' % (key, k, qualifier_type)
raise ModelValidationError(message)
self.queryRules = query_rules
    def _validate_fields(self, fields_dict, fields_rules, declared_value=True):

        ''' a helper method for validating component field declarations against the schema

        :param fields_dict: dictionary of qualifier criteria keyed by field name
        :param fields_rules: dictionary with the valid qualifiers for each field datatype
        :param declared_value: [optional] boolean to also test values declared in the schema
        :return: fields_dict (unchanged) if every declaration passes validation
        :raises: ModelValidationError on the first invalid declaration found
        '''

# validate key names in fields
        for key, value in fields_dict.items():

# convert javascript dot_path to class dot_path
            if not key:
                key = '.'
            else:
                if key[0] != '.':
                    key = '.%s' % key
            if key not in self.keyName:
                raise ModelValidationError('Field %s is not a field declared in model schema.' % key)
            elif not isinstance(value, dict):
                raise ModelValidationError('Value for field %s must be a dictionary.' % key)

# validate field criteria are appropriate to field datatype
            value_type = self.keyCriteria[self.keyName.index(key)]['value_datatype']
            type_dict = {}
            if value_type == 'string':
                type_dict = fields_rules['.string_fields']
            elif value_type == 'number':
                type_dict = fields_rules['.number_fields']
            elif value_type == 'boolean':
                type_dict = fields_rules['.boolean_fields']
            elif value_type == 'list':
                type_dict = fields_rules['.list_fields']
            elif value_type == 'map':
                type_dict = fields_rules['.map_fields']
            elif value_type == 'null':
                type_dict = fields_rules['.null_fields']
            if set(value.keys()) - set(type_dict.keys()):
                raise ModelValidationError('Field %s may only have datatype %s qualifiers %s.' % (key, value_type, set(type_dict.keys())))

# validate criteria qualifier values are appropriate datatype
            for k, v in value.items():
                v_index = self._datatype_classes.index(v.__class__)
                v_type = self._datatype_names[v_index]
                qualifier_index = self._datatype_classes.index(type_dict[k].__class__)
                qualifier_type = self._datatype_names[qualifier_index]
                if v_type != qualifier_type:
                    message = 'Value for field %s qualifier %s must be a %s datatype.' % (key, k, qualifier_type)
                    raise ModelValidationError(message)
                # an integer exemplar in the rules requires an integer declaration
                if qualifier_type == 'number':
                    if isinstance(type_dict[k], int):
                        if not isinstance(v, int):
                            message = 'Value for field %s qualifier %s must be an integer.' % (key, k)
                            raise ModelValidationError(message)

# validate internal logic of each qualifier value declaration
                if k in ('must_not_contain', 'must_contain', 'contains_either'):
                    for item in v:
                        if not isinstance(item, str):
                            message = 'Each item in list field %s qualifier %s must be a string.' % (key, k)
                            raise ModelValidationError(message)
                if k in ('min_length', 'max_length', 'min_size', 'max_size'):
                    if v < 0:
                        message = 'Value for field %s qualifier %s cannot be negative.' % (key, k)
                        raise ModelValidationError(message)
                if k in ('discrete_values', 'excluded_values', 'example_values'):
                    for item in v:
                        if value_type == 'number':
                            if not isinstance(item, int) and not isinstance(item, float):
                                message = 'Each item in field %s qualifier %s list must be a number.' % (key, k)
                                raise ModelValidationError(message)
                        elif not isinstance(item, str):
                            message = 'Each item in list for field %s qualifier %s must be a string.' % (key, k)
                            raise ModelValidationError(message)
                if k == 'identical_to':
                    if not v in self.keyName:
                        message = 'Value "%s" for field %s qualifier %s not found in components keys.' % (v, key, k)
                        raise ModelValidationError(message)
                # unique_values only makes sense for lists of hashable scalar items
                if k == 'unique_values':
                    if v:
                        item_name = key + '[0]'
                        item_type = self.keyCriteria[self.keyName.index(item_name)]['value_datatype']
                        if not item_type in ('number', 'string'):
                            message = 'Field %s[0] must be either a string or number if qualifier "unique_values": true' % key
                            raise ModelValidationError(message)

# validate lack of other qualifiers if value exist is false
            if 'value_exists' in value.keys():
                if not value['value_exists']:
                    if set(value.keys()) - {'value_exists'}:
                        message = 'If field %s qualifier value_exists: false, field may not have other qualifiers.' % key
                        raise ModelValidationError(message)

# validate size qualifiers against each other
            size_qualifiers = ['min_size', 'max_size']
            for qualifier in size_qualifiers:
                if qualifier in value.keys():
                    test_value = value[qualifier]
                    value_path = 'field %s qualifier %s' % (key, qualifier)
                    header = 'Value %s for %s' % (test_value, value_path)
                    if 'min_size' in value.keys():
                        if test_value < value['min_size']:
                            message = '%s must not be less than "min_size": %s' % (header, value['min_size'])
                            raise ModelValidationError(message)
                    if 'max_size' in value.keys():
                        if test_value > value['max_size']:
                            message = '%s must not be greater than "max_size": %s' % (header, value['max_size'])
                            raise ModelValidationError(message)

# validate length qualifiers against each other
            length_qualifiers = ['min_length', 'max_length']
            for qualifier in length_qualifiers:
                if qualifier in value.keys():
                    test_value = value[qualifier]
                    value_path = 'field %s qualifier %s' % (key, qualifier)
                    header = 'Value %s for %s' % (test_value, value_path)
                    if 'min_length' in value.keys():
                        if test_value < value['min_length']:
                            message = '%s must be at least "min_length": %s' % (header, value['min_length'])
                            raise ModelValidationError(message)
                    if 'max_length' in value.keys():
                        if test_value > value['max_length']:
                            message = '%s cannot be more than "max_length": %s' % (header, value['max_length'])
                            raise ModelValidationError(message)

# validate range qualifiers against each other & length qualifiers
            range_qualifiers = ['min_value', 'max_value', 'greater_than', 'less_than', 'equal_to']
            for qualifier in range_qualifiers:
                if qualifier in value.keys():
                    test_value = value[qualifier]
                    value_path = 'field %s qualifier %s' % (key, qualifier)
                    quote_text = ''
                    if isinstance(test_value, str):
                        quote_text = '"'
                    header = 'Value %s%s%s for %s' % (quote_text, test_value, quote_text, value_path)
                    if 'min_value' in value.keys():
                        if test_value < value['min_value']:
                            message = '%s must not be less than "min_value": %s' % (header, value['min_value'])
                            raise ModelValidationError(message)
                    if 'max_value' in value.keys():
                        if test_value > value['max_value']:
                            message = '%s must not be greater than "max_value": %s' % (header, value['max_value'])
                            raise ModelValidationError(message)
                    if 'greater_than' in value.keys():
                        if test_value <= value['greater_than'] and not qualifier == 'greater_than':
                            message = '%s must be "greater_than": %s' % (header, value['greater_than'])
                            raise ModelValidationError(message)
                    if 'less_than' in value.keys():
                        if test_value >= value['less_than'] and not qualifier == 'less_than':
                            message = '%s must be "less_than": %s' % (header, value['less_than'])
                            raise ModelValidationError(message)
                    if 'min_length' in value.keys():
                        if len(test_value) < value['min_length']:
                            message = '%s must be at least "min_length": %s' % (header, value['min_length'])
                            raise ModelValidationError(message)
                    if 'max_length' in value.keys():
                        if len(test_value) > value['max_length']:
                            message = '%s cannot be more than "max_length": %s' % (header, value['max_length'])
                            raise ModelValidationError(message)
                    if 'integer_data' in value.keys():
                        if value['integer_data']:
                            if not isinstance(test_value, int):
                                message = '%s must be an "integer_data".' % header
                                raise ModelValidationError(message)
                    if 'must_not_contain' in value.keys():
                        for regex in value['must_not_contain']:
                            regex_pattern = re.compile(regex)
                            if regex_pattern.findall(test_value):
                                message = '%s matches regex pattern in "must_not_contain": ["%s"]' % (header, regex)
                                raise ModelValidationError(message)
                    if 'must_contain' in value.keys():
                        for regex in value['must_contain']:
                            regex_pattern = re.compile(regex)
                            if not regex_pattern.findall(test_value):
                                message = '%s does not match regex pattern in "must_contain": ["%s"].' % (header, regex)
                                raise ModelValidationError(message)
                    if 'contains_either' in value.keys():
                        regex_match = False
                        regex_patterns = []
                        for regex in value['contains_either']:
                            regex_patterns.append(regex)
                            regex_pattern = re.compile(regex)
                            if regex_pattern.findall(test_value):
                                regex_match = True
                        if not regex_match:
                            message = '%s does not match any regex patterns in "contains_either": %s' % (header, regex_patterns)
                            raise ModelValidationError(message)
                    if 'byte_data' in value.keys():
                        if value['byte_data']:
                            if qualifier != 'equal_to':
                                message = '%s cannot be used with base64 encoded "byte_data".' % header
                                raise ModelValidationError(message)

# validate discrete value qualifiers against other criteria
            schema_field = self.keyCriteria[self.keyName.index(key)]
            discrete_qualifiers = ['declared_value', 'default_value', 'excluded_values', 'discrete_values', 'example_values']
            for qualifier in discrete_qualifiers:
                test_qualifier = False
                if qualifier in schema_field:
                    test_qualifier = True
                    if qualifier == 'declared_value' and not schema_field[qualifier]:
                        test_qualifier = False
                if qualifier in value.keys() or (test_qualifier and declared_value):
                    multiple_values = False
                    if qualifier in value.keys():
                        if isinstance(value[qualifier], list):
                            test_list = value[qualifier]
                            multiple_values = True
                        else:
                            test_list = [value[qualifier]]
                    else:
                        test_list = [schema_field[qualifier]]
                    value_path = 'field %s qualifier %s' % (key, qualifier)
                    for i in range(len(test_list)):
                        test_value = test_list[i]
                        quote_text = ''
                        if isinstance(test_value, str):
                            quote_text = '"'
                        item_text = ''
                        if multiple_values:
                            item_text = '[%s]' % i
                        header = 'Value %s%s%s for %s%s' % (quote_text, test_value, quote_text, value_path, item_text)
                        if 'min_value' in value.keys():
                            if test_value < value['min_value']:
                                message = '%s must not be less than "min_value": %s' % (header, value['min_value'])
                                raise ModelValidationError(message)
                        if 'max_value' in value.keys():
                            if test_value > value['max_value']:
                                message = '%s must not be greater than "max_value": %s' % (header, value['max_value'])
                                raise ModelValidationError(message)
                        # NOTE(review): equal_to is only enforced here when the test value
                        # is a boolean and the qualifier is not declared_value — confirm intent
                        if 'equal_to' in value.keys():
                            if test_value != value['equal_to']:
                                if qualifier != 'declared_value' and isinstance(test_value, bool):
                                    message = '%s must be "equal_to": %s' % (header, value['equal_to'])
                                    raise ModelValidationError(message)
                        if 'greater_than' in value.keys():
                            if test_value <= value['greater_than']:
                                message = '%s must be "greater_than": %s' % (header, value['greater_than'])
                                raise ModelValidationError(message)
                        if 'less_than' in value.keys():
                            if test_value >= value['less_than']:
                                message = '%s must be "less_than": %s' % (header, value['less_than'])
                                raise ModelValidationError(message)
                        if 'integer_data' in value.keys():
                            if value['integer_data']:
                                if not isinstance(test_value, int):
                                    message = '%s must be an "integer_data".' % header
                                    raise ModelValidationError(message)
                        if 'min_length' in value.keys():
                            if len(test_value) < value['min_length']:
                                message = '%s must be at least "min_length": %s' % (header, value['min_length'])
                                raise ModelValidationError(message)
                        if 'max_length' in value.keys():
                            if len(test_value) > value['max_length']:
                                message = '%s cannot be more than "max_length": %s' % (header, value['max_length'])
                                raise ModelValidationError(message)
                        if 'must_not_contain' in value.keys():
                            for regex in value['must_not_contain']:
                                regex_pattern = re.compile(regex)
                                if regex_pattern.findall(test_value):
                                    message = '%s matches regex pattern in "must_not_contain": ["%s"]' % (header, regex)
                                    raise ModelValidationError(message)
                        if 'must_contain' in value.keys():
                            for regex in value['must_contain']:
                                regex_pattern = re.compile(regex)
                                if not regex_pattern.findall(test_value):
                                    message = '%s does not match regex pattern in "must_contain": ["%s"]' % (header, regex)
                                    raise ModelValidationError(message)
                        if 'contains_either' in value.keys():
                            regex_match = False
                            regex_patterns = []
                            for regex in value['contains_either']:
                                regex_patterns.append(regex)
                                regex_pattern = re.compile(regex)
                                if regex_pattern.findall(test_value):
                                    regex_match = True
                            if not regex_match:
                                message = '%s does not match any regex patterns in "contains_either": %s' % (header, regex_patterns)
                                raise ModelValidationError(message)
                        # discrete values must themselves be valid base64 when byte_data declared
                        if 'byte_data' in value.keys():
                            message = '%s cannot be base64 decoded to "byte_data".' % header
                            try:
                                decoded_bytes = b64decode(test_value)
                            except:
                                raise ModelValidationError(message)
                            if not isinstance(decoded_bytes, bytes):
                                raise ModelValidationError(message)

# validate discrete value qualifiers against each other
            for qualifier in discrete_qualifiers:
                test_qualifier = False
                if qualifier in schema_field:
                    test_qualifier = True
                    if qualifier == 'declared_value' and not schema_field[qualifier]:
                        test_qualifier = False
                if qualifier in value.keys() or (test_qualifier and declared_value):
                    multiple_values = False
                    if qualifier in value.keys():
                        if isinstance(value[qualifier], list):
                            test_list = value[qualifier]
                            multiple_values = True
                        else:
                            test_list = [value[qualifier]]
                    else:
                        test_list = [schema_field[qualifier]]
                    value_path = 'field %s qualifier %s' % (key, qualifier)
                    for i in range(len(test_list)):
                        test_value = test_list[i]
                        quote_text = ''
                        if isinstance(test_value, str):
                            quote_text = '"'
                        item_text = ''
                        if multiple_values:
                            item_text = '[%s]' % i
                        header = 'Value %s%s%s for %s%s' % (quote_text, test_value, quote_text, value_path, item_text)
                        if 'excluded_values' in value.keys():
                            if not qualifier == 'excluded_values':
                                if test_value in value['excluded_values']:
                                    message = '%s cannot be one of "excluded_values": %s.' % (header, value['excluded_values'])
                                    raise ModelValidationError(message)
                        if 'discrete_values' in value.keys():
                            if not qualifier == 'excluded_values':
                                if test_value not in value['discrete_values']:
                                    message = '%s must be one of "discrete_values": %s' % (header, value['discrete_values'])
                                    raise ModelValidationError(message)

        return fields_dict
def _validate_defaults(self, fields_dict):
# validate key names in fields
for key, value in fields_dict.items():
# retrieve value type and type dict
value_type = self.keyCriteria[self.keyName.index(key)]['value_datatype']
# validate discrete value qualifiers against other criteria
qualifier = 'default_value'
if qualifier in value.keys():
multiple_values = False
if isinstance(value[qualifier], list):
test_list = value[qualifier]
multiple_values = True
else:
test_list = [value[qualifier]]
value_path = 'field %s qualifier %s' % (key, qualifier)
for i in range(len(test_list)):
test_value = test_list[i]
quote_text = ''
if isinstance(test_value, str):
quote_text = '"'
item_text = ''
if multiple_values:
item_text = '[%s]' % i
qualifier_text = value_path + item_text
header = 'Value %s%s%s for %s' % (quote_text, test_value, quote_text, qualifier_text)
if value_type == 'list':
default_item_key = '%s[0]' % key
try:
self.validate(test_value, default_item_key, object_title=header)
except Exception as err:
raise ModelValidationError(str(err).strip().replace('field %s' % default_item_key, qualifier_text))
return fields_dict
    def _evaluate_field(self, record_dict, field_name, field_criteria):

        ''' a helper method for evaluating record values based upon query criteria

        :param record_dict: dictionary with model valid data to evaluate
        :param field_name: string with path to root of query field
        :param field_criteria: dictionary with query operators and qualifiers
        :return: boolean (True if no field_criteria evaluate to false)
        '''

# determine value existence criteria
        value_exists = True
        if 'value_exists' in field_criteria.keys():
            if not field_criteria['value_exists']:
                value_exists = False

# validate existence of field
# _walk raising is treated as "field not present in the record"
        field_exists = True
        try:
            record_values = self._walk(field_name, record_dict)
        except:
            field_exists = False

# evaluate existence query criteria
        if value_exists != field_exists:
            return False
        elif not value_exists:
            return True

# convert javascript dot_path to class dot_path
        field_key = field_name
        if not field_name:
            field_key = '.'
        else:
            if field_name[0] != '.':
                field_key = '.%s' % field_name

# evaluate other query criteria
# each branch short-circuits False on the first failed criterion; a criterion
# passes if ANY of the walked record values satisfies it
        for key, value in field_criteria.items():
            if key in ('min_size', 'min_length'):
                found = False
                if self.keyMap[field_key]['value_datatype'] == 'map':
                    for record_value in record_values:
                        # NOTE(review): -51 appears to offset json/str serialization
                        # overhead in the size estimate — confirm against other uses
                        record_size = sys.getsizeof(json.dumps(str(record_value)).replace(' ','')) - 51
                        if record_size >= value:
                            found = True
                            break
                else:
                    for record_value in record_values:
                        if len(record_value) >= value:
                            found = True
                            break
                if not found:
                    return False
            elif key in ('max_size', 'max_length'):
                found = False
                if self.keyMap[field_key]['value_datatype'] == 'map':
                    for record_value in record_values:
                        record_size = sys.getsizeof(json.dumps(str(record_value)).replace(' ','')) - 51
                        if record_size <= value:
                            found = True
                            break
                else:
                    for record_value in record_values:
                        if len(record_value) <= value:
                            found = True
                            break
                if not found:
                    return False
            elif key == 'min_value':
                found = False
                for record_value in record_values:
                    if record_value >= value:
                        found = True
                        break
                if not found:
                    return False
            elif key == 'max_value':
                found = False
                for record_value in record_values:
                    if record_value <= value:
                        found = True
                        break
                if not found:
                    return False
            elif key == 'equal_to':
                found = False
                for record_value in record_values:
                    if record_value == value:
                        found = True
                        break
                if not found:
                    return False
            elif key == 'greater_than':
                found = False
                for record_value in record_values:
                    if record_value > value:
                        found = True
                        break
                if not found:
                    return False
            elif key == 'less_than':
                found = False
                for record_value in record_values:
                    if record_value < value:
                        found = True
                        break
                if not found:
                    return False
            elif key == 'excluded_values':
                for record_value in record_values:
                    if record_value in value:
                        return False
            elif key == 'discrete_values':
                found = False
                for record_value in record_values:
                    if record_value in value:
                        found = True
                        break
                if not found:
                    return False
            # for boolean qualifiers the criterion value itself (True/False)
            # must match whether a qualifying record value was found
            elif key == 'integer_data':
                found = False
                dummy_int = 1
                for record_value in record_values:
                    if record_value.__class__ == dummy_int.__class__:
                        found = True
                        break
                if value != found:
                    return False
            elif key == 'byte_data':
                found = False
                for record_value in record_values:
                    try:
                        decoded_bytes = b64decode(record_value)
                    except:
                        decoded_bytes = ''
                    if isinstance(decoded_bytes, bytes):
                        found = True
                        break
                if value != found:
                    return False
            elif key == 'must_contain':
                for regex in value:
                    regex_pattern = re.compile(regex)
                    found = False
                    for record_value in record_values:
                        if regex_pattern.findall(record_value):
                            found = True
                            break
                    if not found:
                        return False
            elif key == 'must_not_contain':
                for regex in value:
                    regex_pattern = re.compile(regex)
                    found = False
                    for record_value in record_values:
                        if regex_pattern.findall(record_value):
                            found = True
                            break
                    if found:
                        return False
            elif key == 'contains_either':
                found = False
                for regex in value:
                    regex_pattern = re.compile(regex)
                    for record_value in record_values:
                        if regex_pattern.findall(record_value):
                            found = True
                            break
                    if found:
                        break
                if not found:
                    return False
            elif key == 'unique_values':
                for record_value in record_values:
                    unique_values = True
                    if len(record_value) != len(set(record_value)):
                        unique_values = False
                    if value != unique_values:
                        return False

        return True
def _validate_dict(self, input_dict, schema_dict, path_to_root, object_title=''):
    '''
    a helper method for recursively validating keys in dictionaries

    validates map size, key datatypes, required fields, extra fields and
    then dispatches each value to the validator for its datatype

    :param input_dict: dictionary with input data to validate
    :param schema_dict: dictionary with schema at the same path as the input
    :param path_to_root: string with dot-path of dictionary in model
    :param object_title: [optional] string with name of input for error reports
    :return: input_dict (with defaults injected for empty optional fields)
    '''
    # reconstruct key path to current dictionary in model
    # list indices are normalized to [0] because rules are keyed to the prototype item
    rules_top_level_key = re.sub('\[\d+\]', '[0]', path_to_root)
    map_rules = self.keyMap[rules_top_level_key]
    # construct list error report template
    map_error = {
        'object_title': object_title,
        'model_schema': self.schema,
        'input_criteria': map_rules,
        'failed_test': 'value_datatype',
        'input_path': path_to_root,
        'error_value': 0,
        'error_code': 4001
    }
    # validate map size
    # NOTE(review): size is measured on a serialized string form; the -51
    # presumably subtracts fixed serialization overhead -- confirm upstream
    if 'min_size' in map_rules.keys():
        input_size = sys.getsizeof(json.dumps(str(input_dict)).replace(' ','')) - 51
        if input_size < map_rules['min_size']:
            map_error['failed_test'] = 'min_size'
            map_error['error_value'] = input_size
            map_error['error_code'] = 4031
            raise InputValidationError(map_error)
    if 'max_size' in map_rules.keys():
        input_size = sys.getsizeof(json.dumps(str(input_dict)).replace(' ','')) - 51
        if input_size > map_rules['max_size']:
            map_error['failed_test'] = 'max_size'
            map_error['error_value'] = input_size
            map_error['error_code'] = 4032
            raise InputValidationError(map_error)
    # construct lists of keys in input dictionary
    # (also verifies that every key is a string)
    input_keys = []
    input_key_list = []
    for key in input_dict.keys():
        error_dict = {
            'object_title': object_title,
            'model_schema': self.schema,
            'input_criteria': self.keyMap[rules_top_level_key],
            'failed_test': 'key_datatype',
            'input_path': path_to_root,
            'error_value': key,
            'error_code': 4004
        }
        # NOTE(review): input_criteria aliases the self.keyMap entry, so this
        # assignment also adds 'key_datatype' to the shared rules dict
        error_dict['input_criteria']['key_datatype'] = 'string'
        if path_to_root == '.':
            if not isinstance(key, str):
                input_key_name = path_to_root + str(key)
                error_dict['input_path'] = input_key_name
                raise InputValidationError(error_dict)
            input_key_name = path_to_root + key
        else:
            if not isinstance(key, str):
                input_key_name = path_to_root + '.' + str(key)
                error_dict['input_path'] = input_key_name
                raise InputValidationError(error_dict)
            input_key_name = path_to_root + '.' + key
        input_keys.append(input_key_name)
        input_key_list.append(key)
    # TODO: validate top-level key and values against identical to reference
    # TODO: run lambda function and call validation
    # construct lists of keys in schema dictionary
    max_keys = []  # full dot-paths of every declared field
    max_key_list = []  # bare key names of every declared field
    req_keys = []  # full dot-paths of required fields
    req_key_list = []  # bare key names of required fields
    for key in schema_dict.keys():
        if path_to_root == '.':
            schema_key_name = path_to_root + key
        else:
            schema_key_name = path_to_root + '.' + key
        max_keys.append(schema_key_name)
        max_key_list.append(key)
        rules_schema_key_name = re.sub('\[\d+\]', '[0]', schema_key_name)
        if self.keyMap[rules_schema_key_name]['required_field']:
            req_keys.append(schema_key_name)
            req_key_list.append(key)
    # validate existence of required fields
    missing_keys = set(req_keys) - set(input_keys)
    if missing_keys:
        error_dict = {
            'object_title': object_title,
            'model_schema': self.schema,
            'input_criteria': self.keyMap[rules_top_level_key],
            'failed_test': 'required_field',
            'input_path': path_to_root,
            'error_value': list(missing_keys),
            'error_code': 4002
        }
        error_dict['input_criteria']['required_keys'] = req_keys
        raise InputValidationError(error_dict)
    # validate existence of extra fields (only when the model forbids them)
    extra_keys = set(input_keys) - set(max_keys)
    if extra_keys and not self.keyMap[rules_top_level_key]['extra_fields']:
        extra_key_list = []
        for key in extra_keys:
            pathless_key = re.sub(rules_top_level_key, '', key, count=1)
            extra_key_list.append(pathless_key)
        error_dict = {
            'object_title': object_title,
            'model_schema': self.schema,
            'input_criteria': self.keyMap[rules_top_level_key],
            'failed_test': 'extra_fields',
            'input_path': path_to_root,
            'error_value': extra_key_list,
            'error_code': 4003
        }
        error_dict['input_criteria']['maximum_scope'] = max_key_list
        raise InputValidationError(error_dict)
    # validate datatype of value
    for key, value in input_dict.items():
        if path_to_root == '.':
            input_key_name = path_to_root + key
        else:
            input_key_name = path_to_root + '.' + key
        rules_input_key_name = re.sub('\[\d+\]', '[0]', input_key_name)
        if input_key_name in max_keys:
            input_criteria = self.keyMap[rules_input_key_name]
            error_dict = {
                'object_title': object_title,
                'model_schema': self.schema,
                'input_criteria': input_criteria,
                'failed_test': 'value_datatype',
                'input_path': input_key_name,
                'error_value': value,
                'error_code': 4001
            }
            # an unrecognized class is itself a datatype failure
            try:
                value_index = self._datatype_classes.index(value.__class__)
            except:
                error_dict['error_value'] = value.__class__.__name__
                raise InputValidationError(error_dict)
            value_type = self._datatype_names[value_index]
            if input_criteria['value_datatype'] == 'null':
                pass
            else:
                if value_type != input_criteria['value_datatype']:
                    raise InputValidationError(error_dict)
            # call appropriate validation sub-routine for datatype of value
            if value_type == 'boolean':
                input_dict[key] = self._validate_boolean(value, input_key_name, object_title)
            elif value_type == 'number':
                input_dict[key] = self._validate_number(value, input_key_name, object_title)
            elif value_type == 'string':
                input_dict[key] = self._validate_string(value, input_key_name, object_title)
            elif value_type == 'map':
                input_dict[key] = self._validate_dict(value, schema_dict[key], input_key_name, object_title)
            elif value_type == 'list':
                input_dict[key] = self._validate_list(value, schema_dict[key], input_key_name, object_title)
    # set default values for empty optional fields
    for key in max_key_list:
        if key not in input_key_list:
            indexed_key = max_keys[max_key_list.index(key)]
            if indexed_key in self.components.keys():
                if 'default_value' in self.components[indexed_key]:
                    input_dict[key] = self.components[indexed_key]['default_value']
    return input_dict
def _validate_list(self, input_list, schema_list, path_to_root, object_title=''):
    '''
    a helper method for recursively validating items in a list

    validates list size, the datatype of each item against the prototype
    item declared in the schema, and (optionally) uniqueness of items

    :param input_list: list with input data to validate
    :param schema_list: list with schema at the same path as the input
    :param path_to_root: string with dot-path of list in model
    :param object_title: [optional] string with name of input for error reports
    :return: input_list
    '''
    # construct rules for list and items
    # list indices are normalized to [0] because rules are keyed to the prototype item
    rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
    list_rules = self.keyMap[rules_path_to_root]
    initial_key = rules_path_to_root + '[0]'
    item_rules = self.keyMap[initial_key]
    # construct list error report template
    list_error = {
        'object_title': object_title,
        'model_schema': self.schema,
        'input_criteria': list_rules,
        'failed_test': 'value_datatype',
        'input_path': path_to_root,
        'error_value': 0,
        'error_code': 4001
    }
    # validate list rules
    if 'min_size' in list_rules.keys():
        if len(input_list) < list_rules['min_size']:
            list_error['failed_test'] = 'min_size'
            list_error['error_value'] = len(input_list)
            list_error['error_code'] = 4031
            raise InputValidationError(list_error)
    if 'max_size' in list_rules.keys():
        if len(input_list) > list_rules['max_size']:
            list_error['failed_test'] = 'max_size'
            list_error['error_value'] = len(input_list)
            list_error['error_code'] = 4032
            raise InputValidationError(list_error)
    # construct item error report template (reused and mutated per item)
    item_error = {
        'object_title': object_title,
        'model_schema': self.schema,
        'input_criteria': item_rules,
        'failed_test': 'value_datatype',
        'input_path': initial_key,
        'error_value': None,
        'error_code': 4001
    }
    # validate datatype of items
    for i in range(len(input_list)):
        input_path = path_to_root + '[%s]' % i
        item = input_list[i]
        item_error['input_path'] = input_path
        # an unrecognized class is itself a datatype failure
        try:
            item_index = self._datatype_classes.index(item.__class__)
        except:
            item_error['error_value'] = item.__class__.__name__
            raise InputValidationError(item_error)
        item_type = self._datatype_names[item_index]
        item_error['error_value'] = item
        if item_rules['value_datatype'] == 'null':
            pass
        else:
            if item_type != item_rules['value_datatype']:
                raise InputValidationError(item_error)
        # call appropriate validation sub-routine for datatype of item
        if item_type == 'boolean':
            input_list[i] = self._validate_boolean(item, input_path, object_title)
        elif item_type == 'number':
            input_list[i] = self._validate_number(item, input_path, object_title)
        elif item_type == 'string':
            input_list[i] = self._validate_string(item, input_path, object_title)
        elif item_type == 'map':
            input_list[i] = self._validate_dict(item, schema_list[0], input_path, object_title)
        elif item_type == 'list':
            input_list[i] = self._validate_list(item, schema_list[0], input_path, object_title)
    # validate unique values in list
    if 'unique_values' in list_rules.keys():
        if len(set(input_list)) < len(input_list):
            list_error['failed_test'] = 'unique_values'
            list_error['error_value'] = input_list
            list_error['error_code'] = 4033
            raise InputValidationError(list_error)
    # TODO: validate top-level item values against identical to reference
    # TODO: run lambda function and call validation url
    return input_list
def _validate_number(self, input_number, path_to_root, object_title=''):
    '''
    a helper method for validating properties of a number

    :param input_number: number to validate against model criteria
    :param path_to_root: string with dot-path of number in model
    :param object_title: [optional] string with name of input for error reports
    :return: input_number
    '''
    normalized_path = re.sub('\[\d+\]', '[0]', path_to_root)
    criteria = self.keyMap[normalized_path]

    def _fail(test_name, code):
        # raise a validation report for the first criterion which fails
        raise InputValidationError({
            'object_title': object_title,
            'model_schema': self.schema,
            'input_criteria': criteria,
            'failed_test': test_name,
            'input_path': path_to_root,
            'error_value': input_number,
            'error_code': code
        })

    # criteria are evaluated in declaration order; the first failure raises
    if 'integer_data' in criteria:
        if criteria['integer_data'] and not isinstance(input_number, int):
            _fail('integer_data', 4021)
    if 'min_value' in criteria and input_number < criteria['min_value']:
        _fail('min_value', 4022)
    if 'max_value' in criteria and input_number > criteria['max_value']:
        _fail('max_value', 4023)
    if 'greater_than' in criteria and input_number <= criteria['greater_than']:
        _fail('greater_than', 4024)
    if 'less_than' in criteria and input_number >= criteria['less_than']:
        _fail('less_than', 4025)
    if 'equal_to' in criteria and input_number != criteria['equal_to']:
        _fail('equal_to', 4026)
    if 'discrete_values' in criteria and input_number not in criteria['discrete_values']:
        _fail('discrete_values', 4041)
    if 'excluded_values' in criteria and input_number in criteria['excluded_values']:
        _fail('excluded_values', 4042)
    # TODO: validate number against identical to reference
    # TODO: run lambda function and call validation url
    return input_number
def _validate_string(self, input_string, path_to_root, object_title=''):
    '''
    a helper method for validating properties of a string

    :param input_string: string to validate against model criteria
    :param path_to_root: string with dot-path of string in model
    :param object_title: [optional] string with name of input for error reports
    :return: input_string
    '''
    normalized_path = re.sub('\[\d+\]', '[0]', path_to_root)
    criteria = self.keyMap[normalized_path]

    def _fail(test_name, code):
        # raise a validation report for the first criterion which fails
        raise InputValidationError({
            'object_title': object_title,
            'model_schema': self.schema,
            'input_criteria': criteria,
            'failed_test': test_name,
            'input_path': path_to_root,
            'error_value': input_string,
            'error_code': code
        })

    # criteria are evaluated in declaration order; the first failure raises
    if 'byte_data' in criteria and criteria['byte_data']:
        # string must be valid base64-encoded byte data
        try:
            decoded_bytes = b64decode(input_string)
        except:
            _fail('byte_data', 4011)
        if not isinstance(decoded_bytes, bytes):
            _fail('byte_data', 4011)
    if 'min_value' in criteria and input_string < criteria['min_value']:
        _fail('min_value', 4022)
    if 'max_value' in criteria and input_string > criteria['max_value']:
        _fail('max_value', 4023)
    if 'greater_than' in criteria and input_string <= criteria['greater_than']:
        _fail('greater_than', 4024)
    if 'less_than' in criteria and input_string >= criteria['less_than']:
        _fail('less_than', 4025)
    if 'equal_to' in criteria and input_string != criteria['equal_to']:
        _fail('equal_to', 4026)
    if 'min_length' in criteria and len(input_string) < criteria['min_length']:
        _fail('min_length', 4012)
    if 'max_length' in criteria and len(input_string) > criteria['max_length']:
        _fail('max_length', 4013)
    if 'must_not_contain' in criteria:
        # no forbidden pattern may match anywhere in the string
        for regex in criteria['must_not_contain']:
            if re.compile(regex).findall(input_string):
                _fail('must_not_contain', 4014)
    if 'must_contain' in criteria:
        # every required pattern must match somewhere in the string
        for regex in criteria['must_contain']:
            if not re.compile(regex).findall(input_string):
                _fail('must_contain', 4015)
    if 'contains_either' in criteria:
        # at least one of the alternate patterns must match
        matched = False
        for regex in criteria['contains_either']:
            if re.compile(regex).findall(input_string):
                matched = True
        if not matched:
            _fail('contains_either', 4016)
    if 'discrete_values' in criteria and input_string not in criteria['discrete_values']:
        _fail('discrete_values', 4041)
    if 'excluded_values' in criteria and input_string in criteria['excluded_values']:
        _fail('excluded_values', 4042)
    # TODO: validate string against identical to reference
    # TODO: run lambda function and call validation url
    return input_string
def _validate_boolean(self, input_boolean, path_to_root, object_title=''):
    '''
    a helper method for validating properties of a boolean

    :param input_boolean: boolean to validate against model criteria
    :param path_to_root: string with dot-path of boolean in model
    :param object_title: [optional] string with name of input for error reports
    :return: input_boolean
    '''
    normalized_path = re.sub('\[\d+\]', '[0]', path_to_root)
    criteria = self.keyMap[normalized_path]
    # equal_to is the only criterion applicable to booleans
    if 'equal_to' in criteria and input_boolean != criteria['equal_to']:
        raise InputValidationError({
            'object_title': object_title,
            'model_schema': self.schema,
            'input_criteria': criteria,
            'failed_test': 'equal_to',
            'input_path': path_to_root,
            'error_value': input_boolean,
            'error_code': 4026
        })
    # TODO: validate boolean against identical to reference
    # TODO: run lambda function and call validation url
    return input_boolean
def _ingest_dict(self, input_dict, schema_dict, path_to_root):
    '''
    a helper method for ingesting key, value pairs in a dictionary

    for each key declared in the schema, the input value is kept (and
    recursively ingested) if its datatype matches the schema; otherwise
    the declared default, or an empty value of the declared datatype, is
    substituted, so a value is always produced for every declared key

    :param input_dict: dictionary with input data to ingest
    :param schema_dict: dictionary with schema at the same path as the input
    :param path_to_root: string with dot-path of dictionary in model
    :return: valid_dict
    '''
    valid_dict = {}
    # construct path to root for rules
    rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
    # iterate over keys in schema dict
    for key, value in schema_dict.items():
        key_path = path_to_root
        if not key_path == '.':
            key_path += '.'
        key_path += key
        rules_key_path = re.sub('\[\d+\]', '[0]', key_path)
        # determine whether the input value's datatype matches the schema
        value_match = False
        if key in input_dict.keys():
            value_index = self._datatype_classes.index(value.__class__)
            value_type = self._datatype_names[value_index]
            try:
                v_index = self._datatype_classes.index(input_dict[key].__class__)
                v_type = self._datatype_names[v_index]
                if v_type == value_type:
                    value_match = True
            except:
                value_match = False
        if value_match:
            # datatype matches: ingest the input value recursively
            if value_type == 'null':
                valid_dict[key] = input_dict[key]
            elif value_type == 'boolean':
                valid_dict[key] = self._ingest_boolean(input_dict[key], key_path)
            elif value_type == 'number':
                valid_dict[key] = self._ingest_number(input_dict[key], key_path)
            elif value_type == 'string':
                valid_dict[key] = self._ingest_string(input_dict[key], key_path)
            elif value_type == 'map':
                valid_dict[key] = self._ingest_dict(input_dict[key], schema_dict[key], key_path)
            elif value_type == 'list':
                valid_dict[key] = self._ingest_list(input_dict[key], schema_dict[key], key_path)
        else:
            # missing or mismatched input: substitute default or empty value
            value_type = self.keyMap[rules_key_path]['value_datatype']
            if 'default_value' in self.keyMap[rules_key_path]:
                valid_dict[key] = self.keyMap[rules_key_path]['default_value']
            elif value_type == 'null':
                valid_dict[key] = None
            elif value_type == 'boolean':
                valid_dict[key] = False
            elif value_type == 'number':
                valid_dict[key] = 0.0
                if 'integer_data' in self.keyMap[rules_key_path].keys():
                    if self.keyMap[rules_key_path]['integer_data']:
                        valid_dict[key] = 0
            elif value_type == 'string':
                valid_dict[key] = ''
            elif value_type == 'list':
                valid_dict[key] = []
            elif value_type == 'map':
                # empty maps are still populated with their declared defaults
                valid_dict[key] = self._ingest_dict({}, schema_dict[key], key_path)
    # add extra fields if set to True
    if self.keyMap[rules_path_to_root]['extra_fields']:
        for key, value in input_dict.items():
            if key not in valid_dict.keys():
                valid_dict[key] = value
    return valid_dict
def _ingest_list(self, input_list, schema_list, path_to_root):
    '''
    a helper method for ingesting items in a list

    items whose datatype does not match the prototype item declared in
    the schema (or which fail validation) are silently skipped rather
    than raising an error; ingestion stops when max_size is reached

    :param input_list: list with input data to ingest
    :param schema_list: list with schema at the same path as the input
    :param path_to_root: string with dot-path of list in model
    :return: valid_list
    '''
    valid_list = []
    # construct max list size
    max_size = None
    rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
    if 'max_size' in self.keyMap[rules_path_to_root].keys():
        if not self.keyMap[rules_path_to_root]['max_size']:
            # a falsy max_size means the list is always ingested empty
            return valid_list
        else:
            max_size = self.keyMap[rules_path_to_root]['max_size']
    # iterate over items in input list
    if input_list:
        rules_index = self._datatype_classes.index(schema_list[0].__class__)
        rules_type = self._datatype_names[rules_index]
        for i in range(len(input_list)):
            item_path = '%s[%s]' % (path_to_root, i)
            # determine whether the item's datatype matches the prototype
            value_match = False
            try:
                item_index = self._datatype_classes.index(input_list[i].__class__)
                item_type = self._datatype_names[item_index]
                if item_type == rules_type:
                    value_match = True
            except:
                value_match = False
            if value_match:
                try:
                    if item_type == 'boolean':
                        valid_list.append(self._validate_boolean(input_list[i], item_path))
                    elif item_type == 'number':
                        valid_list.append(self._validate_number(input_list[i], item_path))
                    elif item_type == 'string':
                        valid_list.append(self._validate_string(input_list[i], item_path))
                    elif item_type == 'map':
                        valid_list.append(self._ingest_dict(input_list[i], schema_list[0], item_path))
                    elif item_type == 'list':
                        valid_list.append(self._ingest_list(input_list[i], schema_list[0], item_path))
                except:
                    # invalid items are silently skipped during ingestion
                    pass
            if isinstance(max_size, int):
                if len(valid_list) == max_size:
                    return valid_list
    return valid_list
def _ingest_number(self, input_number, path_to_root):
    '''
    a helper method for ingesting a number
    falls back to the default (or an empty value) when validation fails
    :return: valid_number
    '''
    try:
        return self._validate_number(input_number, path_to_root)
    except:
        pass
    # validation failed: substitute the declared default or an empty value
    rules_path = re.sub('\[\d+\]', '[0]', path_to_root)
    criteria = self.keyMap[rules_path]
    if 'default_value' in criteria:
        return criteria['default_value']
    if criteria.get('integer_data'):
        return 0
    return 0.0
def _ingest_string(self, input_string, path_to_root):
    '''
    a helper method for ingesting a string
    falls back to the default (or an empty string) when validation fails
    :return: valid_string
    '''
    try:
        return self._validate_string(input_string, path_to_root)
    except:
        # validation failed: substitute the declared default or ''
        rules_path = re.sub('\[\d+\]', '[0]', path_to_root)
        return self.keyMap[rules_path].get('default_value', '')
def _ingest_boolean(self, input_boolean, path_to_root):
    '''
    a helper method for ingesting a boolean
    falls back to the default (or False) when validation fails
    :return: valid_boolean
    '''
    try:
        return self._validate_boolean(input_boolean, path_to_root)
    except:
        # validation failed: substitute the declared default or False
        rules_path = re.sub('\[\d+\]', '[0]', path_to_root)
        return self.keyMap[rules_path].get('default_value', False)
def _reconstruct(self, path_to_root):
    '''
    a helper method for finding the schema endpoint from a path to root

    :param path_to_root: string with dot path to root (e.g. ".users[0].name")
    :return: list, dict, string, number, or boolean at path to root
    '''
    # split the dot path on '.' and '[' delimiters
    item_pattern = re.compile('\d+\\]')
    dot_pattern = re.compile('\\.|\\[')
    segments = dot_pattern.split(path_to_root)
    # walk the schema from its base, one segment at a time
    endpoint = self.schema
    if segments[1]:
        for segment in segments[1:]:
            if item_pattern.match(segment):
                # an item index always maps to the first (prototype) item
                endpoint = endpoint[0]
            else:
                endpoint = endpoint[segment]
    return endpoint
def _walk(self, path_to_root, record_dict):
    '''
    a helper method for finding the record endpoints from a path to root

    because list indices in a path are generic, a single path may resolve
    to multiple endpoints in the record; every match is collected

    :param path_to_root: string with dot path to root from record
    :param record_dict: dictionary with record to walk
    :return: list of the list, dict, string, number, or boolean values
        found at path to root
    '''
    # split path to root into segments
    item_pattern = re.compile('\d+\\]')
    dot_pattern = re.compile('\\.|\\[')
    path_segments = dot_pattern.split(path_to_root)
    # construct empty fields
    record_endpoints = []
    # determine starting position (drop the empty leading segment)
    if not path_segments[0]:
        path_segments.pop(0)
    # define internal recursive function
    def _walk_int(path_segments, record_dict):
        record_endpoint = record_dict
        for i in range(0, len(path_segments)):
            if item_pattern.match(path_segments[i]):
                # segment is a list index: fan out over every item
                for j in range(0, len(record_endpoint)):
                    if len(path_segments) == 2:
                        record_endpoints.append(record_endpoint[j])
                    else:
                        # only recurse from the first list segment in the chain
                        stop_chain = False
                        for x in range(0, i):
                            if item_pattern.match(path_segments[x]):
                                stop_chain = True
                        if not stop_chain:
                            shortened_segments = []
                            for z in range(i + 1, len(path_segments)):
                                shortened_segments.append(path_segments[z])
                            _walk_int(shortened_segments, record_endpoint[j])
            else:
                # segment is a dictionary key: descend one level
                stop_chain = False
                for y in range(0, i):
                    if item_pattern.match(path_segments[y]):
                        stop_chain = True
                if not stop_chain:
                    if len(path_segments) == i + 1:
                        record_endpoints.append(record_endpoint[path_segments[i]])
                    else:
                        record_endpoint = record_endpoint[path_segments[i]]
    # conduct recursive walk
    _walk_int(path_segments, record_dict)
    return record_endpoints
def validate(self, input_data, path_to_root='', object_title=''):
    '''
    a core method for validating input against the model
    input_data is only returned if all data is valid

    :param input_data: list, dict, string, number, or boolean to validate
    :param path_to_root: [optional] string with dot-path of model component
    :param object_title: [optional] string with name of input to validate
    :return: input_data (or raises InputValidationError)
    '''
    # NOTE(review): __name__ here is a local variable shadowing the module
    # attribute; it is only used to compose argument-error messages
    __name__ = '%s.validate' % self.__class__.__name__
    _path_arg = '%s(path_to_root="...")' % __name__
    _title_arg = '%s(object_title="...")' % __name__
    # validate input arguments
    copy_path = path_to_root
    if path_to_root:
        if not isinstance(path_to_root, str):
            raise ModelValidationError('%s must be a string.' % _path_arg)
        else:
            # normalize path to always begin with a dot
            if path_to_root[0] != '.':
                copy_path = '.%s' % path_to_root
        if not copy_path in self.keyMap.keys():
            raise ModelValidationError('%s does not exist in components %s.' % (_path_arg.replace('...', path_to_root), self.keyMap.keys()))
    else:
        copy_path = '.'
    if object_title:
        if not isinstance(object_title, str):
            raise ModelValidationError('%s must be a string' % _title_arg)
    # construct generic error dictionary
    error_dict = {
        'object_title': object_title,
        'model_schema': self.schema,
        'input_criteria': self.keyMap[copy_path],
        'failed_test': 'value_datatype',
        'input_path': copy_path,
        'error_value': input_data,
        'error_code': 4001
    }
    # determine value type of input data
    # (an unrecognized class is itself a datatype failure)
    try:
        input_index = self._datatype_classes.index(input_data.__class__)
    except:
        error_dict['error_value'] = input_data.__class__.__name__
        raise InputValidationError(error_dict)
    input_type = self._datatype_names[input_index]
    # validate input data type
    if input_type != self.keyMap[copy_path]['value_datatype']:
        raise InputValidationError(error_dict)
    # run helper method appropriate to data type
    if input_type == 'boolean':
        input_data = self._validate_boolean(input_data, copy_path, object_title)
    elif input_type == 'number':
        input_data = self._validate_number(input_data, copy_path, object_title)
    elif input_type == 'string':
        input_data = self._validate_string(input_data, copy_path, object_title)
    elif input_type == 'list':
        schema_list = self._reconstruct(copy_path)
        input_data = self._validate_list(input_data, schema_list, copy_path, object_title)
    elif input_type == 'map':
        schema_dict = self._reconstruct(copy_path)
        input_data = self._validate_dict(input_data, schema_dict, copy_path, object_title)
    return input_data
def ingest(self, **kwargs):
    '''
    a core method to ingest and validate arbitrary keyword data

    **NOTE: data is always returned with this method**

    for each key declared in the model, a value is returned according
    to the following priority:
        1. value in kwargs if the field passes its validation test
        2. default value declared for the key in the model
        3. empty value appropriate to the datatype of the key

    **NOTE: as long as a default value is provided for each key-
    value, returned data will be model valid

    **NOTE: if 'extra_fields' is True for a dictionary, the key-
    value pair of all fields in kwargs which are not declared in
    the model will also be added to the corresponding dictionary
    data

    **NOTE: if 'max_size' is declared for a list, method will
    stop adding input to the list once it reaches max size

    :param kwargs: key, value pairs to ingest
    :return: dictionary with validated keys and values
    '''
    __name__ = '%s.ingest' % self.__class__.__name__
    # delegate to the recursive dictionary ingestion helper at the root
    return self._ingest_dict(kwargs, self.schema, '.')
|
collectiveacuity/jsonModel
|
jsonmodel/_extensions.py
|
tabulate
|
python
|
def tabulate(self, format='html', syntax=''):
    '''
    a function to create a table from the class model keyMap

    :param format: string with format for table output (html is produced)
    :param syntax: [optional] string with linguistic syntax (e.g. 'javascript')
    :return: string with table
    '''
    from tabulate import tabulate as _tabulate
    # define headers
    headers = ['Field', 'Datatype', 'Required', 'Default', 'Examples', 'Conditionals', 'Description']
    rows = []
    # track which optional columns are actually populated
    default_values = False
    additional_conditions = False
    field_description = False
    # construct rows
    for key, value in self.keyMap.items():
        key_segments = _segment_path(key)
        if key_segments:
            row = []
            # add field column (indented one step per level of nesting)
            field_name = ''
            if len(key_segments) > 1:
                for i in range(1, len(key_segments)):
                    field_name += ' '
            if key_segments[-1] == '0':
                field_name += '<i>item</i>'
            else:
                field_name += key_segments[-1]
            row.append(field_name)
            # add datatype column (adjusted for the requested syntax)
            value_datatype = value['value_datatype']
            if 'integer_data' in value.keys():
                if value['integer_data'] and syntax != 'javascript':
                    value_datatype = 'integer'
            elif value['value_datatype'] == 'map':
                if syntax == 'javascript':
                    value_datatype = 'object'
            elif value['value_datatype'] == 'list':
                if syntax == 'javascript':
                    value_datatype = 'array'
                # retrieve datatype of item in list
                item_key = key + '[0]'
                item_datatype = self.keyMap[item_key]['value_datatype']
                if syntax == 'javascript':
                    if item_datatype == 'list':
                        item_datatype = 'array'
                    elif item_datatype == 'map':
                        item_datatype = 'object'
                elif 'integer_data' in self.keyMap[item_key].keys():
                    if self.keyMap[item_key]['integer_data']:
                        item_datatype = 'integer'
                value_datatype += ' of %ss' % item_datatype
            row.append(value_datatype)
            # add required column
            if value['required_field']:
                row.append('yes')
            else:
                row.append('')
            # add default column
            if 'default_value' in value.keys():
                default_values = True
                if isinstance(value['default_value'], str):
                    row.append('"%s"' % value['default_value'])
                elif isinstance(value['default_value'], bool):
                    row.append(str(value['default_value']).lower())
                else:
                    row.append(str(value['default_value']))
            else:
                row.append('')
            # define example constructor for the field's rules
            def determine_example(k, v):
                example_value = ''
                if 'example_values' in v.keys():
                    for i in v['example_values']:
                        if example_value:
                            example_value += ', '
                        if isinstance(i, str):
                            example_value += '"%s"' % i
                        else:
                            # BUGFIX: previously appended the enclosing loop's
                            # rules dict (`value`) to a string, raising a
                            # TypeError; non-string examples are rendered as text
                            example_value += str(i)
                elif 'declared_value' in v.keys():
                    if isinstance(v['declared_value'], str):
                        example_value = '"%s"' % v['declared_value']
                    elif isinstance(v['declared_value'], bool):
                        example_value = str(v['declared_value']).lower()
                    else:
                        example_value = v['declared_value']
                else:
                    # no explicit example: use a placeholder for the datatype
                    if v['value_datatype'] == 'map':
                        example_value = '{...}'
                    elif v['value_datatype'] == 'list':
                        example_value = '[...]'
                    elif v['value_datatype'] == 'null':
                        example_value = 'null'
                return example_value
            # add examples column
            row.append(determine_example(key, value))
            # add additional conditions and description columns
            conditions = ''
            description = ''
            for k, v in value.items():
                extra_integer = False
                if k == 'integer_data' and syntax == 'javascript':
                    extra_integer = True
                if k not in ('example_values', 'value_datatype', 'required_field', 'declared_value', 'default_value', 'field_position', 'field_metadata') or extra_integer:
                    add_extra = False
                    if k == 'extra_fields':
                        if v:
                            add_extra = True
                    if k in ('field_description', 'field_title'):
                        field_description = True
                        # field_description takes precedence over field_title
                        if k == 'field_description':
                            description = v
                        elif not description:
                            description = v
                    elif k != 'extra_fields' or add_extra:
                        additional_conditions = True
                        if conditions:
                            conditions += '<br>'
                        condition_value = v
                        if isinstance(v, str):
                            condition_value = '"%s"' % v
                        elif isinstance(v, bool):
                            condition_value = str(v).lower()
                        conditions += '%s: %s' % (k, condition_value)
            row.append(conditions)
            row.append(description)
            # add row to rows
            rows.append(row)
    # add rows for top field
    top_dict = self.keyMap['.']
    if top_dict['extra_fields']:
        rows.append(['<i>**extra fields allowed</i>', '', '', '', '', '', ''])
    if 'max_bytes' in top_dict.keys():
        rows.append(['<i>**max bytes: %s</i>' % top_dict['max_bytes'], '', '', '', '', '', ''])
    # eliminate unused columns
    # NOTE(review): when conditions are absent but descriptions exist, the
    # unconditional pop() drops the Description column instead of the
    # Conditionals column -- confirm intent upstream before changing
    if not field_description:
        headers.pop()
    if not additional_conditions:
        headers.pop()
    if not default_values:
        headers.pop(3)
    for row in rows:
        if not field_description:
            row.pop()
        if not additional_conditions:
            row.pop()
        if not default_values:
            row.pop(3)
    # construct table html
    table_html = _tabulate(rows, headers, tablefmt='html')
    # add links to urls in text
    table_html = _add_links(table_html)
    return table_html
|
a function to create a table from the class model keyMap
:param format: string with format for table output
:param syntax: [optional] string with linguistic syntax
:return: string with table
|
train
|
https://github.com/collectiveacuity/jsonModel/blob/1ea64c36d78add3faa7b85ff82c5ec685458c940/jsonmodel/_extensions.py#L34-L209
|
[
"def _segment_path(dot_path):\n import re\n digit_pat = re.compile('\\[(\\d+)\\]')\n key_list = dot_path.split('.')\n segment_list = []\n for key in key_list:\n if key:\n item_list = digit_pat.split(key)\n for item in item_list:\n if item:\n segment_list.append(item)\n return segment_list\n",
"def _add_links(text_string):\n\n import re\n url_parts = re.compile('(([A-Za-z]{3,9}:(?://)?)(?:[\\-;:&=\\+\\$,\\w]+@)?[A-Za-z0-9.\\-]+(:[0-9]+)?|(?:www.|[\\-;:&=\\+\\$,\\w]+@)[A-Za-z0-9.\\-]+)((?:/[\\+~%/.\\w\\-_]*)?\\??(?:[\\-\\+,=&;%@.\\w_]*)#?(?:[\\w]*))?')\n url_pattern = re.compile('((([A-Za-z]{3,9}:(?://)?)(?:[\\-;:&=\\+\\$,\\w]+@)?[A-Za-z0-9.\\-]+(:[0-9]+)?|(?:www.|[\\-;:&=\\+\\$,\\w]+@)[A-Za-z0-9.\\-]+)((?:/[\\+~%/.\\w\\-_]*)?\\??(?:[\\-\\+,=&;%@.\\w_]*)#?(?:[\\w]*))?)')\n\n def _replace_url(x):\n url_string = x.group(0)\n if not url_parts.findall(url_string)[0][1]:\n return url_string\n url_text = '<a href=\"%s\">%s</a>' % (url_string, url_string)\n return url_text\n\n return url_pattern.sub(_replace_url, text_string)\n",
"def determine_example(k, v):\n example_value = ''\n if 'example_values' in v.keys():\n for i in v['example_values']:\n if example_value:\n example_value += ', '\n if isinstance(i, str):\n example_value += '\"%s\"' % i\n else:\n example_value += value\n elif 'declared_value' in v.keys():\n if isinstance(v['declared_value'], str):\n example_value = '\"%s\"' % v['declared_value']\n elif isinstance(v['declared_value'], bool):\n example_value = str(v['declared_value']).lower()\n else:\n example_value = v['declared_value']\n else:\n if v['value_datatype'] == 'map':\n example_value = '{...}'\n elif v['value_datatype'] == 'list':\n example_value = '[...]'\n elif v['value_datatype'] == 'null':\n example_value = 'null'\n return example_value\n"
] |
''' a package of helper functions for extensions.py '''
__author__ = 'rcj1492'
__created__ = '2018.03'
__license__ = 'MIT'
def _segment_path(dot_path):
import re
digit_pat = re.compile('\[(\d+)\]')
key_list = dot_path.split('.')
segment_list = []
for key in key_list:
if key:
item_list = digit_pat.split(key)
for item in item_list:
if item:
segment_list.append(item)
return segment_list
def _add_links(text_string):
import re
url_parts = re.compile('(([A-Za-z]{3,9}:(?://)?)(?:[\-;:&=\+\$,\w]+@)?[A-Za-z0-9.\-]+(:[0-9]+)?|(?:www.|[\-;:&=\+\$,\w]+@)[A-Za-z0-9.\-]+)((?:/[\+~%/.\w\-_]*)?\??(?:[\-\+,=&;%@.\w_]*)#?(?:[\w]*))?')
url_pattern = re.compile('((([A-Za-z]{3,9}:(?://)?)(?:[\-;:&=\+\$,\w]+@)?[A-Za-z0-9.\-]+(:[0-9]+)?|(?:www.|[\-;:&=\+\$,\w]+@)[A-Za-z0-9.\-]+)((?:/[\+~%/.\w\-_]*)?\??(?:[\-\+,=&;%@.\w_]*)#?(?:[\w]*))?)')
def _replace_url(x):
url_string = x.group(0)
if not url_parts.findall(url_string)[0][1]:
return url_string
url_text = '<a href="%s">%s</a>' % (url_string, url_string)
return url_text
return url_pattern.sub(_replace_url, text_string)
def tabulate(self, format='html', syntax=''):
'''
a function to create a table from the class model keyMap
:param format: string with format for table output
:param syntax: [optional] string with linguistic syntax
:return: string with table
'''
from tabulate import tabulate as _tabulate
# define headers
headers = ['Field', 'Datatype', 'Required', 'Default', 'Examples', 'Conditionals', 'Description']
rows = []
default_values = False
additional_conditions = False
field_description = False
# construct rows
for key, value in self.keyMap.items():
key_segments = _segment_path(key)
if key_segments:
row = []
# add field column
field_name = ''
if len(key_segments) > 1:
for i in range(1,len(key_segments)):
field_name += ' '
if key_segments[-1] == '0':
field_name += '<i>item</i>'
else:
field_name += key_segments[-1]
row.append(field_name)
# add datatype column
value_datatype = value['value_datatype']
if 'integer_data' in value.keys():
if value['integer_data'] and syntax != 'javascript':
value_datatype = 'integer'
elif value['value_datatype'] == 'map':
if syntax == 'javascript':
value_datatype = 'object'
elif value['value_datatype'] == 'list':
if syntax == 'javascript':
value_datatype = 'array'
# retrieve datatype of item in list
item_key = key + '[0]'
item_datatype = self.keyMap[item_key]['value_datatype']
if syntax == 'javascript':
if item_datatype == 'list':
item_datatype = 'array'
elif item_datatype == 'map':
item_datatype = 'object'
elif 'integer_data' in self.keyMap[item_key].keys():
if self.keyMap[item_key]['integer_data']:
item_datatype = 'integer'
value_datatype += ' of %ss' % item_datatype
row.append(value_datatype)
# add required column
if value['required_field']:
row.append('yes')
else:
row.append('')
# add default column
if 'default_value' in value.keys():
default_values = True
if isinstance(value['default_value'], str):
row.append('"%s"' % value['default_value'])
elif isinstance(value['default_value'], bool):
row.append(str(value['default_value']).lower())
else:
row.append(str(value['default_value']))
else:
row.append('')
# define recursive example constructor
def determine_example(k, v):
example_value = ''
if 'example_values' in v.keys():
for i in v['example_values']:
if example_value:
example_value += ', '
if isinstance(i, str):
example_value += '"%s"' % i
else:
example_value += value
elif 'declared_value' in v.keys():
if isinstance(v['declared_value'], str):
example_value = '"%s"' % v['declared_value']
elif isinstance(v['declared_value'], bool):
example_value = str(v['declared_value']).lower()
else:
example_value = v['declared_value']
else:
if v['value_datatype'] == 'map':
example_value = '{...}'
elif v['value_datatype'] == 'list':
example_value = '[...]'
elif v['value_datatype'] == 'null':
example_value = 'null'
return example_value
# add examples column
row.append(determine_example(key, value))
# add additional conditions
conditions = ''
description = ''
for k, v in value.items():
extra_integer = False
if k == 'integer_data' and syntax == 'javascript':
extra_integer = True
if k not in ('example_values', 'value_datatype', 'required_field', 'declared_value', 'default_value', 'field_position', 'field_metadata') or extra_integer:
add_extra = False
if k == 'extra_fields':
if v:
add_extra = True
if k in ('field_description', 'field_title'):
field_description = True
if k == 'field_description':
description = v
elif not description:
description = v
elif k != 'extra_fields' or add_extra:
additional_conditions = True
if conditions:
conditions += '<br>'
condition_value = v
if isinstance(v, str):
condition_value = '"%s"' % v
elif isinstance(v, bool):
condition_value = str(v).lower()
conditions += '%s: %s' % (k, condition_value)
row.append(conditions)
row.append(description)
# add row to rows
rows.append(row)
# add rows for top field
top_dict = self.keyMap['.']
if top_dict['extra_fields']:
rows.append(['<i>**extra fields allowed</i>', '', '', '', '', '', ''])
if 'max_bytes' in top_dict.keys():
rows.append(['<i>**max bytes: %s</i>' % top_dict['max_bytes'], '', '', '', '', '', ''])
# eliminate unused columns
if not field_description:
headers.pop()
if not additional_conditions:
headers.pop()
if not default_values:
headers.pop(3)
for row in rows:
if not field_description:
row.pop()
if not additional_conditions:
row.pop()
if not default_values:
row.pop(3)
# construct table html
table_html = _tabulate(rows, headers, tablefmt='html')
# add links to urls in text
# markdown_url = re.compile('\[(.*?)\]\((.*)\)')
table_html = _add_links(table_html)
return table_html
|
fchauvel/MAD
|
mad/simulation/tasks.py
|
Task._execute
|
python
|
def _execute(self, worker):
self._assert_status_is(TaskStatus.RUNNING)
operation = worker.look_up(self.operation)
operation.invoke(self, [], worker=worker)
|
This method is ASSIGNED during the evaluation to control how to resume it once it has been paused
|
train
|
https://github.com/fchauvel/MAD/blob/806d5174848b1a502e5c683894995602478c448b/mad/simulation/tasks.py#L252-L258
|
[
"def _assert_status_is(self, *legal_states):\n assert self.status in legal_states, \\\n \"Found status == {0.name} (expecting {1!s})\".format(self.status, [ s.name for s in legal_states ])\n"
] |
class Task:
def __init__(self, service, request=None):
self.service = service
self.worker = None
self.request = request
self.status = TaskStatus.CREATED
@property
def priority(self):
return self.request.priority
@property
def identifier(self):
return self.request.identifier
@property
def is_cancelled(self):
return not self.request.is_pending
@property
def operation(self):
return self.request.operation
def accept(self):
self._assert_status_is(TaskStatus.CREATED)
self.service.listener.task_accepted(self)
self.request.accept()
def reject(self):
self._assert_status_is(TaskStatus.CREATED)
self.service.listener.task_rejected(self)
self.status == TaskStatus.REJECTED
self.request.reject()
def activate(self):
self._assert_status_is(TaskStatus.CREATED, TaskStatus.BLOCKED)
self.service.listener.task_activated(self)
self.status = TaskStatus.READY
def assign_to(self, worker):
assert worker, "task assigned to None!"
self._assert_status_is(TaskStatus.CREATED, TaskStatus.READY)
self.worker = worker
if self.is_cancelled:
self.discard()
else:
self.service.listener.task_assigned_to(self, worker)
self.status = TaskStatus.RUNNING
self._execute(worker)
def pause(self):
self._assert_status_is(TaskStatus.RUNNING)
self.service.listener.task_paused(self)
self.status = TaskStatus.BLOCKED
self.service.pause(self)
self.service.release(self.worker)
def resume_with(self, on_resume):
self._assert_status_is(TaskStatus.BLOCKED)
self._execute = on_resume
self.service.activate(self)
def compute(self, duration, continuation):
assert self.worker is not None, "Cannot compute, no worker attached!"
self.worker.compute(duration, continuation)
def finalise(self, status):
self.request.finalise(self, status)
def succeed(self):
self._assert_status_is(TaskStatus.RUNNING)
self.service.listener.task_successful(self)
self.status = TaskStatus.SUCCESSFUL
self.service.release(self.worker)
def discard(self):
self._assert_status_is(TaskStatus.CREATED, TaskStatus.RUNNING, TaskStatus.READY)
self.worker.listener.task_cancelled(self)
self.status = TaskStatus.FAILED
self.worker.release()
def fail(self):
self._assert_status_is(TaskStatus.RUNNING)
self.service.listener.task_failed(self)
self.status == TaskStatus.FAILED
self.service.release(self.worker)
def _assert_status_is(self, *legal_states):
assert self.status in legal_states, \
"Found status == {0.name} (expecting {1!s})".format(self.status, [ s.name for s in legal_states ])
|
fchauvel/MAD
|
mad/evaluation.py
|
Evaluation.of_think
|
python
|
def of_think(self, think):
return self._compute(
duration=think.duration,
after=self.continuation)
|
Simulate the worker processing the task for the specified amount of time.
The worker is not released and the task is not paused.
|
train
|
https://github.com/fchauvel/MAD/blob/806d5174848b1a502e5c683894995602478c448b/mad/evaluation.py#L209-L216
|
[
"def _compute(self, duration, after):\n task = self._look_up(Symbols.TASK)\n task.compute(duration, continuation=lambda: after(Success()))\n return Busy()\n"
] |
class Evaluation:
"""
Represent the future evaluation of an expression within a given environment. The expression is bound to
a continuation, that is the next evaluation to carry out.
"""
def __init__(self, environment, expression, factory, continuation=lambda x: x):
self.environment = environment
self.expression = expression
assert callable(continuation), "Continuations must be callable!"
self.continuation = continuation
self.simulation = self.environment.look_up(Symbols.SIMULATION)
self.factory = factory;
def _look_up(self, symbol):
return self.environment.look_up(symbol)
def _define(self, symbol, value):
self.environment.define(symbol, value)
def _evaluation_of(self, expression, continuation=lambda x: None):
return Evaluation(self.environment, expression, self.factory, continuation).result
@property
def result(self):
return self.expression.accept(self)
def of_service_definition(self, service):
service_environment = self.environment.create_local_environment()
service_environment.define(Symbols.LISTENER, self.factory.create_listener())
Evaluation(service_environment, Settings(), self.factory).result
Evaluation(service_environment, service.body, self.factory).result
worker_pool = self.factory.create_worker_pool(service_environment)
service_environment.define(Symbols.WORKER_POOL, worker_pool)
service = self.factory.create_service(service.name, service_environment)
self._define(service.name, service)
monitor = self.factory.create_monitor(Symbols.MONITOR, service_environment, None)
service_environment.define(Symbols.MONITOR, monitor)
logger = self.factory.create_logger(service_environment)
service_environment.define(Symbols.LOGGER, logger)
return self.continuation(Success(service))
def of_settings(self, settings):
self._evaluation_of(settings.queue)
self._evaluation_of(settings.throttling)
self._evaluation_of(settings.autoscaling)
return self.continuation(Success(None))
def of_fifo(self, fifo):
queue = self.factory.create_FIFO_task_pool(self.environment)
self._define(Symbols.QUEUE, queue)
return self.continuation(Success(None))
def of_lifo(self, lifo):
queue = self.factory.create_LIFO_task_pool(self.environment)
self._define(Symbols.QUEUE, queue)
return self.continuation(Success(None))
def of_autoscaling(self, autoscaling):
autoscaler = self.factory.create_autoscaler(self.environment, autoscaling)
self._define(Symbols.AUTOSCALING, autoscaler)
return self.continuation(Success(None))
def of_tail_drop(self, definition):
task_pool = self._look_up(Symbols.QUEUE)
tail_drop = self.factory.create_tail_drop(self.environment, definition.capacity, task_pool)
self._define(Symbols.QUEUE, tail_drop)
return self.continuation(Success(None))
def of_no_throttling(self, no_throttling):
task_pool = self._look_up(Symbols.QUEUE)
no_throttling = self.factory.create_no_throttling(self.environment, task_pool)
self._define(Symbols.QUEUE, no_throttling)
return self.continuation(Success(None))
def of_operation_definition(self, operation_definition):
operation = self.factory.create_operation(self.environment, operation_definition)
self.environment.define(operation_definition.name, operation)
return self.continuation(Success(operation))
def of_client_stub_definition(self, definition):
client_environment = self.environment.create_local_environment()
client_environment.define(Symbols.LISTENER, self.factory.create_listener())
client = self.factory.create_client_stub(client_environment, definition)
self._define(definition.name, client)
client.initialize()
monitor = self.factory.create_monitor(Symbols.MONITOR, client_environment, None)
client_environment.define(Symbols.MONITOR, monitor)
logger = self.factory.create_logger(client_environment)
client_environment.define(Symbols.LOGGER, logger)
return self.continuation(Success(client))
def of_sequence(self, sequence):
def abort_on_error(previous):
if previous.is_successful:
return self._evaluation_of(sequence.rest, self.continuation)
else:
return self.continuation(previous)
return self._evaluation_of(sequence.first_expression, abort_on_error)
def of_trigger(self, trigger):
return self._compute(
duration=1,
after=lambda status: self._do_trigger(trigger))
def of_query(self, query):
return self._compute(
duration=1,
after=lambda status: self._send_query(query))
def of_fail(self, fail):
if random() < fail.probability:
return self.continuation(Error())
else:
return self.continuation(Success(None))
def of_retry(self, retry):
task = self._look_up(Symbols.TASK)
sender = self._look_up(Symbols.SELF)
backoff = self.factory.create_backoff(retry.delay)
def retry_on_error(remaining_tries):
if remaining_tries <= 0:
return lambda s: self.continuation(Error())
else:
def continuation(status):
if status.is_successful:
return self.continuation(Success(None))
else:
def try_again(worker):
self._evaluation_of(retry.expression, retry_on_error(remaining_tries-1))
delay = backoff.delay(retry.limit - remaining_tries)
sender.schedule.after(delay, lambda: task.resume_with(try_again))
task.pause()
return Paused()
return continuation
return self._evaluation_of(retry.expression, retry_on_error(retry.limit-1))
def of_ignore_error(self, ignore_error):
def ignore_status(status):
return self.continuation(Success(status.value))
return self._evaluation_of(ignore_error.expression, ignore_status)
def _do_trigger(self, trigger):
task = self._look_up(Symbols.TASK)
request = self.factory.create_trigger(task, trigger.operation, trigger.priority, self.continuation)
recipient = self._look_up(trigger.service)
request.send_to(recipient)
task.pause()
return Paused()
def _send_query(self, query):
task = self._look_up(Symbols.TASK)
sender = self._look_up(Symbols.SELF)
request = self.factory.create_query(task, query.operation, query.priority, self.continuation)
recipient = self._look_up(query.service)
request.send_to(recipient)
# TODO Move this in Request
if query.has_timeout:
def on_check_timeout():
if request.is_pending:
sender.listener.timeout_of(request)
request.discard()
task.resume_with(lambda worker: self.continuation(Error()))
sender.schedule.after(query.timeout, on_check_timeout)
task.pause()
return Paused()
def _compute(self, duration, after):
task = self._look_up(Symbols.TASK)
task.compute(duration, continuation=lambda: after(Success()))
return Busy()
|
fchauvel/MAD
|
mad/parsing.py
|
p_definition_list
|
python
|
def p_definition_list(p):
if len(p) == 3:
p[0] = p[1] + p[2]
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production rules 'p_action_list'")
|
definition_list : definition definition_list
| definition
|
train
|
https://github.com/fchauvel/MAD/blob/806d5174848b1a502e5c683894995602478c448b/mad/parsing.py#L122-L132
| null |
#!/usr/bin/env python
#
# This file is part of MAD.
#
# MAD is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MAD is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MAD. If not, see <http://www.gnu.org/licenses/>.
#
import ply.lex as lex
import ply.yacc as yacc
from mad.ast.settings import *
from mad.ast.definitions import *
from mad.ast.actions import *
reserved = {
"autoscaling": "AUTOSCALING",
"client": "CLIENT",
"delay": "DELAY",
"every": "EVERY",
"fail": "FAIL",
"FIFO": "FIFO",
"ignore": "IGNORE",
"invoke": "INVOKE",
"LIFO": "LIFO",
"limit": "LIMIT",
"limits": "LIMITS",
"none": "NONE",
"operation": "OPERATION",
"period": "PERIOD",
"priority": "PRIORITY",
"queue": "QUEUE",
"query": "QUERY",
"retry": "RETRY",
"service": "SERVICE",
"settings": "SETTINGS",
"tail-drop": "TAIL_DROP",
"think": "THINK",
"throttling": "THROTTLING",
"timeout": "TIMEOUT"
}
# List of token names. This is always required
tokens = [ "CLOSE_BRACKET",
"CLOSE_CURLY_BRACKET",
"CLOSE_SQUARE_BRACKET",
"COLON",
"COMMA",
"IDENTIFIER",
"OPEN_BRACKET",
"OPEN_CURLY_BRACKET",
"OPEN_SQUARE_BRACKET",
"NUMBER",
"REAL",
"SLASH"] + list(reserved.values())
t_CLOSE_BRACKET = r"\)"
t_CLOSE_CURLY_BRACKET = r"\}"
t_CLOSE_SQUARE_BRACKET = r"\]"
t_COLON = r":"
t_COMMA = r","
t_OPEN_BRACKET = r"\("
t_OPEN_CURLY_BRACKET = r"\{"
t_OPEN_SQUARE_BRACKET = r"\["
t_SLASH = r"/"
def t_IDENTIFIER(t):
r'[a-zA-Z_][a-zA-Z0-9_\-]+'
t.type = reserved.get(t.value,'IDENTIFIER') # Check for reserved words
return t
def t_NUMBER(t):
r'[+-]?(\d+(\.\d*)?|\.\d+)([eE][+-]?\d+)?'
return t
def t_newline(t):
# Define a rule so we can track line numbers
r'\n+'
t.lexer.lineno += 1
def t_COMMENT(t):
r'\#.*'
pass
# A string containing ignored characters (spaces and tabs)
t_ignore = ' \t'
def t_error(t):
# Error handling rule
print("Illegal character '%s'" % t.value[0])
t.lexer.skip(1)
lexer = lex.lex()
# -----------------------------
# Parsing rules
def p_unit(p):
"""
unit : definition_list
"""
p[0] = p[1]
def p_definition(p):
"""
definition : define_service
| define_client
"""
p[0] = p[1]
def p_define_service(p):
"""
define_service : SERVICE IDENTIFIER OPEN_CURLY_BRACKET settings operation_list CLOSE_CURLY_BRACKET
| SERVICE IDENTIFIER OPEN_CURLY_BRACKET operation_list CLOSE_CURLY_BRACKET
"""
if len(p) == 7:
body = p[4] + p[5]
else:
body = p[4]
p[0] = DefineService(p[2], body)
def p_settings(p):
"""
settings : SETTINGS OPEN_CURLY_BRACKET setting_list CLOSE_CURLY_BRACKET
"""
p[0] = Settings(**p[3])
def p_setting_list(p):
"""
setting_list : setting setting_list
| setting
"""
if len(p) == 3:
p[0] = merge_map(p[1], p[2])
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production rules 'p_setting_list'")
def p_setting(p):
"""
setting : queue
| autoscaling
| throttling
"""
p[0] = p[1]
def p_queue(p):
"""
queue : QUEUE COLON LIFO
| QUEUE COLON FIFO
"""
if p[3] == "LIFO":
p[0] = {"queue": LIFO()}
elif p[3] == "FIFO":
p[0] = {"queue": FIFO()}
else:
raise RuntimeError("Queue discipline '%s' is not supported!" % p[1])
def p_throttling(p):
"""
throttling : THROTTLING COLON NONE
| THROTTLING COLON TAIL_DROP OPEN_BRACKET NUMBER CLOSE_BRACKET
"""
throttling = NoThrottlingSettings()
if len(p) == 7:
throttling = TailDropSettings(int(p[5]))
p[0] = {"throttling": throttling}
def p_autoscaling(p):
"""
autoscaling : AUTOSCALING OPEN_CURLY_BRACKET autoscaling_setting_list CLOSE_CURLY_BRACKET
"""
p[0] = {"autoscaling": Autoscaling(**p[3])}
def p_autoscaling_setting_list(p):
"""
autoscaling_setting_list : autoscaling_setting autoscaling_setting_list
| autoscaling_setting
"""
if len(p) == 3:
p[0] = merge_map(p[1], p[2])
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production in 'autoscaling_setting_list'")
def p_autoscaling_setting(p):
"""
autoscaling_setting : PERIOD COLON NUMBER
| LIMITS COLON OPEN_SQUARE_BRACKET NUMBER COMMA NUMBER CLOSE_SQUARE_BRACKET
"""
if len(p) == 8:
p[0] = {"limits": (int(p[4]), int(p[6]))}
elif len(p) == 4:
p[0] = {"period": int(p[3])}
else:
raise RuntimeError("Invalid product in 'autoscaling_setting'")
def p_operation_list(p):
"""
operation_list : define_operation operation_list
| define_operation
"""
if len(p) == 3:
p[0] = p[1] + p[2]
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production rules 'p_operation_list'")
def p_define_client(p):
"""
define_client : CLIENT IDENTIFIER OPEN_CURLY_BRACKET EVERY NUMBER OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET CLOSE_CURLY_BRACKET
"""
p[0] = DefineClientStub(p[2], int(p[5]), p[7])
def p_define_operation(p):
"""
define_operation : OPERATION IDENTIFIER OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET
"""
p[0] = DefineOperation(p[2], p[4])
def p_action_list(p):
"""
action_list : action action_list
| action
"""
if len(p) == 3:
p[0] = p[1] + p[2]
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production rules 'p_action_list'")
def p_action(p):
"""
action : invoke
| query
| think
| fail
| retry
| ignore
"""
p[0] = p[1]
def p_think(p):
"""
think : THINK NUMBER
"""
p[0] = Think(int(p[2]))
def p_fail(p):
"""
fail : FAIL NUMBER
| FAIL
"""
if len(p) > 2:
p[0] = Fail(float(p[2]))
else:
p[0] = Fail()
def p_query(p):
"""
query : QUERY IDENTIFIER SLASH IDENTIFIER
| QUERY IDENTIFIER SLASH IDENTIFIER OPEN_CURLY_BRACKET query_option_list CLOSE_CURLY_BRACKET
"""
parameters = {"service": p[2], "operation": p[4]}
if len(p) > 5:
parameters = merge_map(parameters, p[6])
p[0] = Query(**parameters)
def p_query_option_list(p):
"""
query_option_list : query_option COMMA query_option_list
| query_option
"""
if len(p) == 2:
p[0] = p[1]
elif len(p) == 4:
p[0] = merge_map(p[1], p[3])
else:
raise RuntimeError("Invalid product rules for 'query_option_list'")
def p_query_option(p):
"""
query_option : timeout
| priority
"""
p[0] = p[1]
def p_timeout(p):
"""
timeout : TIMEOUT COLON NUMBER
"""
p[0] = {"timeout": int(p[3])}
def p_priority(p):
"""
priority : PRIORITY COLON NUMBER
"""
p[0] = {"priority": int(p[3])}
def p_invoke(p):
"""
invoke : INVOKE IDENTIFIER SLASH IDENTIFIER
| INVOKE IDENTIFIER SLASH IDENTIFIER OPEN_CURLY_BRACKET PRIORITY COLON NUMBER CLOSE_CURLY_BRACKET
"""
priority = None
if len(p) > 5:
priority = int(p[8])
p[0] = Trigger(p[2], p[4], priority)
def p_retry(p):
"""
retry : RETRY OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET
| RETRY OPEN_BRACKET retry_option_list CLOSE_BRACKET OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET
"""
if len(p) == 5:
p[0] = Retry(p[3])
elif len(p) == 8:
p[0] = Retry(p[6], **p[3])
else:
raise RuntimeError("Invalid product rules for 'retry_option_list'")
def p_retry_option_list(p):
"""
retry_option_list : retry_option COMMA retry_option_list
| retry_option
"""
if len(p) == 4:
p[0] = merge_map(p[1], p[3])
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production in 'retry_option_list'")
def p_retry_option(p):
"""
retry_option : LIMIT COLON NUMBER
| DELAY COLON IDENTIFIER OPEN_BRACKET NUMBER CLOSE_BRACKET
"""
if len(p) == 4:
p[0] = {"limit": int(p[3]) }
elif len(p) == 7:
p[0] = {"delay": Delay(int(p[5]), p[3])}
else:
raise RuntimeError("Invalid production in 'retry_option'")
def p_ignore(p):
"""
ignore : IGNORE OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET
"""
p[0] = IgnoreError(p[3])
def p_error(t):
raise MADSyntaxError((t.lineno, t.lexpos), t.value)
def merge_map(map_A, map_B):
tmp = map_A.copy()
tmp.update(map_B)
return tmp
class MADSyntaxError(BaseException):
def __init__(self, position, hint):
self.position = position
self.hint = hint
@property
def line_number(self):
return self.position[0]
def __repr__(self):
return "Syntax error at line {line:d}, around '{hint}'.".format(
line=self.position[0],
hint=self.hint)
class Parser:
def __init__(self, file_system, root_file):
self.root_file = root_file
self.file_system = file_system
def parse(self, entry_rule="unit", logger=yacc.NullLogger()):
lexer.lineno = 1
text = self._content()
parser = yacc.yacc(start=entry_rule, errorlog=logger)
return parser.parse(lexer=lexer, input=text)
def _content(self):
lines = self.file_system.open_input_stream(self.root_file).readlines()
return "\n".join(lines)
|
fchauvel/MAD
|
mad/parsing.py
|
p_define_service
|
python
|
def p_define_service(p):
if len(p) == 7:
body = p[4] + p[5]
else:
body = p[4]
p[0] = DefineService(p[2], body)
|
define_service : SERVICE IDENTIFIER OPEN_CURLY_BRACKET settings operation_list CLOSE_CURLY_BRACKET
| SERVICE IDENTIFIER OPEN_CURLY_BRACKET operation_list CLOSE_CURLY_BRACKET
|
train
|
https://github.com/fchauvel/MAD/blob/806d5174848b1a502e5c683894995602478c448b/mad/parsing.py#L143-L152
| null |
#!/usr/bin/env python
#
# This file is part of MAD.
#
# MAD is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MAD is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MAD. If not, see <http://www.gnu.org/licenses/>.
#
import ply.lex as lex
import ply.yacc as yacc
from mad.ast.settings import *
from mad.ast.definitions import *
from mad.ast.actions import *
reserved = {
"autoscaling": "AUTOSCALING",
"client": "CLIENT",
"delay": "DELAY",
"every": "EVERY",
"fail": "FAIL",
"FIFO": "FIFO",
"ignore": "IGNORE",
"invoke": "INVOKE",
"LIFO": "LIFO",
"limit": "LIMIT",
"limits": "LIMITS",
"none": "NONE",
"operation": "OPERATION",
"period": "PERIOD",
"priority": "PRIORITY",
"queue": "QUEUE",
"query": "QUERY",
"retry": "RETRY",
"service": "SERVICE",
"settings": "SETTINGS",
"tail-drop": "TAIL_DROP",
"think": "THINK",
"throttling": "THROTTLING",
"timeout": "TIMEOUT"
}
# List of token names. This is always required
tokens = [ "CLOSE_BRACKET",
"CLOSE_CURLY_BRACKET",
"CLOSE_SQUARE_BRACKET",
"COLON",
"COMMA",
"IDENTIFIER",
"OPEN_BRACKET",
"OPEN_CURLY_BRACKET",
"OPEN_SQUARE_BRACKET",
"NUMBER",
"REAL",
"SLASH"] + list(reserved.values())
t_CLOSE_BRACKET = r"\)"
t_CLOSE_CURLY_BRACKET = r"\}"
t_CLOSE_SQUARE_BRACKET = r"\]"
t_COLON = r":"
t_COMMA = r","
t_OPEN_BRACKET = r"\("
t_OPEN_CURLY_BRACKET = r"\{"
t_OPEN_SQUARE_BRACKET = r"\["
t_SLASH = r"/"
def t_IDENTIFIER(t):
r'[a-zA-Z_][a-zA-Z0-9_\-]+'
t.type = reserved.get(t.value,'IDENTIFIER') # Check for reserved words
return t
def t_NUMBER(t):
r'[+-]?(\d+(\.\d*)?|\.\d+)([eE][+-]?\d+)?'
return t
def t_newline(t):
# Define a rule so we can track line numbers
r'\n+'
t.lexer.lineno += 1
def t_COMMENT(t):
r'\#.*'
pass
# A string containing ignored characters (spaces and tabs)
t_ignore = ' \t'
def t_error(t):
# Error handling rule
print("Illegal character '%s'" % t.value[0])
t.lexer.skip(1)
lexer = lex.lex()
# -----------------------------
# Parsing rules
def p_unit(p):
"""
unit : definition_list
"""
p[0] = p[1]
def p_definition_list(p):
"""
definition_list : definition definition_list
| definition
"""
if len(p) == 3:
p[0] = p[1] + p[2]
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production rules 'p_action_list'")
def p_definition(p):
"""
definition : define_service
| define_client
"""
p[0] = p[1]
def p_settings(p):
"""
settings : SETTINGS OPEN_CURLY_BRACKET setting_list CLOSE_CURLY_BRACKET
"""
p[0] = Settings(**p[3])
def p_setting_list(p):
"""
setting_list : setting setting_list
| setting
"""
if len(p) == 3:
p[0] = merge_map(p[1], p[2])
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production rules 'p_setting_list'")
def p_setting(p):
"""
setting : queue
| autoscaling
| throttling
"""
p[0] = p[1]
def p_queue(p):
"""
queue : QUEUE COLON LIFO
| QUEUE COLON FIFO
"""
if p[3] == "LIFO":
p[0] = {"queue": LIFO()}
elif p[3] == "FIFO":
p[0] = {"queue": FIFO()}
else:
raise RuntimeError("Queue discipline '%s' is not supported!" % p[1])
def p_throttling(p):
"""
throttling : THROTTLING COLON NONE
| THROTTLING COLON TAIL_DROP OPEN_BRACKET NUMBER CLOSE_BRACKET
"""
throttling = NoThrottlingSettings()
if len(p) == 7:
throttling = TailDropSettings(int(p[5]))
p[0] = {"throttling": throttling}
def p_autoscaling(p):
"""
autoscaling : AUTOSCALING OPEN_CURLY_BRACKET autoscaling_setting_list CLOSE_CURLY_BRACKET
"""
p[0] = {"autoscaling": Autoscaling(**p[3])}
def p_autoscaling_setting_list(p):
"""
autoscaling_setting_list : autoscaling_setting autoscaling_setting_list
| autoscaling_setting
"""
if len(p) == 3:
p[0] = merge_map(p[1], p[2])
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production in 'autoscaling_setting_list'")
def p_autoscaling_setting(p):
"""
autoscaling_setting : PERIOD COLON NUMBER
| LIMITS COLON OPEN_SQUARE_BRACKET NUMBER COMMA NUMBER CLOSE_SQUARE_BRACKET
"""
if len(p) == 8:
p[0] = {"limits": (int(p[4]), int(p[6]))}
elif len(p) == 4:
p[0] = {"period": int(p[3])}
else:
raise RuntimeError("Invalid product in 'autoscaling_setting'")
def p_operation_list(p):
"""
operation_list : define_operation operation_list
| define_operation
"""
if len(p) == 3:
p[0] = p[1] + p[2]
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production rules 'p_operation_list'")
def p_define_client(p):
"""
define_client : CLIENT IDENTIFIER OPEN_CURLY_BRACKET EVERY NUMBER OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET CLOSE_CURLY_BRACKET
"""
p[0] = DefineClientStub(p[2], int(p[5]), p[7])
def p_define_operation(p):
"""
define_operation : OPERATION IDENTIFIER OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET
"""
p[0] = DefineOperation(p[2], p[4])
def p_action_list(p):
"""
action_list : action action_list
| action
"""
if len(p) == 3:
p[0] = p[1] + p[2]
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production rules 'p_action_list'")
def p_action(p):
"""
action : invoke
| query
| think
| fail
| retry
| ignore
"""
p[0] = p[1]
def p_think(p):
"""
think : THINK NUMBER
"""
p[0] = Think(int(p[2]))
def p_fail(p):
"""
fail : FAIL NUMBER
| FAIL
"""
if len(p) > 2:
p[0] = Fail(float(p[2]))
else:
p[0] = Fail()
def p_query(p):
"""
query : QUERY IDENTIFIER SLASH IDENTIFIER
| QUERY IDENTIFIER SLASH IDENTIFIER OPEN_CURLY_BRACKET query_option_list CLOSE_CURLY_BRACKET
"""
parameters = {"service": p[2], "operation": p[4]}
if len(p) > 5:
parameters = merge_map(parameters, p[6])
p[0] = Query(**parameters)
def p_query_option_list(p):
"""
query_option_list : query_option COMMA query_option_list
| query_option
"""
if len(p) == 2:
p[0] = p[1]
elif len(p) == 4:
p[0] = merge_map(p[1], p[3])
else:
raise RuntimeError("Invalid product rules for 'query_option_list'")
def p_query_option(p):
"""
query_option : timeout
| priority
"""
p[0] = p[1]
def p_timeout(p):
"""
timeout : TIMEOUT COLON NUMBER
"""
p[0] = {"timeout": int(p[3])}
def p_priority(p):
"""
priority : PRIORITY COLON NUMBER
"""
p[0] = {"priority": int(p[3])}
def p_invoke(p):
"""
invoke : INVOKE IDENTIFIER SLASH IDENTIFIER
| INVOKE IDENTIFIER SLASH IDENTIFIER OPEN_CURLY_BRACKET PRIORITY COLON NUMBER CLOSE_CURLY_BRACKET
"""
priority = None
if len(p) > 5:
priority = int(p[8])
p[0] = Trigger(p[2], p[4], priority)
def p_retry(p):
"""
retry : RETRY OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET
| RETRY OPEN_BRACKET retry_option_list CLOSE_BRACKET OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET
"""
if len(p) == 5:
p[0] = Retry(p[3])
elif len(p) == 8:
p[0] = Retry(p[6], **p[3])
else:
raise RuntimeError("Invalid product rules for 'retry_option_list'")
def p_retry_option_list(p):
"""
retry_option_list : retry_option COMMA retry_option_list
| retry_option
"""
if len(p) == 4:
p[0] = merge_map(p[1], p[3])
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production in 'retry_option_list'")
def p_retry_option(p):
"""
retry_option : LIMIT COLON NUMBER
| DELAY COLON IDENTIFIER OPEN_BRACKET NUMBER CLOSE_BRACKET
"""
if len(p) == 4:
p[0] = {"limit": int(p[3]) }
elif len(p) == 7:
p[0] = {"delay": Delay(int(p[5]), p[3])}
else:
raise RuntimeError("Invalid production in 'retry_option'")
def p_ignore(p):
"""
ignore : IGNORE OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET
"""
p[0] = IgnoreError(p[3])
def p_error(t):
raise MADSyntaxError((t.lineno, t.lexpos), t.value)
def merge_map(map_A, map_B):
tmp = map_A.copy()
tmp.update(map_B)
return tmp
class MADSyntaxError(BaseException):
def __init__(self, position, hint):
self.position = position
self.hint = hint
@property
def line_number(self):
return self.position[0]
def __repr__(self):
return "Syntax error at line {line:d}, around '{hint}'.".format(
line=self.position[0],
hint=self.hint)
class Parser:
def __init__(self, file_system, root_file):
self.root_file = root_file
self.file_system = file_system
def parse(self, entry_rule="unit", logger=yacc.NullLogger()):
lexer.lineno = 1
text = self._content()
parser = yacc.yacc(start=entry_rule, errorlog=logger)
return parser.parse(lexer=lexer, input=text)
def _content(self):
lines = self.file_system.open_input_stream(self.root_file).readlines()
return "\n".join(lines)
|
fchauvel/MAD
|
mad/parsing.py
|
p_setting_list
|
python
|
def p_setting_list(p):
if len(p) == 3:
p[0] = merge_map(p[1], p[2])
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production rules 'p_setting_list'")
|
setting_list : setting setting_list
| setting
|
train
|
https://github.com/fchauvel/MAD/blob/806d5174848b1a502e5c683894995602478c448b/mad/parsing.py#L162-L172
|
[
"def merge_map(map_A, map_B):\n tmp = map_A.copy()\n tmp.update(map_B)\n return tmp\n"
] |
#!/usr/bin/env python
#
# This file is part of MAD.
#
# MAD is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MAD is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MAD. If not, see <http://www.gnu.org/licenses/>.
#
import ply.lex as lex
import ply.yacc as yacc
from mad.ast.settings import *
from mad.ast.definitions import *
from mad.ast.actions import *
reserved = {
"autoscaling": "AUTOSCALING",
"client": "CLIENT",
"delay": "DELAY",
"every": "EVERY",
"fail": "FAIL",
"FIFO": "FIFO",
"ignore": "IGNORE",
"invoke": "INVOKE",
"LIFO": "LIFO",
"limit": "LIMIT",
"limits": "LIMITS",
"none": "NONE",
"operation": "OPERATION",
"period": "PERIOD",
"priority": "PRIORITY",
"queue": "QUEUE",
"query": "QUERY",
"retry": "RETRY",
"service": "SERVICE",
"settings": "SETTINGS",
"tail-drop": "TAIL_DROP",
"think": "THINK",
"throttling": "THROTTLING",
"timeout": "TIMEOUT"
}
# List of token names. This is always required
tokens = [ "CLOSE_BRACKET",
"CLOSE_CURLY_BRACKET",
"CLOSE_SQUARE_BRACKET",
"COLON",
"COMMA",
"IDENTIFIER",
"OPEN_BRACKET",
"OPEN_CURLY_BRACKET",
"OPEN_SQUARE_BRACKET",
"NUMBER",
"REAL",
"SLASH"] + list(reserved.values())
t_CLOSE_BRACKET = r"\)"
t_CLOSE_CURLY_BRACKET = r"\}"
t_CLOSE_SQUARE_BRACKET = r"\]"
t_COLON = r":"
t_COMMA = r","
t_OPEN_BRACKET = r"\("
t_OPEN_CURLY_BRACKET = r"\{"
t_OPEN_SQUARE_BRACKET = r"\["
t_SLASH = r"/"
def t_IDENTIFIER(t):
r'[a-zA-Z_][a-zA-Z0-9_\-]+'
t.type = reserved.get(t.value,'IDENTIFIER') # Check for reserved words
return t
def t_NUMBER(t):
r'[+-]?(\d+(\.\d*)?|\.\d+)([eE][+-]?\d+)?'
return t
def t_newline(t):
# Define a rule so we can track line numbers
r'\n+'
t.lexer.lineno += 1
def t_COMMENT(t):
r'\#.*'
pass
# A string containing ignored characters (spaces and tabs)
t_ignore = ' \t'
def t_error(t):
# Error handling rule
print("Illegal character '%s'" % t.value[0])
t.lexer.skip(1)
lexer = lex.lex()
# -----------------------------
# Parsing rules
def p_unit(p):
"""
unit : definition_list
"""
p[0] = p[1]
def p_definition_list(p):
"""
definition_list : definition definition_list
| definition
"""
if len(p) == 3:
p[0] = p[1] + p[2]
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production rules 'p_action_list'")
def p_definition(p):
"""
definition : define_service
| define_client
"""
p[0] = p[1]
def p_define_service(p):
"""
define_service : SERVICE IDENTIFIER OPEN_CURLY_BRACKET settings operation_list CLOSE_CURLY_BRACKET
| SERVICE IDENTIFIER OPEN_CURLY_BRACKET operation_list CLOSE_CURLY_BRACKET
"""
if len(p) == 7:
body = p[4] + p[5]
else:
body = p[4]
p[0] = DefineService(p[2], body)
def p_settings(p):
"""
settings : SETTINGS OPEN_CURLY_BRACKET setting_list CLOSE_CURLY_BRACKET
"""
p[0] = Settings(**p[3])
def p_setting(p):
"""
setting : queue
| autoscaling
| throttling
"""
p[0] = p[1]
def p_queue(p):
"""
queue : QUEUE COLON LIFO
| QUEUE COLON FIFO
"""
if p[3] == "LIFO":
p[0] = {"queue": LIFO()}
elif p[3] == "FIFO":
p[0] = {"queue": FIFO()}
else:
raise RuntimeError("Queue discipline '%s' is not supported!" % p[1])
def p_throttling(p):
"""
throttling : THROTTLING COLON NONE
| THROTTLING COLON TAIL_DROP OPEN_BRACKET NUMBER CLOSE_BRACKET
"""
throttling = NoThrottlingSettings()
if len(p) == 7:
throttling = TailDropSettings(int(p[5]))
p[0] = {"throttling": throttling}
def p_autoscaling(p):
"""
autoscaling : AUTOSCALING OPEN_CURLY_BRACKET autoscaling_setting_list CLOSE_CURLY_BRACKET
"""
p[0] = {"autoscaling": Autoscaling(**p[3])}
def p_autoscaling_setting_list(p):
"""
autoscaling_setting_list : autoscaling_setting autoscaling_setting_list
| autoscaling_setting
"""
if len(p) == 3:
p[0] = merge_map(p[1], p[2])
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production in 'autoscaling_setting_list'")
def p_autoscaling_setting(p):
"""
autoscaling_setting : PERIOD COLON NUMBER
| LIMITS COLON OPEN_SQUARE_BRACKET NUMBER COMMA NUMBER CLOSE_SQUARE_BRACKET
"""
if len(p) == 8:
p[0] = {"limits": (int(p[4]), int(p[6]))}
elif len(p) == 4:
p[0] = {"period": int(p[3])}
else:
raise RuntimeError("Invalid product in 'autoscaling_setting'")
def p_operation_list(p):
"""
operation_list : define_operation operation_list
| define_operation
"""
if len(p) == 3:
p[0] = p[1] + p[2]
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production rules 'p_operation_list'")
def p_define_client(p):
"""
define_client : CLIENT IDENTIFIER OPEN_CURLY_BRACKET EVERY NUMBER OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET CLOSE_CURLY_BRACKET
"""
p[0] = DefineClientStub(p[2], int(p[5]), p[7])
def p_define_operation(p):
"""
define_operation : OPERATION IDENTIFIER OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET
"""
p[0] = DefineOperation(p[2], p[4])
def p_action_list(p):
"""
action_list : action action_list
| action
"""
if len(p) == 3:
p[0] = p[1] + p[2]
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production rules 'p_action_list'")
def p_action(p):
"""
action : invoke
| query
| think
| fail
| retry
| ignore
"""
p[0] = p[1]
def p_think(p):
"""
think : THINK NUMBER
"""
p[0] = Think(int(p[2]))
def p_fail(p):
"""
fail : FAIL NUMBER
| FAIL
"""
if len(p) > 2:
p[0] = Fail(float(p[2]))
else:
p[0] = Fail()
def p_query(p):
"""
query : QUERY IDENTIFIER SLASH IDENTIFIER
| QUERY IDENTIFIER SLASH IDENTIFIER OPEN_CURLY_BRACKET query_option_list CLOSE_CURLY_BRACKET
"""
parameters = {"service": p[2], "operation": p[4]}
if len(p) > 5:
parameters = merge_map(parameters, p[6])
p[0] = Query(**parameters)
def p_query_option_list(p):
"""
query_option_list : query_option COMMA query_option_list
| query_option
"""
if len(p) == 2:
p[0] = p[1]
elif len(p) == 4:
p[0] = merge_map(p[1], p[3])
else:
raise RuntimeError("Invalid product rules for 'query_option_list'")
def p_query_option(p):
"""
query_option : timeout
| priority
"""
p[0] = p[1]
def p_timeout(p):
"""
timeout : TIMEOUT COLON NUMBER
"""
p[0] = {"timeout": int(p[3])}
def p_priority(p):
"""
priority : PRIORITY COLON NUMBER
"""
p[0] = {"priority": int(p[3])}
def p_invoke(p):
"""
invoke : INVOKE IDENTIFIER SLASH IDENTIFIER
| INVOKE IDENTIFIER SLASH IDENTIFIER OPEN_CURLY_BRACKET PRIORITY COLON NUMBER CLOSE_CURLY_BRACKET
"""
priority = None
if len(p) > 5:
priority = int(p[8])
p[0] = Trigger(p[2], p[4], priority)
def p_retry(p):
"""
retry : RETRY OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET
| RETRY OPEN_BRACKET retry_option_list CLOSE_BRACKET OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET
"""
if len(p) == 5:
p[0] = Retry(p[3])
elif len(p) == 8:
p[0] = Retry(p[6], **p[3])
else:
raise RuntimeError("Invalid product rules for 'retry_option_list'")
def p_retry_option_list(p):
"""
retry_option_list : retry_option COMMA retry_option_list
| retry_option
"""
if len(p) == 4:
p[0] = merge_map(p[1], p[3])
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production in 'retry_option_list'")
def p_retry_option(p):
"""
retry_option : LIMIT COLON NUMBER
| DELAY COLON IDENTIFIER OPEN_BRACKET NUMBER CLOSE_BRACKET
"""
if len(p) == 4:
p[0] = {"limit": int(p[3]) }
elif len(p) == 7:
p[0] = {"delay": Delay(int(p[5]), p[3])}
else:
raise RuntimeError("Invalid production in 'retry_option'")
def p_ignore(p):
"""
ignore : IGNORE OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET
"""
p[0] = IgnoreError(p[3])
def p_error(t):
raise MADSyntaxError((t.lineno, t.lexpos), t.value)
def merge_map(map_A, map_B):
tmp = map_A.copy()
tmp.update(map_B)
return tmp
class MADSyntaxError(BaseException):
def __init__(self, position, hint):
self.position = position
self.hint = hint
@property
def line_number(self):
return self.position[0]
def __repr__(self):
return "Syntax error at line {line:d}, around '{hint}'.".format(
line=self.position[0],
hint=self.hint)
class Parser:
def __init__(self, file_system, root_file):
self.root_file = root_file
self.file_system = file_system
def parse(self, entry_rule="unit", logger=yacc.NullLogger()):
lexer.lineno = 1
text = self._content()
parser = yacc.yacc(start=entry_rule, errorlog=logger)
return parser.parse(lexer=lexer, input=text)
def _content(self):
lines = self.file_system.open_input_stream(self.root_file).readlines()
return "\n".join(lines)
|
fchauvel/MAD
|
mad/parsing.py
|
p_queue
|
python
|
def p_queue(p):
if p[3] == "LIFO":
p[0] = {"queue": LIFO()}
elif p[3] == "FIFO":
p[0] = {"queue": FIFO()}
else:
raise RuntimeError("Queue discipline '%s' is not supported!" % p[1])
|
queue : QUEUE COLON LIFO
| QUEUE COLON FIFO
|
train
|
https://github.com/fchauvel/MAD/blob/806d5174848b1a502e5c683894995602478c448b/mad/parsing.py#L184-L196
| null |
#!/usr/bin/env python
#
# This file is part of MAD.
#
# MAD is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MAD is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MAD. If not, see <http://www.gnu.org/licenses/>.
#
import ply.lex as lex
import ply.yacc as yacc
from mad.ast.settings import *
from mad.ast.definitions import *
from mad.ast.actions import *
reserved = {
"autoscaling": "AUTOSCALING",
"client": "CLIENT",
"delay": "DELAY",
"every": "EVERY",
"fail": "FAIL",
"FIFO": "FIFO",
"ignore": "IGNORE",
"invoke": "INVOKE",
"LIFO": "LIFO",
"limit": "LIMIT",
"limits": "LIMITS",
"none": "NONE",
"operation": "OPERATION",
"period": "PERIOD",
"priority": "PRIORITY",
"queue": "QUEUE",
"query": "QUERY",
"retry": "RETRY",
"service": "SERVICE",
"settings": "SETTINGS",
"tail-drop": "TAIL_DROP",
"think": "THINK",
"throttling": "THROTTLING",
"timeout": "TIMEOUT"
}
# List of token names. This is always required
tokens = [ "CLOSE_BRACKET",
"CLOSE_CURLY_BRACKET",
"CLOSE_SQUARE_BRACKET",
"COLON",
"COMMA",
"IDENTIFIER",
"OPEN_BRACKET",
"OPEN_CURLY_BRACKET",
"OPEN_SQUARE_BRACKET",
"NUMBER",
"REAL",
"SLASH"] + list(reserved.values())
t_CLOSE_BRACKET = r"\)"
t_CLOSE_CURLY_BRACKET = r"\}"
t_CLOSE_SQUARE_BRACKET = r"\]"
t_COLON = r":"
t_COMMA = r","
t_OPEN_BRACKET = r"\("
t_OPEN_CURLY_BRACKET = r"\{"
t_OPEN_SQUARE_BRACKET = r"\["
t_SLASH = r"/"
def t_IDENTIFIER(t):
r'[a-zA-Z_][a-zA-Z0-9_\-]+'
t.type = reserved.get(t.value,'IDENTIFIER') # Check for reserved words
return t
def t_NUMBER(t):
r'[+-]?(\d+(\.\d*)?|\.\d+)([eE][+-]?\d+)?'
return t
def t_newline(t):
# Define a rule so we can track line numbers
r'\n+'
t.lexer.lineno += 1
def t_COMMENT(t):
r'\#.*'
pass
# A string containing ignored characters (spaces and tabs)
t_ignore = ' \t'
def t_error(t):
# Error handling rule
print("Illegal character '%s'" % t.value[0])
t.lexer.skip(1)
lexer = lex.lex()
# -----------------------------
# Parsing rules
def p_unit(p):
"""
unit : definition_list
"""
p[0] = p[1]
def p_definition_list(p):
"""
definition_list : definition definition_list
| definition
"""
if len(p) == 3:
p[0] = p[1] + p[2]
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production rules 'p_action_list'")
def p_definition(p):
"""
definition : define_service
| define_client
"""
p[0] = p[1]
def p_define_service(p):
"""
define_service : SERVICE IDENTIFIER OPEN_CURLY_BRACKET settings operation_list CLOSE_CURLY_BRACKET
| SERVICE IDENTIFIER OPEN_CURLY_BRACKET operation_list CLOSE_CURLY_BRACKET
"""
if len(p) == 7:
body = p[4] + p[5]
else:
body = p[4]
p[0] = DefineService(p[2], body)
def p_settings(p):
"""
settings : SETTINGS OPEN_CURLY_BRACKET setting_list CLOSE_CURLY_BRACKET
"""
p[0] = Settings(**p[3])
def p_setting_list(p):
"""
setting_list : setting setting_list
| setting
"""
if len(p) == 3:
p[0] = merge_map(p[1], p[2])
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production rules 'p_setting_list'")
def p_setting(p):
"""
setting : queue
| autoscaling
| throttling
"""
p[0] = p[1]
def p_throttling(p):
"""
throttling : THROTTLING COLON NONE
| THROTTLING COLON TAIL_DROP OPEN_BRACKET NUMBER CLOSE_BRACKET
"""
throttling = NoThrottlingSettings()
if len(p) == 7:
throttling = TailDropSettings(int(p[5]))
p[0] = {"throttling": throttling}
def p_autoscaling(p):
"""
autoscaling : AUTOSCALING OPEN_CURLY_BRACKET autoscaling_setting_list CLOSE_CURLY_BRACKET
"""
p[0] = {"autoscaling": Autoscaling(**p[3])}
def p_autoscaling_setting_list(p):
"""
autoscaling_setting_list : autoscaling_setting autoscaling_setting_list
| autoscaling_setting
"""
if len(p) == 3:
p[0] = merge_map(p[1], p[2])
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production in 'autoscaling_setting_list'")
def p_autoscaling_setting(p):
"""
autoscaling_setting : PERIOD COLON NUMBER
| LIMITS COLON OPEN_SQUARE_BRACKET NUMBER COMMA NUMBER CLOSE_SQUARE_BRACKET
"""
if len(p) == 8:
p[0] = {"limits": (int(p[4]), int(p[6]))}
elif len(p) == 4:
p[0] = {"period": int(p[3])}
else:
raise RuntimeError("Invalid product in 'autoscaling_setting'")
def p_operation_list(p):
"""
operation_list : define_operation operation_list
| define_operation
"""
if len(p) == 3:
p[0] = p[1] + p[2]
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production rules 'p_operation_list'")
def p_define_client(p):
"""
define_client : CLIENT IDENTIFIER OPEN_CURLY_BRACKET EVERY NUMBER OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET CLOSE_CURLY_BRACKET
"""
p[0] = DefineClientStub(p[2], int(p[5]), p[7])
def p_define_operation(p):
"""
define_operation : OPERATION IDENTIFIER OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET
"""
p[0] = DefineOperation(p[2], p[4])
def p_action_list(p):
"""
action_list : action action_list
| action
"""
if len(p) == 3:
p[0] = p[1] + p[2]
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production rules 'p_action_list'")
def p_action(p):
"""
action : invoke
| query
| think
| fail
| retry
| ignore
"""
p[0] = p[1]
def p_think(p):
"""
think : THINK NUMBER
"""
p[0] = Think(int(p[2]))
def p_fail(p):
"""
fail : FAIL NUMBER
| FAIL
"""
if len(p) > 2:
p[0] = Fail(float(p[2]))
else:
p[0] = Fail()
def p_query(p):
"""
query : QUERY IDENTIFIER SLASH IDENTIFIER
| QUERY IDENTIFIER SLASH IDENTIFIER OPEN_CURLY_BRACKET query_option_list CLOSE_CURLY_BRACKET
"""
parameters = {"service": p[2], "operation": p[4]}
if len(p) > 5:
parameters = merge_map(parameters, p[6])
p[0] = Query(**parameters)
def p_query_option_list(p):
"""
query_option_list : query_option COMMA query_option_list
| query_option
"""
if len(p) == 2:
p[0] = p[1]
elif len(p) == 4:
p[0] = merge_map(p[1], p[3])
else:
raise RuntimeError("Invalid product rules for 'query_option_list'")
def p_query_option(p):
"""
query_option : timeout
| priority
"""
p[0] = p[1]
def p_timeout(p):
"""
timeout : TIMEOUT COLON NUMBER
"""
p[0] = {"timeout": int(p[3])}
def p_priority(p):
"""
priority : PRIORITY COLON NUMBER
"""
p[0] = {"priority": int(p[3])}
def p_invoke(p):
"""
invoke : INVOKE IDENTIFIER SLASH IDENTIFIER
| INVOKE IDENTIFIER SLASH IDENTIFIER OPEN_CURLY_BRACKET PRIORITY COLON NUMBER CLOSE_CURLY_BRACKET
"""
priority = None
if len(p) > 5:
priority = int(p[8])
p[0] = Trigger(p[2], p[4], priority)
def p_retry(p):
"""
retry : RETRY OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET
| RETRY OPEN_BRACKET retry_option_list CLOSE_BRACKET OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET
"""
if len(p) == 5:
p[0] = Retry(p[3])
elif len(p) == 8:
p[0] = Retry(p[6], **p[3])
else:
raise RuntimeError("Invalid product rules for 'retry_option_list'")
def p_retry_option_list(p):
"""
retry_option_list : retry_option COMMA retry_option_list
| retry_option
"""
if len(p) == 4:
p[0] = merge_map(p[1], p[3])
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production in 'retry_option_list'")
def p_retry_option(p):
"""
retry_option : LIMIT COLON NUMBER
| DELAY COLON IDENTIFIER OPEN_BRACKET NUMBER CLOSE_BRACKET
"""
if len(p) == 4:
p[0] = {"limit": int(p[3]) }
elif len(p) == 7:
p[0] = {"delay": Delay(int(p[5]), p[3])}
else:
raise RuntimeError("Invalid production in 'retry_option'")
def p_ignore(p):
"""
ignore : IGNORE OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET
"""
p[0] = IgnoreError(p[3])
def p_error(t):
raise MADSyntaxError((t.lineno, t.lexpos), t.value)
def merge_map(map_A, map_B):
tmp = map_A.copy()
tmp.update(map_B)
return tmp
class MADSyntaxError(BaseException):
def __init__(self, position, hint):
self.position = position
self.hint = hint
@property
def line_number(self):
return self.position[0]
def __repr__(self):
return "Syntax error at line {line:d}, around '{hint}'.".format(
line=self.position[0],
hint=self.hint)
class Parser:
def __init__(self, file_system, root_file):
self.root_file = root_file
self.file_system = file_system
def parse(self, entry_rule="unit", logger=yacc.NullLogger()):
lexer.lineno = 1
text = self._content()
parser = yacc.yacc(start=entry_rule, errorlog=logger)
return parser.parse(lexer=lexer, input=text)
def _content(self):
lines = self.file_system.open_input_stream(self.root_file).readlines()
return "\n".join(lines)
|
fchauvel/MAD
|
mad/parsing.py
|
p_throttling
|
python
|
def p_throttling(p):
throttling = NoThrottlingSettings()
if len(p) == 7:
throttling = TailDropSettings(int(p[5]))
p[0] = {"throttling": throttling}
|
throttling : THROTTLING COLON NONE
| THROTTLING COLON TAIL_DROP OPEN_BRACKET NUMBER CLOSE_BRACKET
|
train
|
https://github.com/fchauvel/MAD/blob/806d5174848b1a502e5c683894995602478c448b/mad/parsing.py#L199-L207
| null |
#!/usr/bin/env python
#
# This file is part of MAD.
#
# MAD is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MAD is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MAD. If not, see <http://www.gnu.org/licenses/>.
#
import ply.lex as lex
import ply.yacc as yacc
from mad.ast.settings import *
from mad.ast.definitions import *
from mad.ast.actions import *
reserved = {
"autoscaling": "AUTOSCALING",
"client": "CLIENT",
"delay": "DELAY",
"every": "EVERY",
"fail": "FAIL",
"FIFO": "FIFO",
"ignore": "IGNORE",
"invoke": "INVOKE",
"LIFO": "LIFO",
"limit": "LIMIT",
"limits": "LIMITS",
"none": "NONE",
"operation": "OPERATION",
"period": "PERIOD",
"priority": "PRIORITY",
"queue": "QUEUE",
"query": "QUERY",
"retry": "RETRY",
"service": "SERVICE",
"settings": "SETTINGS",
"tail-drop": "TAIL_DROP",
"think": "THINK",
"throttling": "THROTTLING",
"timeout": "TIMEOUT"
}
# List of token names. This is always required
tokens = [ "CLOSE_BRACKET",
"CLOSE_CURLY_BRACKET",
"CLOSE_SQUARE_BRACKET",
"COLON",
"COMMA",
"IDENTIFIER",
"OPEN_BRACKET",
"OPEN_CURLY_BRACKET",
"OPEN_SQUARE_BRACKET",
"NUMBER",
"REAL",
"SLASH"] + list(reserved.values())
t_CLOSE_BRACKET = r"\)"
t_CLOSE_CURLY_BRACKET = r"\}"
t_CLOSE_SQUARE_BRACKET = r"\]"
t_COLON = r":"
t_COMMA = r","
t_OPEN_BRACKET = r"\("
t_OPEN_CURLY_BRACKET = r"\{"
t_OPEN_SQUARE_BRACKET = r"\["
t_SLASH = r"/"
def t_IDENTIFIER(t):
r'[a-zA-Z_][a-zA-Z0-9_\-]+'
t.type = reserved.get(t.value,'IDENTIFIER') # Check for reserved words
return t
def t_NUMBER(t):
r'[+-]?(\d+(\.\d*)?|\.\d+)([eE][+-]?\d+)?'
return t
def t_newline(t):
# Define a rule so we can track line numbers
r'\n+'
t.lexer.lineno += 1
def t_COMMENT(t):
r'\#.*'
pass
# A string containing ignored characters (spaces and tabs)
t_ignore = ' \t'
def t_error(t):
# Error handling rule
print("Illegal character '%s'" % t.value[0])
t.lexer.skip(1)
lexer = lex.lex()
# -----------------------------
# Parsing rules
def p_unit(p):
"""
unit : definition_list
"""
p[0] = p[1]
def p_definition_list(p):
"""
definition_list : definition definition_list
| definition
"""
if len(p) == 3:
p[0] = p[1] + p[2]
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production rules 'p_action_list'")
def p_definition(p):
"""
definition : define_service
| define_client
"""
p[0] = p[1]
def p_define_service(p):
"""
define_service : SERVICE IDENTIFIER OPEN_CURLY_BRACKET settings operation_list CLOSE_CURLY_BRACKET
| SERVICE IDENTIFIER OPEN_CURLY_BRACKET operation_list CLOSE_CURLY_BRACKET
"""
if len(p) == 7:
body = p[4] + p[5]
else:
body = p[4]
p[0] = DefineService(p[2], body)
def p_settings(p):
"""
settings : SETTINGS OPEN_CURLY_BRACKET setting_list CLOSE_CURLY_BRACKET
"""
p[0] = Settings(**p[3])
def p_setting_list(p):
"""
setting_list : setting setting_list
| setting
"""
if len(p) == 3:
p[0] = merge_map(p[1], p[2])
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production rules 'p_setting_list'")
def p_setting(p):
"""
setting : queue
| autoscaling
| throttling
"""
p[0] = p[1]
def p_queue(p):
"""
queue : QUEUE COLON LIFO
| QUEUE COLON FIFO
"""
if p[3] == "LIFO":
p[0] = {"queue": LIFO()}
elif p[3] == "FIFO":
p[0] = {"queue": FIFO()}
else:
raise RuntimeError("Queue discipline '%s' is not supported!" % p[1])
def p_autoscaling(p):
"""
autoscaling : AUTOSCALING OPEN_CURLY_BRACKET autoscaling_setting_list CLOSE_CURLY_BRACKET
"""
p[0] = {"autoscaling": Autoscaling(**p[3])}
def p_autoscaling_setting_list(p):
"""
autoscaling_setting_list : autoscaling_setting autoscaling_setting_list
| autoscaling_setting
"""
if len(p) == 3:
p[0] = merge_map(p[1], p[2])
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production in 'autoscaling_setting_list'")
def p_autoscaling_setting(p):
"""
autoscaling_setting : PERIOD COLON NUMBER
| LIMITS COLON OPEN_SQUARE_BRACKET NUMBER COMMA NUMBER CLOSE_SQUARE_BRACKET
"""
if len(p) == 8:
p[0] = {"limits": (int(p[4]), int(p[6]))}
elif len(p) == 4:
p[0] = {"period": int(p[3])}
else:
raise RuntimeError("Invalid product in 'autoscaling_setting'")
def p_operation_list(p):
"""
operation_list : define_operation operation_list
| define_operation
"""
if len(p) == 3:
p[0] = p[1] + p[2]
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production rules 'p_operation_list'")
def p_define_client(p):
"""
define_client : CLIENT IDENTIFIER OPEN_CURLY_BRACKET EVERY NUMBER OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET CLOSE_CURLY_BRACKET
"""
p[0] = DefineClientStub(p[2], int(p[5]), p[7])
def p_define_operation(p):
"""
define_operation : OPERATION IDENTIFIER OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET
"""
p[0] = DefineOperation(p[2], p[4])
def p_action_list(p):
"""
action_list : action action_list
| action
"""
if len(p) == 3:
p[0] = p[1] + p[2]
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production rules 'p_action_list'")
def p_action(p):
"""
action : invoke
| query
| think
| fail
| retry
| ignore
"""
p[0] = p[1]
def p_think(p):
"""
think : THINK NUMBER
"""
p[0] = Think(int(p[2]))
def p_fail(p):
"""
fail : FAIL NUMBER
| FAIL
"""
if len(p) > 2:
p[0] = Fail(float(p[2]))
else:
p[0] = Fail()
def p_query(p):
"""
query : QUERY IDENTIFIER SLASH IDENTIFIER
| QUERY IDENTIFIER SLASH IDENTIFIER OPEN_CURLY_BRACKET query_option_list CLOSE_CURLY_BRACKET
"""
parameters = {"service": p[2], "operation": p[4]}
if len(p) > 5:
parameters = merge_map(parameters, p[6])
p[0] = Query(**parameters)
def p_query_option_list(p):
"""
query_option_list : query_option COMMA query_option_list
| query_option
"""
if len(p) == 2:
p[0] = p[1]
elif len(p) == 4:
p[0] = merge_map(p[1], p[3])
else:
raise RuntimeError("Invalid product rules for 'query_option_list'")
def p_query_option(p):
"""
query_option : timeout
| priority
"""
p[0] = p[1]
def p_timeout(p):
"""
timeout : TIMEOUT COLON NUMBER
"""
p[0] = {"timeout": int(p[3])}
def p_priority(p):
"""
priority : PRIORITY COLON NUMBER
"""
p[0] = {"priority": int(p[3])}
def p_invoke(p):
"""
invoke : INVOKE IDENTIFIER SLASH IDENTIFIER
| INVOKE IDENTIFIER SLASH IDENTIFIER OPEN_CURLY_BRACKET PRIORITY COLON NUMBER CLOSE_CURLY_BRACKET
"""
priority = None
if len(p) > 5:
priority = int(p[8])
p[0] = Trigger(p[2], p[4], priority)
def p_retry(p):
"""
retry : RETRY OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET
| RETRY OPEN_BRACKET retry_option_list CLOSE_BRACKET OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET
"""
if len(p) == 5:
p[0] = Retry(p[3])
elif len(p) == 8:
p[0] = Retry(p[6], **p[3])
else:
raise RuntimeError("Invalid product rules for 'retry_option_list'")
def p_retry_option_list(p):
"""
retry_option_list : retry_option COMMA retry_option_list
| retry_option
"""
if len(p) == 4:
p[0] = merge_map(p[1], p[3])
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production in 'retry_option_list'")
def p_retry_option(p):
"""
retry_option : LIMIT COLON NUMBER
| DELAY COLON IDENTIFIER OPEN_BRACKET NUMBER CLOSE_BRACKET
"""
if len(p) == 4:
p[0] = {"limit": int(p[3]) }
elif len(p) == 7:
p[0] = {"delay": Delay(int(p[5]), p[3])}
else:
raise RuntimeError("Invalid production in 'retry_option'")
def p_ignore(p):
"""
ignore : IGNORE OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET
"""
p[0] = IgnoreError(p[3])
def p_error(t):
raise MADSyntaxError((t.lineno, t.lexpos), t.value)
def merge_map(map_A, map_B):
tmp = map_A.copy()
tmp.update(map_B)
return tmp
class MADSyntaxError(BaseException):
def __init__(self, position, hint):
self.position = position
self.hint = hint
@property
def line_number(self):
return self.position[0]
def __repr__(self):
return "Syntax error at line {line:d}, around '{hint}'.".format(
line=self.position[0],
hint=self.hint)
class Parser:
def __init__(self, file_system, root_file):
self.root_file = root_file
self.file_system = file_system
def parse(self, entry_rule="unit", logger=yacc.NullLogger()):
lexer.lineno = 1
text = self._content()
parser = yacc.yacc(start=entry_rule, errorlog=logger)
return parser.parse(lexer=lexer, input=text)
def _content(self):
lines = self.file_system.open_input_stream(self.root_file).readlines()
return "\n".join(lines)
|
fchauvel/MAD
|
mad/parsing.py
|
p_autoscaling_setting_list
|
python
|
def p_autoscaling_setting_list(p):
if len(p) == 3:
p[0] = merge_map(p[1], p[2])
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production in 'autoscaling_setting_list'")
|
autoscaling_setting_list : autoscaling_setting autoscaling_setting_list
| autoscaling_setting
|
train
|
https://github.com/fchauvel/MAD/blob/806d5174848b1a502e5c683894995602478c448b/mad/parsing.py#L217-L227
|
[
"def merge_map(map_A, map_B):\n tmp = map_A.copy()\n tmp.update(map_B)\n return tmp\n"
] |
#!/usr/bin/env python
#
# This file is part of MAD.
#
# MAD is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MAD is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MAD. If not, see <http://www.gnu.org/licenses/>.
#
import ply.lex as lex
import ply.yacc as yacc
from mad.ast.settings import *
from mad.ast.definitions import *
from mad.ast.actions import *
reserved = {
"autoscaling": "AUTOSCALING",
"client": "CLIENT",
"delay": "DELAY",
"every": "EVERY",
"fail": "FAIL",
"FIFO": "FIFO",
"ignore": "IGNORE",
"invoke": "INVOKE",
"LIFO": "LIFO",
"limit": "LIMIT",
"limits": "LIMITS",
"none": "NONE",
"operation": "OPERATION",
"period": "PERIOD",
"priority": "PRIORITY",
"queue": "QUEUE",
"query": "QUERY",
"retry": "RETRY",
"service": "SERVICE",
"settings": "SETTINGS",
"tail-drop": "TAIL_DROP",
"think": "THINK",
"throttling": "THROTTLING",
"timeout": "TIMEOUT"
}
# List of token names. This is always required
tokens = [ "CLOSE_BRACKET",
"CLOSE_CURLY_BRACKET",
"CLOSE_SQUARE_BRACKET",
"COLON",
"COMMA",
"IDENTIFIER",
"OPEN_BRACKET",
"OPEN_CURLY_BRACKET",
"OPEN_SQUARE_BRACKET",
"NUMBER",
"REAL",
"SLASH"] + list(reserved.values())
t_CLOSE_BRACKET = r"\)"
t_CLOSE_CURLY_BRACKET = r"\}"
t_CLOSE_SQUARE_BRACKET = r"\]"
t_COLON = r":"
t_COMMA = r","
t_OPEN_BRACKET = r"\("
t_OPEN_CURLY_BRACKET = r"\{"
t_OPEN_SQUARE_BRACKET = r"\["
t_SLASH = r"/"
def t_IDENTIFIER(t):
r'[a-zA-Z_][a-zA-Z0-9_\-]+'
t.type = reserved.get(t.value,'IDENTIFIER') # Check for reserved words
return t
def t_NUMBER(t):
r'[+-]?(\d+(\.\d*)?|\.\d+)([eE][+-]?\d+)?'
return t
def t_newline(t):
# Define a rule so we can track line numbers
r'\n+'
t.lexer.lineno += 1
def t_COMMENT(t):
r'\#.*'
pass
# A string containing ignored characters (spaces and tabs)
t_ignore = ' \t'
def t_error(t):
# Error handling rule
print("Illegal character '%s'" % t.value[0])
t.lexer.skip(1)
lexer = lex.lex()
# -----------------------------
# Parsing rules
def p_unit(p):
"""
unit : definition_list
"""
p[0] = p[1]
def p_definition_list(p):
"""
definition_list : definition definition_list
| definition
"""
if len(p) == 3:
p[0] = p[1] + p[2]
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production rules 'p_action_list'")
def p_definition(p):
"""
definition : define_service
| define_client
"""
p[0] = p[1]
def p_define_service(p):
"""
define_service : SERVICE IDENTIFIER OPEN_CURLY_BRACKET settings operation_list CLOSE_CURLY_BRACKET
| SERVICE IDENTIFIER OPEN_CURLY_BRACKET operation_list CLOSE_CURLY_BRACKET
"""
if len(p) == 7:
body = p[4] + p[5]
else:
body = p[4]
p[0] = DefineService(p[2], body)
def p_settings(p):
"""
settings : SETTINGS OPEN_CURLY_BRACKET setting_list CLOSE_CURLY_BRACKET
"""
p[0] = Settings(**p[3])
def p_setting_list(p):
"""
setting_list : setting setting_list
| setting
"""
if len(p) == 3:
p[0] = merge_map(p[1], p[2])
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production rules 'p_setting_list'")
def p_setting(p):
"""
setting : queue
| autoscaling
| throttling
"""
p[0] = p[1]
def p_queue(p):
"""
queue : QUEUE COLON LIFO
| QUEUE COLON FIFO
"""
if p[3] == "LIFO":
p[0] = {"queue": LIFO()}
elif p[3] == "FIFO":
p[0] = {"queue": FIFO()}
else:
raise RuntimeError("Queue discipline '%s' is not supported!" % p[1])
def p_throttling(p):
"""
throttling : THROTTLING COLON NONE
| THROTTLING COLON TAIL_DROP OPEN_BRACKET NUMBER CLOSE_BRACKET
"""
throttling = NoThrottlingSettings()
if len(p) == 7:
throttling = TailDropSettings(int(p[5]))
p[0] = {"throttling": throttling}
def p_autoscaling(p):
"""
autoscaling : AUTOSCALING OPEN_CURLY_BRACKET autoscaling_setting_list CLOSE_CURLY_BRACKET
"""
p[0] = {"autoscaling": Autoscaling(**p[3])}
def p_autoscaling_setting(p):
"""
autoscaling_setting : PERIOD COLON NUMBER
| LIMITS COLON OPEN_SQUARE_BRACKET NUMBER COMMA NUMBER CLOSE_SQUARE_BRACKET
"""
if len(p) == 8:
p[0] = {"limits": (int(p[4]), int(p[6]))}
elif len(p) == 4:
p[0] = {"period": int(p[3])}
else:
raise RuntimeError("Invalid product in 'autoscaling_setting'")
def p_operation_list(p):
"""
operation_list : define_operation operation_list
| define_operation
"""
if len(p) == 3:
p[0] = p[1] + p[2]
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production rules 'p_operation_list'")
def p_define_client(p):
"""
define_client : CLIENT IDENTIFIER OPEN_CURLY_BRACKET EVERY NUMBER OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET CLOSE_CURLY_BRACKET
"""
p[0] = DefineClientStub(p[2], int(p[5]), p[7])
def p_define_operation(p):
"""
define_operation : OPERATION IDENTIFIER OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET
"""
p[0] = DefineOperation(p[2], p[4])
def p_action_list(p):
"""
action_list : action action_list
| action
"""
if len(p) == 3:
p[0] = p[1] + p[2]
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production rules 'p_action_list'")
def p_action(p):
"""
action : invoke
| query
| think
| fail
| retry
| ignore
"""
p[0] = p[1]
def p_think(p):
"""
think : THINK NUMBER
"""
p[0] = Think(int(p[2]))
def p_fail(p):
"""
fail : FAIL NUMBER
| FAIL
"""
if len(p) > 2:
p[0] = Fail(float(p[2]))
else:
p[0] = Fail()
def p_query(p):
"""
query : QUERY IDENTIFIER SLASH IDENTIFIER
| QUERY IDENTIFIER SLASH IDENTIFIER OPEN_CURLY_BRACKET query_option_list CLOSE_CURLY_BRACKET
"""
parameters = {"service": p[2], "operation": p[4]}
if len(p) > 5:
parameters = merge_map(parameters, p[6])
p[0] = Query(**parameters)
def p_query_option_list(p):
"""
query_option_list : query_option COMMA query_option_list
| query_option
"""
if len(p) == 2:
p[0] = p[1]
elif len(p) == 4:
p[0] = merge_map(p[1], p[3])
else:
raise RuntimeError("Invalid product rules for 'query_option_list'")
def p_query_option(p):
"""
query_option : timeout
| priority
"""
p[0] = p[1]
def p_timeout(p):
"""
timeout : TIMEOUT COLON NUMBER
"""
p[0] = {"timeout": int(p[3])}
def p_priority(p):
"""
priority : PRIORITY COLON NUMBER
"""
p[0] = {"priority": int(p[3])}
def p_invoke(p):
"""
invoke : INVOKE IDENTIFIER SLASH IDENTIFIER
| INVOKE IDENTIFIER SLASH IDENTIFIER OPEN_CURLY_BRACKET PRIORITY COLON NUMBER CLOSE_CURLY_BRACKET
"""
priority = None
if len(p) > 5:
priority = int(p[8])
p[0] = Trigger(p[2], p[4], priority)
def p_retry(p):
"""
retry : RETRY OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET
| RETRY OPEN_BRACKET retry_option_list CLOSE_BRACKET OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET
"""
if len(p) == 5:
p[0] = Retry(p[3])
elif len(p) == 8:
p[0] = Retry(p[6], **p[3])
else:
raise RuntimeError("Invalid product rules for 'retry_option_list'")
def p_retry_option_list(p):
"""
retry_option_list : retry_option COMMA retry_option_list
| retry_option
"""
if len(p) == 4:
p[0] = merge_map(p[1], p[3])
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production in 'retry_option_list'")
def p_retry_option(p):
"""
retry_option : LIMIT COLON NUMBER
| DELAY COLON IDENTIFIER OPEN_BRACKET NUMBER CLOSE_BRACKET
"""
if len(p) == 4:
p[0] = {"limit": int(p[3]) }
elif len(p) == 7:
p[0] = {"delay": Delay(int(p[5]), p[3])}
else:
raise RuntimeError("Invalid production in 'retry_option'")
def p_ignore(p):
"""
ignore : IGNORE OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET
"""
p[0] = IgnoreError(p[3])
def p_error(t):
raise MADSyntaxError((t.lineno, t.lexpos), t.value)
def merge_map(map_A, map_B):
tmp = map_A.copy()
tmp.update(map_B)
return tmp
class MADSyntaxError(BaseException):
def __init__(self, position, hint):
self.position = position
self.hint = hint
@property
def line_number(self):
return self.position[0]
def __repr__(self):
return "Syntax error at line {line:d}, around '{hint}'.".format(
line=self.position[0],
hint=self.hint)
class Parser:
def __init__(self, file_system, root_file):
self.root_file = root_file
self.file_system = file_system
def parse(self, entry_rule="unit", logger=yacc.NullLogger()):
lexer.lineno = 1
text = self._content()
parser = yacc.yacc(start=entry_rule, errorlog=logger)
return parser.parse(lexer=lexer, input=text)
def _content(self):
lines = self.file_system.open_input_stream(self.root_file).readlines()
return "\n".join(lines)
|
fchauvel/MAD
|
mad/parsing.py
|
p_autoscaling_setting
|
python
|
def p_autoscaling_setting(p):
if len(p) == 8:
p[0] = {"limits": (int(p[4]), int(p[6]))}
elif len(p) == 4:
p[0] = {"period": int(p[3])}
else:
raise RuntimeError("Invalid product in 'autoscaling_setting'")
|
autoscaling_setting : PERIOD COLON NUMBER
| LIMITS COLON OPEN_SQUARE_BRACKET NUMBER COMMA NUMBER CLOSE_SQUARE_BRACKET
|
train
|
https://github.com/fchauvel/MAD/blob/806d5174848b1a502e5c683894995602478c448b/mad/parsing.py#L230-L240
| null |
#!/usr/bin/env python
#
# This file is part of MAD.
#
# MAD is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MAD is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MAD. If not, see <http://www.gnu.org/licenses/>.
#
import ply.lex as lex
import ply.yacc as yacc
from mad.ast.settings import *
from mad.ast.definitions import *
from mad.ast.actions import *
# Keywords of the MAD language, mapped to their token names.
# t_IDENTIFIER consults this map so keywords are emitted as reserved
# tokens rather than plain IDENTIFIERs.
reserved = {
    "autoscaling": "AUTOSCALING",
    "client": "CLIENT",
    "delay": "DELAY",
    "every": "EVERY",
    "fail": "FAIL",
    "FIFO": "FIFO",
    "ignore": "IGNORE",
    "invoke": "INVOKE",
    "LIFO": "LIFO",
    "limit": "LIMIT",
    "limits": "LIMITS",
    "none": "NONE",
    "operation": "OPERATION",
    "period": "PERIOD",
    "priority": "PRIORITY",
    "queue": "QUEUE",
    "query": "QUERY",
    "retry": "RETRY",
    "service": "SERVICE",
    "settings": "SETTINGS",
    "tail-drop": "TAIL_DROP",
    "think": "THINK",
    "throttling": "THROTTLING",
    "timeout": "TIMEOUT"
}
# List of token names. This is always required
tokens = [
    "CLOSE_BRACKET",
    "CLOSE_CURLY_BRACKET",
    "CLOSE_SQUARE_BRACKET",
    "COLON",
    "COMMA",
    "IDENTIFIER",
    "OPEN_BRACKET",
    "OPEN_CURLY_BRACKET",
    "OPEN_SQUARE_BRACKET",
    "NUMBER",
    "REAL",
    "SLASH",
] + list(reserved.values())

# Regular expressions for the simple punctuation tokens.
t_CLOSE_BRACKET = r"\)"
t_CLOSE_CURLY_BRACKET = r"\}"
t_CLOSE_SQUARE_BRACKET = r"\]"
t_COLON = r":"
t_COMMA = r","
t_OPEN_BRACKET = r"\("
t_OPEN_CURLY_BRACKET = r"\{"
t_OPEN_SQUARE_BRACKET = r"\["
t_SLASH = r"/"
def t_IDENTIFIER(t):
    r'[a-zA-Z_][a-zA-Z0-9_\-]*'
    # Fixed: the tail quantifier was '+', which required identifiers to be
    # at least two characters long, so a single-letter name (e.g. 'x')
    # could not be lexed.  '*' accepts one-character identifiers too.
    t.type = reserved.get(t.value,'IDENTIFIER') # Check for reserved words
    return t
def t_NUMBER(t):
    r'[+-]?(\d+(\.\d*)?|\.\d+)([eE][+-]?\d+)?'
    # Matches integer and real literals alike; t.value stays a string and
    # the grammar actions convert it with int()/float() as needed.
    # NOTE(review): a real literal fed to an int(...) action (e.g. 'timeout')
    # would raise ValueError — confirm reals only appear where float() is used.
    return t
def t_newline(t):
    # Define a rule so we can track line numbers
    r'\n+'
    # Fixed: the pattern can swallow several consecutive newlines in one
    # match, but the counter was only incremented by 1, so reported line
    # numbers drifted after any blank line.  Count every '\n' matched.
    t.lexer.lineno += len(t.value)
def t_COMMENT(t):
    r'\#.*'
    # A '#' starts a comment running to end of line; returning nothing
    # discards the matched text.
    pass
# A string containing ignored characters (spaces and tabs)
t_ignore = ' \t'
def t_error(t):
    # Lexing-error hook: report the offending character, then skip past it
    # so scanning can continue with the rest of the input.
    offending = t.value[0]
    print("Illegal character '%s'" % offending)
    t.lexer.skip(1)
lexer = lex.lex()
# -----------------------------
# Parsing rules
def p_unit(p):
    """
    unit : definition_list
    """
    # A compilation unit is just its sequence of service/client definitions.
    p[0] = p[1]
def p_definition_list(p):
    """
    definition_list : definition definition_list
                    | definition
    """
    # Fold the right-recursive list into one sequence; '+' concatenates
    # the AST sequences produced by each alternative.
    if len(p) == 3:
        p[0] = p[1] + p[2]
    elif len(p) == 2:
        p[0] = p[1]
    else:
        # Fixed: the message used to blame 'p_action_list' (copy-paste slip).
        raise RuntimeError("Invalid production rules 'p_definition_list'")
def p_definition(p):
    """
    definition : define_service
               | define_client
    """
    # Forward the child AST node unchanged.
    p[0] = p[1]
def p_define_service(p):
    """
    define_service : SERVICE IDENTIFIER OPEN_CURLY_BRACKET settings operation_list CLOSE_CURLY_BRACKET
                   | SERVICE IDENTIFIER OPEN_CURLY_BRACKET operation_list CLOSE_CURLY_BRACKET
    """
    # p[2] is the service name; the body is the optional settings block
    # (first alternative) concatenated with the operation definitions.
    if len(p) == 7:
        body = p[4] + p[5]
    else:
        body = p[4]
    p[0] = DefineService(p[2], body)
def p_settings(p):
    """
    settings : SETTINGS OPEN_CURLY_BRACKET setting_list CLOSE_CURLY_BRACKET
    """
    # setting_list reduces to a dict; expand it as keyword args of Settings.
    p[0] = Settings(**p[3])
def p_setting_list(p):
    """
    setting_list : setting setting_list
                 | setting
    """
    # Accumulate the one-entry setting dicts into a single mapping.
    if len(p) == 2:
        p[0] = p[1]
    elif len(p) == 3:
        p[0] = merge_map(p[1], p[2])
    else:
        raise RuntimeError("Invalid production rules 'p_setting_list'")
def p_setting(p):
    """
    setting : queue
            | autoscaling
            | throttling
    """
    # Each alternative already yields a one-entry dict; pass it through.
    p[0] = p[1]
def p_queue(p):
    """
    queue : QUEUE COLON LIFO
          | QUEUE COLON FIFO
    """
    # Map the discipline keyword (p[3]) to the corresponding queue node.
    if p[3] == "LIFO":
        p[0] = {"queue": LIFO()}
    elif p[3] == "FIFO":
        p[0] = {"queue": FIFO()}
    else:
        # Fixed: the message interpolated p[1], which is always the literal
        # keyword 'queue'; report the unsupported discipline (p[3]) instead.
        raise RuntimeError("Queue discipline '%s' is not supported!" % p[3])
def p_throttling(p):
    """
    throttling : THROTTLING COLON NONE
               | THROTTLING COLON TAIL_DROP OPEN_BRACKET NUMBER CLOSE_BRACKET
    """
    # Default to no throttling; the 7-symbol alternative carries the
    # tail-drop capacity in p[5].
    throttling = NoThrottlingSettings()
    if len(p) == 7:
        throttling = TailDropSettings(int(p[5]))
    p[0] = {"throttling": throttling}
def p_autoscaling(p):
    """
    autoscaling : AUTOSCALING OPEN_CURLY_BRACKET autoscaling_setting_list CLOSE_CURLY_BRACKET
    """
    # The setting list reduces to a dict of keyword args for Autoscaling.
    p[0] = {"autoscaling": Autoscaling(**p[3])}
def p_autoscaling_setting_list(p):
    """
    autoscaling_setting_list : autoscaling_setting autoscaling_setting_list
                             | autoscaling_setting
    """
    # Collapse the settings of the right-recursive list into one mapping.
    if len(p) == 2:
        p[0] = p[1]
    elif len(p) == 3:
        p[0] = merge_map(p[1], p[2])
    else:
        raise RuntimeError("Invalid production in 'autoscaling_setting_list'")
def p_operation_list(p):
    """
    operation_list : define_operation operation_list
                   | define_operation
    """
    # Either a single operation, or one operation followed by the rest.
    if len(p) == 2:
        p[0] = p[1]
    elif len(p) == 3:
        p[0] = p[1] + p[2]
    else:
        raise RuntimeError("Invalid production rules 'p_operation_list'")
def p_define_client(p):
    """
    define_client : CLIENT IDENTIFIER OPEN_CURLY_BRACKET EVERY NUMBER OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET CLOSE_CURLY_BRACKET
    """
    # p[2]: client name; p[5]: emission period; p[7]: actions to run.
    p[0] = DefineClientStub(p[2], int(p[5]), p[7])
def p_define_operation(p):
    """
    define_operation : OPERATION IDENTIFIER OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET
    """
    # p[2]: operation name; p[4]: the sequence of actions forming its body.
    p[0] = DefineOperation(p[2], p[4])
def p_action_list(p):
    """
    action_list : action action_list
                | action
    """
    # A single action, or an action followed by the remaining sequence.
    if len(p) == 2:
        p[0] = p[1]
    elif len(p) == 3:
        p[0] = p[1] + p[2]
    else:
        raise RuntimeError("Invalid production rules 'p_action_list'")
def p_action(p):
    """
    action : invoke
           | query
           | think
           | fail
           | retry
           | ignore
    """
    # Every concrete action reduces to its own AST node; forward it.
    p[0] = p[1]
def p_think(p):
    """
    think : THINK NUMBER
    """
    # Wrap the duration literal (p[2]) in a Think node.
    p[0] = Think(int(p[2]))
def p_fail(p):
    """
    fail : FAIL NUMBER
         | FAIL
    """
    # The NUMBER argument is optional; when present it is passed to Fail()
    # as a float (presumably a failure probability — confirm in mad.ast.actions).
    if len(p) > 2:
        p[0] = Fail(float(p[2]))
    else:
        p[0] = Fail()
def p_query(p):
    """
    query : QUERY IDENTIFIER SLASH IDENTIFIER
          | QUERY IDENTIFIER SLASH IDENTIFIER OPEN_CURLY_BRACKET query_option_list CLOSE_CURLY_BRACKET
    """
    # 'query service/operation', optionally followed by a {timeout/priority}
    # option block whose dict (p[6]) is merged into the keyword arguments.
    parameters = {"service": p[2], "operation": p[4]}
    if len(p) > 5:
        parameters = merge_map(parameters, p[6])
    p[0] = Query(**parameters)
def p_query_option_list(p):
    """
    query_option_list : query_option COMMA query_option_list
                      | query_option
    """
    # Collapse the comma-separated options into a single mapping.
    count = len(p)
    if count == 4:
        p[0] = merge_map(p[1], p[3])
    elif count == 2:
        p[0] = p[1]
    else:
        raise RuntimeError("Invalid product rules for 'query_option_list'")
def p_query_option(p):
    """
    query_option : timeout
                 | priority
    """
    # Each option is already a one-entry dict; pass it through.
    p[0] = p[1]
def p_timeout(p):
    """
    timeout : TIMEOUT COLON NUMBER
    """
    # e.g. 'timeout: 50' reduces to {"timeout": 50}.
    value = int(p[3])
    p[0] = {"timeout": value}
def p_priority(p):
    """
    priority : PRIORITY COLON NUMBER
    """
    # e.g. 'priority: 3' reduces to {"priority": 3}.
    value = int(p[3])
    p[0] = {"priority": value}
def p_invoke(p):
    """
    invoke : INVOKE IDENTIFIER SLASH IDENTIFIER
           | INVOKE IDENTIFIER SLASH IDENTIFIER OPEN_CURLY_BRACKET PRIORITY COLON NUMBER CLOSE_CURLY_BRACKET
    """
    # 'invoke service/operation'; the optional block sets a priority (p[8]).
    priority = None
    if len(p) > 5:
        priority = int(p[8])
    p[0] = Trigger(p[2], p[4], priority)
def p_retry(p):
    """
    retry : RETRY OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET
          | RETRY OPEN_BRACKET retry_option_list CLOSE_BRACKET OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET
    """
    # 'retry { ... }' or 'retry(limit: N, delay: f(N)) { ... }'; the option
    # dict (p[3]) becomes keyword arguments of Retry.
    if len(p) == 5:
        p[0] = Retry(p[3])
    elif len(p) == 8:
        p[0] = Retry(p[6], **p[3])
    else:
        # Fixed: the message used to blame 'retry_option_list'.
        raise RuntimeError("Invalid product rules for 'retry'")
def p_retry_option_list(p):
    """
    retry_option_list : retry_option COMMA retry_option_list
                      | retry_option
    """
    # Fold the comma-separated retry options into one mapping.
    count = len(p)
    if count == 2:
        p[0] = p[1]
    elif count == 4:
        p[0] = merge_map(p[1], p[3])
    else:
        raise RuntimeError("Invalid production in 'retry_option_list'")
def p_retry_option(p):
    """
    retry_option : LIMIT COLON NUMBER
                 | DELAY COLON IDENTIFIER OPEN_BRACKET NUMBER CLOSE_BRACKET
    """
    # Either a retry count ('limit: 5') or a delay ('delay: constant(10)').
    count = len(p)
    if count == 7:
        p[0] = {"delay": Delay(int(p[5]), p[3])}
    elif count == 4:
        p[0] = {"limit": int(p[3])}
    else:
        raise RuntimeError("Invalid production in 'retry_option'")
def p_ignore(p):
    """
    ignore : IGNORE OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET
    """
    # Wrap the enclosed action list (p[3]) in an IgnoreError node.
    p[0] = IgnoreError(p[3])
def p_error(t):
    # PLY's parser-error hook.  Fixed: PLY passes t=None when the error is
    # an unexpected end of input, which used to crash with AttributeError
    # before the syntax error could be reported.
    if t is None:
        raise MADSyntaxError((0, 0), "unexpected end of input")
    raise MADSyntaxError((t.lineno, t.lexpos), t.value)
def merge_map(map_A, map_B):
    # Non-destructive dict union: entries of map_B override those of map_A.
    merged = dict(map_A)
    merged.update(map_B)
    return merged
class MADSyntaxError(Exception):
    """Signals a syntax error in a MAD description.

    position: a (line, lexpos) pair locating the error; hint: the
    offending token text.
    """

    # Fixed: subclassing Exception (was BaseException) so generic
    # 'except Exception' boundaries can see it; BaseException is reserved
    # for interpreter-level exits such as SystemExit/KeyboardInterrupt.

    def __init__(self, position, hint):
        self.position = position
        self.hint = hint

    @property
    def line_number(self):
        # Line component of the (line, lexpos) pair.
        return self.position[0]

    def __repr__(self):
        return "Syntax error at line {line:d}, around '{hint}'.".format(
            line=self.position[0],
            hint=self.hint)
class Parser:
    """Reads a MAD description from a file system and parses it into an AST."""

    def __init__(self, file_system, root_file):
        self.root_file = root_file
        self.file_system = file_system

    def parse(self, entry_rule="unit", logger=None):
        """Parse the root file starting from `entry_rule`.

        Raises MADSyntaxError on malformed input.
        """
        if logger is None:
            # Built lazily instead of as a default argument, which was
            # evaluated once at class-definition time.
            logger = yacc.NullLogger()
        lexer.lineno = 1  # the lexer is module-global; reset between runs
        text = self._content()
        parser = yacc.yacc(start=entry_rule, errorlog=logger)
        return parser.parse(lexer=lexer, input=text)

    def _content(self):
        lines = self.file_system.open_input_stream(self.root_file).readlines()
        # Fixed: readlines() keeps the trailing '\n' on each line, so
        # joining with "\n" doubled every line break and skewed the line
        # numbers reported in syntax errors.
        return "".join(lines)
|
fchauvel/MAD
|
mad/parsing.py
|
p_operation_list
|
python
|
def p_operation_list(p):
if len(p) == 3:
p[0] = p[1] + p[2]
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production rules 'p_operation_list'")
|
operation_list : define_operation operation_list
| define_operation
|
train
|
https://github.com/fchauvel/MAD/blob/806d5174848b1a502e5c683894995602478c448b/mad/parsing.py#L243-L253
| null |
#!/usr/bin/env python
#
# This file is part of MAD.
#
# MAD is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MAD is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MAD. If not, see <http://www.gnu.org/licenses/>.
#
import ply.lex as lex
import ply.yacc as yacc
from mad.ast.settings import *
from mad.ast.definitions import *
from mad.ast.actions import *
reserved = {
"autoscaling": "AUTOSCALING",
"client": "CLIENT",
"delay": "DELAY",
"every": "EVERY",
"fail": "FAIL",
"FIFO": "FIFO",
"ignore": "IGNORE",
"invoke": "INVOKE",
"LIFO": "LIFO",
"limit": "LIMIT",
"limits": "LIMITS",
"none": "NONE",
"operation": "OPERATION",
"period": "PERIOD",
"priority": "PRIORITY",
"queue": "QUEUE",
"query": "QUERY",
"retry": "RETRY",
"service": "SERVICE",
"settings": "SETTINGS",
"tail-drop": "TAIL_DROP",
"think": "THINK",
"throttling": "THROTTLING",
"timeout": "TIMEOUT"
}
# List of token names. This is always required
tokens = [ "CLOSE_BRACKET",
"CLOSE_CURLY_BRACKET",
"CLOSE_SQUARE_BRACKET",
"COLON",
"COMMA",
"IDENTIFIER",
"OPEN_BRACKET",
"OPEN_CURLY_BRACKET",
"OPEN_SQUARE_BRACKET",
"NUMBER",
"REAL",
"SLASH"] + list(reserved.values())
t_CLOSE_BRACKET = r"\)"
t_CLOSE_CURLY_BRACKET = r"\}"
t_CLOSE_SQUARE_BRACKET = r"\]"
t_COLON = r":"
t_COMMA = r","
t_OPEN_BRACKET = r"\("
t_OPEN_CURLY_BRACKET = r"\{"
t_OPEN_SQUARE_BRACKET = r"\["
t_SLASH = r"/"
def t_IDENTIFIER(t):
r'[a-zA-Z_][a-zA-Z0-9_\-]+'
t.type = reserved.get(t.value,'IDENTIFIER') # Check for reserved words
return t
def t_NUMBER(t):
r'[+-]?(\d+(\.\d*)?|\.\d+)([eE][+-]?\d+)?'
return t
def t_newline(t):
# Define a rule so we can track line numbers
r'\n+'
t.lexer.lineno += 1
def t_COMMENT(t):
r'\#.*'
pass
# A string containing ignored characters (spaces and tabs)
t_ignore = ' \t'
def t_error(t):
# Error handling rule
print("Illegal character '%s'" % t.value[0])
t.lexer.skip(1)
lexer = lex.lex()
# -----------------------------
# Parsing rules
def p_unit(p):
"""
unit : definition_list
"""
p[0] = p[1]
def p_definition_list(p):
"""
definition_list : definition definition_list
| definition
"""
if len(p) == 3:
p[0] = p[1] + p[2]
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production rules 'p_action_list'")
def p_definition(p):
"""
definition : define_service
| define_client
"""
p[0] = p[1]
def p_define_service(p):
"""
define_service : SERVICE IDENTIFIER OPEN_CURLY_BRACKET settings operation_list CLOSE_CURLY_BRACKET
| SERVICE IDENTIFIER OPEN_CURLY_BRACKET operation_list CLOSE_CURLY_BRACKET
"""
if len(p) == 7:
body = p[4] + p[5]
else:
body = p[4]
p[0] = DefineService(p[2], body)
def p_settings(p):
"""
settings : SETTINGS OPEN_CURLY_BRACKET setting_list CLOSE_CURLY_BRACKET
"""
p[0] = Settings(**p[3])
def p_setting_list(p):
"""
setting_list : setting setting_list
| setting
"""
if len(p) == 3:
p[0] = merge_map(p[1], p[2])
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production rules 'p_setting_list'")
def p_setting(p):
"""
setting : queue
| autoscaling
| throttling
"""
p[0] = p[1]
def p_queue(p):
"""
queue : QUEUE COLON LIFO
| QUEUE COLON FIFO
"""
if p[3] == "LIFO":
p[0] = {"queue": LIFO()}
elif p[3] == "FIFO":
p[0] = {"queue": FIFO()}
else:
raise RuntimeError("Queue discipline '%s' is not supported!" % p[1])
def p_throttling(p):
"""
throttling : THROTTLING COLON NONE
| THROTTLING COLON TAIL_DROP OPEN_BRACKET NUMBER CLOSE_BRACKET
"""
throttling = NoThrottlingSettings()
if len(p) == 7:
throttling = TailDropSettings(int(p[5]))
p[0] = {"throttling": throttling}
def p_autoscaling(p):
"""
autoscaling : AUTOSCALING OPEN_CURLY_BRACKET autoscaling_setting_list CLOSE_CURLY_BRACKET
"""
p[0] = {"autoscaling": Autoscaling(**p[3])}
def p_autoscaling_setting_list(p):
"""
autoscaling_setting_list : autoscaling_setting autoscaling_setting_list
| autoscaling_setting
"""
if len(p) == 3:
p[0] = merge_map(p[1], p[2])
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production in 'autoscaling_setting_list'")
def p_autoscaling_setting(p):
"""
autoscaling_setting : PERIOD COLON NUMBER
| LIMITS COLON OPEN_SQUARE_BRACKET NUMBER COMMA NUMBER CLOSE_SQUARE_BRACKET
"""
if len(p) == 8:
p[0] = {"limits": (int(p[4]), int(p[6]))}
elif len(p) == 4:
p[0] = {"period": int(p[3])}
else:
raise RuntimeError("Invalid product in 'autoscaling_setting'")
def p_define_client(p):
"""
define_client : CLIENT IDENTIFIER OPEN_CURLY_BRACKET EVERY NUMBER OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET CLOSE_CURLY_BRACKET
"""
p[0] = DefineClientStub(p[2], int(p[5]), p[7])
def p_define_operation(p):
"""
define_operation : OPERATION IDENTIFIER OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET
"""
p[0] = DefineOperation(p[2], p[4])
def p_action_list(p):
"""
action_list : action action_list
| action
"""
if len(p) == 3:
p[0] = p[1] + p[2]
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production rules 'p_action_list'")
def p_action(p):
"""
action : invoke
| query
| think
| fail
| retry
| ignore
"""
p[0] = p[1]
def p_think(p):
"""
think : THINK NUMBER
"""
p[0] = Think(int(p[2]))
def p_fail(p):
"""
fail : FAIL NUMBER
| FAIL
"""
if len(p) > 2:
p[0] = Fail(float(p[2]))
else:
p[0] = Fail()
def p_query(p):
"""
query : QUERY IDENTIFIER SLASH IDENTIFIER
| QUERY IDENTIFIER SLASH IDENTIFIER OPEN_CURLY_BRACKET query_option_list CLOSE_CURLY_BRACKET
"""
parameters = {"service": p[2], "operation": p[4]}
if len(p) > 5:
parameters = merge_map(parameters, p[6])
p[0] = Query(**parameters)
def p_query_option_list(p):
"""
query_option_list : query_option COMMA query_option_list
| query_option
"""
if len(p) == 2:
p[0] = p[1]
elif len(p) == 4:
p[0] = merge_map(p[1], p[3])
else:
raise RuntimeError("Invalid product rules for 'query_option_list'")
def p_query_option(p):
"""
query_option : timeout
| priority
"""
p[0] = p[1]
def p_timeout(p):
"""
timeout : TIMEOUT COLON NUMBER
"""
p[0] = {"timeout": int(p[3])}
def p_priority(p):
"""
priority : PRIORITY COLON NUMBER
"""
p[0] = {"priority": int(p[3])}
def p_invoke(p):
"""
invoke : INVOKE IDENTIFIER SLASH IDENTIFIER
| INVOKE IDENTIFIER SLASH IDENTIFIER OPEN_CURLY_BRACKET PRIORITY COLON NUMBER CLOSE_CURLY_BRACKET
"""
priority = None
if len(p) > 5:
priority = int(p[8])
p[0] = Trigger(p[2], p[4], priority)
def p_retry(p):
"""
retry : RETRY OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET
| RETRY OPEN_BRACKET retry_option_list CLOSE_BRACKET OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET
"""
if len(p) == 5:
p[0] = Retry(p[3])
elif len(p) == 8:
p[0] = Retry(p[6], **p[3])
else:
raise RuntimeError("Invalid product rules for 'retry_option_list'")
def p_retry_option_list(p):
"""
retry_option_list : retry_option COMMA retry_option_list
| retry_option
"""
if len(p) == 4:
p[0] = merge_map(p[1], p[3])
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production in 'retry_option_list'")
def p_retry_option(p):
"""
retry_option : LIMIT COLON NUMBER
| DELAY COLON IDENTIFIER OPEN_BRACKET NUMBER CLOSE_BRACKET
"""
if len(p) == 4:
p[0] = {"limit": int(p[3]) }
elif len(p) == 7:
p[0] = {"delay": Delay(int(p[5]), p[3])}
else:
raise RuntimeError("Invalid production in 'retry_option'")
def p_ignore(p):
"""
ignore : IGNORE OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET
"""
p[0] = IgnoreError(p[3])
def p_error(t):
raise MADSyntaxError((t.lineno, t.lexpos), t.value)
def merge_map(map_A, map_B):
tmp = map_A.copy()
tmp.update(map_B)
return tmp
class MADSyntaxError(BaseException):
def __init__(self, position, hint):
self.position = position
self.hint = hint
@property
def line_number(self):
return self.position[0]
def __repr__(self):
return "Syntax error at line {line:d}, around '{hint}'.".format(
line=self.position[0],
hint=self.hint)
class Parser:
def __init__(self, file_system, root_file):
self.root_file = root_file
self.file_system = file_system
def parse(self, entry_rule="unit", logger=yacc.NullLogger()):
lexer.lineno = 1
text = self._content()
parser = yacc.yacc(start=entry_rule, errorlog=logger)
return parser.parse(lexer=lexer, input=text)
def _content(self):
lines = self.file_system.open_input_stream(self.root_file).readlines()
return "\n".join(lines)
|
fchauvel/MAD
|
mad/parsing.py
|
p_action_list
|
python
|
def p_action_list(p):
if len(p) == 3:
p[0] = p[1] + p[2]
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production rules 'p_action_list'")
|
action_list : action action_list
| action
|
train
|
https://github.com/fchauvel/MAD/blob/806d5174848b1a502e5c683894995602478c448b/mad/parsing.py#L270-L280
| null |
#!/usr/bin/env python
#
# This file is part of MAD.
#
# MAD is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MAD is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MAD. If not, see <http://www.gnu.org/licenses/>.
#
import ply.lex as lex
import ply.yacc as yacc
from mad.ast.settings import *
from mad.ast.definitions import *
from mad.ast.actions import *
reserved = {
"autoscaling": "AUTOSCALING",
"client": "CLIENT",
"delay": "DELAY",
"every": "EVERY",
"fail": "FAIL",
"FIFO": "FIFO",
"ignore": "IGNORE",
"invoke": "INVOKE",
"LIFO": "LIFO",
"limit": "LIMIT",
"limits": "LIMITS",
"none": "NONE",
"operation": "OPERATION",
"period": "PERIOD",
"priority": "PRIORITY",
"queue": "QUEUE",
"query": "QUERY",
"retry": "RETRY",
"service": "SERVICE",
"settings": "SETTINGS",
"tail-drop": "TAIL_DROP",
"think": "THINK",
"throttling": "THROTTLING",
"timeout": "TIMEOUT"
}
# List of token names. This is always required
tokens = [ "CLOSE_BRACKET",
"CLOSE_CURLY_BRACKET",
"CLOSE_SQUARE_BRACKET",
"COLON",
"COMMA",
"IDENTIFIER",
"OPEN_BRACKET",
"OPEN_CURLY_BRACKET",
"OPEN_SQUARE_BRACKET",
"NUMBER",
"REAL",
"SLASH"] + list(reserved.values())
t_CLOSE_BRACKET = r"\)"
t_CLOSE_CURLY_BRACKET = r"\}"
t_CLOSE_SQUARE_BRACKET = r"\]"
t_COLON = r":"
t_COMMA = r","
t_OPEN_BRACKET = r"\("
t_OPEN_CURLY_BRACKET = r"\{"
t_OPEN_SQUARE_BRACKET = r"\["
t_SLASH = r"/"
def t_IDENTIFIER(t):
r'[a-zA-Z_][a-zA-Z0-9_\-]+'
t.type = reserved.get(t.value,'IDENTIFIER') # Check for reserved words
return t
def t_NUMBER(t):
r'[+-]?(\d+(\.\d*)?|\.\d+)([eE][+-]?\d+)?'
return t
def t_newline(t):
# Define a rule so we can track line numbers
r'\n+'
t.lexer.lineno += 1
def t_COMMENT(t):
r'\#.*'
pass
# A string containing ignored characters (spaces and tabs)
t_ignore = ' \t'
def t_error(t):
# Error handling rule
print("Illegal character '%s'" % t.value[0])
t.lexer.skip(1)
lexer = lex.lex()
# -----------------------------
# Parsing rules
def p_unit(p):
"""
unit : definition_list
"""
p[0] = p[1]
def p_definition_list(p):
"""
definition_list : definition definition_list
| definition
"""
if len(p) == 3:
p[0] = p[1] + p[2]
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production rules 'p_action_list'")
def p_definition(p):
"""
definition : define_service
| define_client
"""
p[0] = p[1]
def p_define_service(p):
"""
define_service : SERVICE IDENTIFIER OPEN_CURLY_BRACKET settings operation_list CLOSE_CURLY_BRACKET
| SERVICE IDENTIFIER OPEN_CURLY_BRACKET operation_list CLOSE_CURLY_BRACKET
"""
if len(p) == 7:
body = p[4] + p[5]
else:
body = p[4]
p[0] = DefineService(p[2], body)
def p_settings(p):
"""
settings : SETTINGS OPEN_CURLY_BRACKET setting_list CLOSE_CURLY_BRACKET
"""
p[0] = Settings(**p[3])
def p_setting_list(p):
"""
setting_list : setting setting_list
| setting
"""
if len(p) == 3:
p[0] = merge_map(p[1], p[2])
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production rules 'p_setting_list'")
def p_setting(p):
"""
setting : queue
| autoscaling
| throttling
"""
p[0] = p[1]
def p_queue(p):
"""
queue : QUEUE COLON LIFO
| QUEUE COLON FIFO
"""
if p[3] == "LIFO":
p[0] = {"queue": LIFO()}
elif p[3] == "FIFO":
p[0] = {"queue": FIFO()}
else:
raise RuntimeError("Queue discipline '%s' is not supported!" % p[1])
def p_throttling(p):
"""
throttling : THROTTLING COLON NONE
| THROTTLING COLON TAIL_DROP OPEN_BRACKET NUMBER CLOSE_BRACKET
"""
throttling = NoThrottlingSettings()
if len(p) == 7:
throttling = TailDropSettings(int(p[5]))
p[0] = {"throttling": throttling}
def p_autoscaling(p):
"""
autoscaling : AUTOSCALING OPEN_CURLY_BRACKET autoscaling_setting_list CLOSE_CURLY_BRACKET
"""
p[0] = {"autoscaling": Autoscaling(**p[3])}
def p_autoscaling_setting_list(p):
"""
autoscaling_setting_list : autoscaling_setting autoscaling_setting_list
| autoscaling_setting
"""
if len(p) == 3:
p[0] = merge_map(p[1], p[2])
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production in 'autoscaling_setting_list'")
def p_autoscaling_setting(p):
"""
autoscaling_setting : PERIOD COLON NUMBER
| LIMITS COLON OPEN_SQUARE_BRACKET NUMBER COMMA NUMBER CLOSE_SQUARE_BRACKET
"""
if len(p) == 8:
p[0] = {"limits": (int(p[4]), int(p[6]))}
elif len(p) == 4:
p[0] = {"period": int(p[3])}
else:
raise RuntimeError("Invalid product in 'autoscaling_setting'")
def p_operation_list(p):
"""
operation_list : define_operation operation_list
| define_operation
"""
if len(p) == 3:
p[0] = p[1] + p[2]
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production rules 'p_operation_list'")
def p_define_client(p):
"""
define_client : CLIENT IDENTIFIER OPEN_CURLY_BRACKET EVERY NUMBER OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET CLOSE_CURLY_BRACKET
"""
p[0] = DefineClientStub(p[2], int(p[5]), p[7])
def p_define_operation(p):
"""
define_operation : OPERATION IDENTIFIER OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET
"""
p[0] = DefineOperation(p[2], p[4])
def p_action(p):
"""
action : invoke
| query
| think
| fail
| retry
| ignore
"""
p[0] = p[1]
def p_think(p):
"""
think : THINK NUMBER
"""
p[0] = Think(int(p[2]))
def p_fail(p):
"""
fail : FAIL NUMBER
| FAIL
"""
if len(p) > 2:
p[0] = Fail(float(p[2]))
else:
p[0] = Fail()
def p_query(p):
"""
query : QUERY IDENTIFIER SLASH IDENTIFIER
| QUERY IDENTIFIER SLASH IDENTIFIER OPEN_CURLY_BRACKET query_option_list CLOSE_CURLY_BRACKET
"""
parameters = {"service": p[2], "operation": p[4]}
if len(p) > 5:
parameters = merge_map(parameters, p[6])
p[0] = Query(**parameters)
def p_query_option_list(p):
"""
query_option_list : query_option COMMA query_option_list
| query_option
"""
if len(p) == 2:
p[0] = p[1]
elif len(p) == 4:
p[0] = merge_map(p[1], p[3])
else:
raise RuntimeError("Invalid product rules for 'query_option_list'")
def p_query_option(p):
"""
query_option : timeout
| priority
"""
p[0] = p[1]
def p_timeout(p):
"""
timeout : TIMEOUT COLON NUMBER
"""
p[0] = {"timeout": int(p[3])}
def p_priority(p):
"""
priority : PRIORITY COLON NUMBER
"""
p[0] = {"priority": int(p[3])}
def p_invoke(p):
"""
invoke : INVOKE IDENTIFIER SLASH IDENTIFIER
| INVOKE IDENTIFIER SLASH IDENTIFIER OPEN_CURLY_BRACKET PRIORITY COLON NUMBER CLOSE_CURLY_BRACKET
"""
priority = None
if len(p) > 5:
priority = int(p[8])
p[0] = Trigger(p[2], p[4], priority)
def p_retry(p):
"""
retry : RETRY OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET
| RETRY OPEN_BRACKET retry_option_list CLOSE_BRACKET OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET
"""
if len(p) == 5:
p[0] = Retry(p[3])
elif len(p) == 8:
p[0] = Retry(p[6], **p[3])
else:
raise RuntimeError("Invalid product rules for 'retry_option_list'")
def p_retry_option_list(p):
"""
retry_option_list : retry_option COMMA retry_option_list
| retry_option
"""
if len(p) == 4:
p[0] = merge_map(p[1], p[3])
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production in 'retry_option_list'")
def p_retry_option(p):
"""
retry_option : LIMIT COLON NUMBER
| DELAY COLON IDENTIFIER OPEN_BRACKET NUMBER CLOSE_BRACKET
"""
if len(p) == 4:
p[0] = {"limit": int(p[3]) }
elif len(p) == 7:
p[0] = {"delay": Delay(int(p[5]), p[3])}
else:
raise RuntimeError("Invalid production in 'retry_option'")
def p_ignore(p):
"""
ignore : IGNORE OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET
"""
p[0] = IgnoreError(p[3])
def p_error(t):
raise MADSyntaxError((t.lineno, t.lexpos), t.value)
def merge_map(map_A, map_B):
tmp = map_A.copy()
tmp.update(map_B)
return tmp
class MADSyntaxError(BaseException):
def __init__(self, position, hint):
self.position = position
self.hint = hint
@property
def line_number(self):
return self.position[0]
def __repr__(self):
return "Syntax error at line {line:d}, around '{hint}'.".format(
line=self.position[0],
hint=self.hint)
class Parser:
def __init__(self, file_system, root_file):
self.root_file = root_file
self.file_system = file_system
def parse(self, entry_rule="unit", logger=yacc.NullLogger()):
lexer.lineno = 1
text = self._content()
parser = yacc.yacc(start=entry_rule, errorlog=logger)
return parser.parse(lexer=lexer, input=text)
def _content(self):
lines = self.file_system.open_input_stream(self.root_file).readlines()
return "\n".join(lines)
|
fchauvel/MAD
|
mad/parsing.py
|
p_fail
|
python
|
def p_fail(p):
if len(p) > 2:
p[0] = Fail(float(p[2]))
else:
p[0] = Fail()
|
fail : FAIL NUMBER
| FAIL
|
train
|
https://github.com/fchauvel/MAD/blob/806d5174848b1a502e5c683894995602478c448b/mad/parsing.py#L302-L310
| null |
#!/usr/bin/env python
#
# This file is part of MAD.
#
# MAD is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MAD is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MAD. If not, see <http://www.gnu.org/licenses/>.
#
import ply.lex as lex
import ply.yacc as yacc
from mad.ast.settings import *
from mad.ast.definitions import *
from mad.ast.actions import *
reserved = {
"autoscaling": "AUTOSCALING",
"client": "CLIENT",
"delay": "DELAY",
"every": "EVERY",
"fail": "FAIL",
"FIFO": "FIFO",
"ignore": "IGNORE",
"invoke": "INVOKE",
"LIFO": "LIFO",
"limit": "LIMIT",
"limits": "LIMITS",
"none": "NONE",
"operation": "OPERATION",
"period": "PERIOD",
"priority": "PRIORITY",
"queue": "QUEUE",
"query": "QUERY",
"retry": "RETRY",
"service": "SERVICE",
"settings": "SETTINGS",
"tail-drop": "TAIL_DROP",
"think": "THINK",
"throttling": "THROTTLING",
"timeout": "TIMEOUT"
}
# List of token names. This is always required
tokens = [ "CLOSE_BRACKET",
"CLOSE_CURLY_BRACKET",
"CLOSE_SQUARE_BRACKET",
"COLON",
"COMMA",
"IDENTIFIER",
"OPEN_BRACKET",
"OPEN_CURLY_BRACKET",
"OPEN_SQUARE_BRACKET",
"NUMBER",
"REAL",
"SLASH"] + list(reserved.values())
t_CLOSE_BRACKET = r"\)"
t_CLOSE_CURLY_BRACKET = r"\}"
t_CLOSE_SQUARE_BRACKET = r"\]"
t_COLON = r":"
t_COMMA = r","
t_OPEN_BRACKET = r"\("
t_OPEN_CURLY_BRACKET = r"\{"
t_OPEN_SQUARE_BRACKET = r"\["
t_SLASH = r"/"
def t_IDENTIFIER(t):
r'[a-zA-Z_][a-zA-Z0-9_\-]+'
t.type = reserved.get(t.value,'IDENTIFIER') # Check for reserved words
return t
def t_NUMBER(t):
r'[+-]?(\d+(\.\d*)?|\.\d+)([eE][+-]?\d+)?'
return t
def t_newline(t):
# Define a rule so we can track line numbers
r'\n+'
t.lexer.lineno += 1
def t_COMMENT(t):
r'\#.*'
pass
# A string containing ignored characters (spaces and tabs)
t_ignore = ' \t'
def t_error(t):
# Error handling rule
print("Illegal character '%s'" % t.value[0])
t.lexer.skip(1)
lexer = lex.lex()
# -----------------------------
# Parsing rules
def p_unit(p):
"""
unit : definition_list
"""
p[0] = p[1]
def p_definition_list(p):
"""
definition_list : definition definition_list
| definition
"""
if len(p) == 3:
p[0] = p[1] + p[2]
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production rules 'p_action_list'")
def p_definition(p):
"""
definition : define_service
| define_client
"""
p[0] = p[1]
def p_define_service(p):
"""
define_service : SERVICE IDENTIFIER OPEN_CURLY_BRACKET settings operation_list CLOSE_CURLY_BRACKET
| SERVICE IDENTIFIER OPEN_CURLY_BRACKET operation_list CLOSE_CURLY_BRACKET
"""
if len(p) == 7:
body = p[4] + p[5]
else:
body = p[4]
p[0] = DefineService(p[2], body)
def p_settings(p):
"""
settings : SETTINGS OPEN_CURLY_BRACKET setting_list CLOSE_CURLY_BRACKET
"""
p[0] = Settings(**p[3])
def p_setting_list(p):
"""
setting_list : setting setting_list
| setting
"""
if len(p) == 3:
p[0] = merge_map(p[1], p[2])
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production rules 'p_setting_list'")
def p_setting(p):
"""
setting : queue
| autoscaling
| throttling
"""
p[0] = p[1]
def p_queue(p):
"""
queue : QUEUE COLON LIFO
| QUEUE COLON FIFO
"""
if p[3] == "LIFO":
p[0] = {"queue": LIFO()}
elif p[3] == "FIFO":
p[0] = {"queue": FIFO()}
else:
raise RuntimeError("Queue discipline '%s' is not supported!" % p[1])
def p_throttling(p):
"""
throttling : THROTTLING COLON NONE
| THROTTLING COLON TAIL_DROP OPEN_BRACKET NUMBER CLOSE_BRACKET
"""
throttling = NoThrottlingSettings()
if len(p) == 7:
throttling = TailDropSettings(int(p[5]))
p[0] = {"throttling": throttling}
def p_autoscaling(p):
"""
autoscaling : AUTOSCALING OPEN_CURLY_BRACKET autoscaling_setting_list CLOSE_CURLY_BRACKET
"""
p[0] = {"autoscaling": Autoscaling(**p[3])}
def p_autoscaling_setting_list(p):
"""
autoscaling_setting_list : autoscaling_setting autoscaling_setting_list
| autoscaling_setting
"""
if len(p) == 3:
p[0] = merge_map(p[1], p[2])
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production in 'autoscaling_setting_list'")
def p_autoscaling_setting(p):
"""
autoscaling_setting : PERIOD COLON NUMBER
| LIMITS COLON OPEN_SQUARE_BRACKET NUMBER COMMA NUMBER CLOSE_SQUARE_BRACKET
"""
if len(p) == 8:
p[0] = {"limits": (int(p[4]), int(p[6]))}
elif len(p) == 4:
p[0] = {"period": int(p[3])}
else:
raise RuntimeError("Invalid product in 'autoscaling_setting'")
def p_operation_list(p):
"""
operation_list : define_operation operation_list
| define_operation
"""
if len(p) == 3:
p[0] = p[1] + p[2]
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production rules 'p_operation_list'")
def p_define_client(p):
"""
define_client : CLIENT IDENTIFIER OPEN_CURLY_BRACKET EVERY NUMBER OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET CLOSE_CURLY_BRACKET
"""
p[0] = DefineClientStub(p[2], int(p[5]), p[7])
def p_define_operation(p):
"""
define_operation : OPERATION IDENTIFIER OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET
"""
p[0] = DefineOperation(p[2], p[4])
def p_action_list(p):
"""
action_list : action action_list
| action
"""
if len(p) == 3:
p[0] = p[1] + p[2]
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production rules 'p_action_list'")
def p_action(p):
"""
action : invoke
| query
| think
| fail
| retry
| ignore
"""
p[0] = p[1]
def p_think(p):
"""
think : THINK NUMBER
"""
p[0] = Think(int(p[2]))
def p_query(p):
"""
query : QUERY IDENTIFIER SLASH IDENTIFIER
| QUERY IDENTIFIER SLASH IDENTIFIER OPEN_CURLY_BRACKET query_option_list CLOSE_CURLY_BRACKET
"""
parameters = {"service": p[2], "operation": p[4]}
if len(p) > 5:
parameters = merge_map(parameters, p[6])
p[0] = Query(**parameters)
def p_query_option_list(p):
"""
query_option_list : query_option COMMA query_option_list
| query_option
"""
if len(p) == 2:
p[0] = p[1]
elif len(p) == 4:
p[0] = merge_map(p[1], p[3])
else:
raise RuntimeError("Invalid product rules for 'query_option_list'")
def p_query_option(p):
"""
query_option : timeout
| priority
"""
p[0] = p[1]
def p_timeout(p):
"""
timeout : TIMEOUT COLON NUMBER
"""
p[0] = {"timeout": int(p[3])}
def p_priority(p):
"""
priority : PRIORITY COLON NUMBER
"""
p[0] = {"priority": int(p[3])}
def p_invoke(p):
"""
invoke : INVOKE IDENTIFIER SLASH IDENTIFIER
| INVOKE IDENTIFIER SLASH IDENTIFIER OPEN_CURLY_BRACKET PRIORITY COLON NUMBER CLOSE_CURLY_BRACKET
"""
priority = None
if len(p) > 5:
priority = int(p[8])
p[0] = Trigger(p[2], p[4], priority)
def p_retry(p):
"""
retry : RETRY OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET
| RETRY OPEN_BRACKET retry_option_list CLOSE_BRACKET OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET
"""
if len(p) == 5:
p[0] = Retry(p[3])
elif len(p) == 8:
p[0] = Retry(p[6], **p[3])
else:
raise RuntimeError("Invalid product rules for 'retry_option_list'")
def p_retry_option_list(p):
"""
retry_option_list : retry_option COMMA retry_option_list
| retry_option
"""
if len(p) == 4:
p[0] = merge_map(p[1], p[3])
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production in 'retry_option_list'")
def p_retry_option(p):
"""
retry_option : LIMIT COLON NUMBER
| DELAY COLON IDENTIFIER OPEN_BRACKET NUMBER CLOSE_BRACKET
"""
if len(p) == 4:
p[0] = {"limit": int(p[3]) }
elif len(p) == 7:
p[0] = {"delay": Delay(int(p[5]), p[3])}
else:
raise RuntimeError("Invalid production in 'retry_option'")
def p_ignore(p):
"""
ignore : IGNORE OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET
"""
p[0] = IgnoreError(p[3])
def p_error(t):
raise MADSyntaxError((t.lineno, t.lexpos), t.value)
def merge_map(map_A, map_B):
tmp = map_A.copy()
tmp.update(map_B)
return tmp
class MADSyntaxError(BaseException):
def __init__(self, position, hint):
self.position = position
self.hint = hint
@property
def line_number(self):
return self.position[0]
def __repr__(self):
return "Syntax error at line {line:d}, around '{hint}'.".format(
line=self.position[0],
hint=self.hint)
class Parser:
def __init__(self, file_system, root_file):
self.root_file = root_file
self.file_system = file_system
def parse(self, entry_rule="unit", logger=yacc.NullLogger()):
lexer.lineno = 1
text = self._content()
parser = yacc.yacc(start=entry_rule, errorlog=logger)
return parser.parse(lexer=lexer, input=text)
def _content(self):
lines = self.file_system.open_input_stream(self.root_file).readlines()
return "\n".join(lines)
|
fchauvel/MAD
|
mad/parsing.py
|
p_query
|
python
|
def p_query(p):
parameters = {"service": p[2], "operation": p[4]}
if len(p) > 5:
parameters = merge_map(parameters, p[6])
p[0] = Query(**parameters)
|
query : QUERY IDENTIFIER SLASH IDENTIFIER
| QUERY IDENTIFIER SLASH IDENTIFIER OPEN_CURLY_BRACKET query_option_list CLOSE_CURLY_BRACKET
|
train
|
https://github.com/fchauvel/MAD/blob/806d5174848b1a502e5c683894995602478c448b/mad/parsing.py#L312-L320
|
[
"def merge_map(map_A, map_B):\n tmp = map_A.copy()\n tmp.update(map_B)\n return tmp\n"
] |
#!/usr/bin/env python
#
# This file is part of MAD.
#
# MAD is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MAD is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MAD. If not, see <http://www.gnu.org/licenses/>.
#
import ply.lex as lex
import ply.yacc as yacc
from mad.ast.settings import *
from mad.ast.definitions import *
from mad.ast.actions import *
reserved = {
"autoscaling": "AUTOSCALING",
"client": "CLIENT",
"delay": "DELAY",
"every": "EVERY",
"fail": "FAIL",
"FIFO": "FIFO",
"ignore": "IGNORE",
"invoke": "INVOKE",
"LIFO": "LIFO",
"limit": "LIMIT",
"limits": "LIMITS",
"none": "NONE",
"operation": "OPERATION",
"period": "PERIOD",
"priority": "PRIORITY",
"queue": "QUEUE",
"query": "QUERY",
"retry": "RETRY",
"service": "SERVICE",
"settings": "SETTINGS",
"tail-drop": "TAIL_DROP",
"think": "THINK",
"throttling": "THROTTLING",
"timeout": "TIMEOUT"
}
# List of token names. This is always required
tokens = [ "CLOSE_BRACKET",
"CLOSE_CURLY_BRACKET",
"CLOSE_SQUARE_BRACKET",
"COLON",
"COMMA",
"IDENTIFIER",
"OPEN_BRACKET",
"OPEN_CURLY_BRACKET",
"OPEN_SQUARE_BRACKET",
"NUMBER",
"REAL",
"SLASH"] + list(reserved.values())
t_CLOSE_BRACKET = r"\)"
t_CLOSE_CURLY_BRACKET = r"\}"
t_CLOSE_SQUARE_BRACKET = r"\]"
t_COLON = r":"
t_COMMA = r","
t_OPEN_BRACKET = r"\("
t_OPEN_CURLY_BRACKET = r"\{"
t_OPEN_SQUARE_BRACKET = r"\["
t_SLASH = r"/"
def t_IDENTIFIER(t):
r'[a-zA-Z_][a-zA-Z0-9_\-]+'
t.type = reserved.get(t.value,'IDENTIFIER') # Check for reserved words
return t
def t_NUMBER(t):
r'[+-]?(\d+(\.\d*)?|\.\d+)([eE][+-]?\d+)?'
return t
def t_newline(t):
# Define a rule so we can track line numbers
r'\n+'
t.lexer.lineno += 1
def t_COMMENT(t):
r'\#.*'
pass
# A string containing ignored characters (spaces and tabs)
t_ignore = ' \t'
def t_error(t):
# Error handling rule
print("Illegal character '%s'" % t.value[0])
t.lexer.skip(1)
lexer = lex.lex()
# -----------------------------
# Parsing rules
def p_unit(p):
"""
unit : definition_list
"""
p[0] = p[1]
def p_definition_list(p):
"""
definition_list : definition definition_list
| definition
"""
if len(p) == 3:
p[0] = p[1] + p[2]
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production rules 'p_action_list'")
def p_definition(p):
"""
definition : define_service
| define_client
"""
p[0] = p[1]
def p_define_service(p):
"""
define_service : SERVICE IDENTIFIER OPEN_CURLY_BRACKET settings operation_list CLOSE_CURLY_BRACKET
| SERVICE IDENTIFIER OPEN_CURLY_BRACKET operation_list CLOSE_CURLY_BRACKET
"""
if len(p) == 7:
body = p[4] + p[5]
else:
body = p[4]
p[0] = DefineService(p[2], body)
def p_settings(p):
"""
settings : SETTINGS OPEN_CURLY_BRACKET setting_list CLOSE_CURLY_BRACKET
"""
p[0] = Settings(**p[3])
def p_setting_list(p):
"""
setting_list : setting setting_list
| setting
"""
if len(p) == 3:
p[0] = merge_map(p[1], p[2])
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production rules 'p_setting_list'")
def p_setting(p):
"""
setting : queue
| autoscaling
| throttling
"""
p[0] = p[1]
def p_queue(p):
"""
queue : QUEUE COLON LIFO
| QUEUE COLON FIFO
"""
if p[3] == "LIFO":
p[0] = {"queue": LIFO()}
elif p[3] == "FIFO":
p[0] = {"queue": FIFO()}
else:
raise RuntimeError("Queue discipline '%s' is not supported!" % p[1])
def p_throttling(p):
"""
throttling : THROTTLING COLON NONE
| THROTTLING COLON TAIL_DROP OPEN_BRACKET NUMBER CLOSE_BRACKET
"""
throttling = NoThrottlingSettings()
if len(p) == 7:
throttling = TailDropSettings(int(p[5]))
p[0] = {"throttling": throttling}
def p_autoscaling(p):
"""
autoscaling : AUTOSCALING OPEN_CURLY_BRACKET autoscaling_setting_list CLOSE_CURLY_BRACKET
"""
p[0] = {"autoscaling": Autoscaling(**p[3])}
def p_autoscaling_setting_list(p):
"""
autoscaling_setting_list : autoscaling_setting autoscaling_setting_list
| autoscaling_setting
"""
if len(p) == 3:
p[0] = merge_map(p[1], p[2])
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production in 'autoscaling_setting_list'")
def p_autoscaling_setting(p):
"""
autoscaling_setting : PERIOD COLON NUMBER
| LIMITS COLON OPEN_SQUARE_BRACKET NUMBER COMMA NUMBER CLOSE_SQUARE_BRACKET
"""
if len(p) == 8:
p[0] = {"limits": (int(p[4]), int(p[6]))}
elif len(p) == 4:
p[0] = {"period": int(p[3])}
else:
raise RuntimeError("Invalid product in 'autoscaling_setting'")
def p_operation_list(p):
"""
operation_list : define_operation operation_list
| define_operation
"""
if len(p) == 3:
p[0] = p[1] + p[2]
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production rules 'p_operation_list'")
def p_define_client(p):
"""
define_client : CLIENT IDENTIFIER OPEN_CURLY_BRACKET EVERY NUMBER OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET CLOSE_CURLY_BRACKET
"""
p[0] = DefineClientStub(p[2], int(p[5]), p[7])
def p_define_operation(p):
"""
define_operation : OPERATION IDENTIFIER OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET
"""
p[0] = DefineOperation(p[2], p[4])
def p_action_list(p):
"""
action_list : action action_list
| action
"""
if len(p) == 3:
p[0] = p[1] + p[2]
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production rules 'p_action_list'")
def p_action(p):
"""
action : invoke
| query
| think
| fail
| retry
| ignore
"""
p[0] = p[1]
def p_think(p):
"""
think : THINK NUMBER
"""
p[0] = Think(int(p[2]))
def p_fail(p):
"""
fail : FAIL NUMBER
| FAIL
"""
if len(p) > 2:
p[0] = Fail(float(p[2]))
else:
p[0] = Fail()
def p_query_option_list(p):
"""
query_option_list : query_option COMMA query_option_list
| query_option
"""
if len(p) == 2:
p[0] = p[1]
elif len(p) == 4:
p[0] = merge_map(p[1], p[3])
else:
raise RuntimeError("Invalid product rules for 'query_option_list'")
def p_query_option(p):
"""
query_option : timeout
| priority
"""
p[0] = p[1]
def p_timeout(p):
"""
timeout : TIMEOUT COLON NUMBER
"""
p[0] = {"timeout": int(p[3])}
def p_priority(p):
"""
priority : PRIORITY COLON NUMBER
"""
p[0] = {"priority": int(p[3])}
def p_invoke(p):
"""
invoke : INVOKE IDENTIFIER SLASH IDENTIFIER
| INVOKE IDENTIFIER SLASH IDENTIFIER OPEN_CURLY_BRACKET PRIORITY COLON NUMBER CLOSE_CURLY_BRACKET
"""
priority = None
if len(p) > 5:
priority = int(p[8])
p[0] = Trigger(p[2], p[4], priority)
def p_retry(p):
"""
retry : RETRY OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET
| RETRY OPEN_BRACKET retry_option_list CLOSE_BRACKET OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET
"""
if len(p) == 5:
p[0] = Retry(p[3])
elif len(p) == 8:
p[0] = Retry(p[6], **p[3])
else:
raise RuntimeError("Invalid product rules for 'retry_option_list'")
def p_retry_option_list(p):
"""
retry_option_list : retry_option COMMA retry_option_list
| retry_option
"""
if len(p) == 4:
p[0] = merge_map(p[1], p[3])
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production in 'retry_option_list'")
def p_retry_option(p):
"""
retry_option : LIMIT COLON NUMBER
| DELAY COLON IDENTIFIER OPEN_BRACKET NUMBER CLOSE_BRACKET
"""
if len(p) == 4:
p[0] = {"limit": int(p[3]) }
elif len(p) == 7:
p[0] = {"delay": Delay(int(p[5]), p[3])}
else:
raise RuntimeError("Invalid production in 'retry_option'")
def p_ignore(p):
"""
ignore : IGNORE OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET
"""
p[0] = IgnoreError(p[3])
def p_error(t):
raise MADSyntaxError((t.lineno, t.lexpos), t.value)
def merge_map(map_A, map_B):
tmp = map_A.copy()
tmp.update(map_B)
return tmp
class MADSyntaxError(BaseException):
def __init__(self, position, hint):
self.position = position
self.hint = hint
@property
def line_number(self):
return self.position[0]
def __repr__(self):
return "Syntax error at line {line:d}, around '{hint}'.".format(
line=self.position[0],
hint=self.hint)
class Parser:
def __init__(self, file_system, root_file):
self.root_file = root_file
self.file_system = file_system
def parse(self, entry_rule="unit", logger=yacc.NullLogger()):
lexer.lineno = 1
text = self._content()
parser = yacc.yacc(start=entry_rule, errorlog=logger)
return parser.parse(lexer=lexer, input=text)
def _content(self):
lines = self.file_system.open_input_stream(self.root_file).readlines()
return "\n".join(lines)
|
fchauvel/MAD
|
mad/parsing.py
|
p_query_option_list
|
python
|
def p_query_option_list(p):
if len(p) == 2:
p[0] = p[1]
elif len(p) == 4:
p[0] = merge_map(p[1], p[3])
else:
raise RuntimeError("Invalid product rules for 'query_option_list'")
|
query_option_list : query_option COMMA query_option_list
| query_option
|
train
|
https://github.com/fchauvel/MAD/blob/806d5174848b1a502e5c683894995602478c448b/mad/parsing.py#L323-L333
|
[
"def merge_map(map_A, map_B):\n tmp = map_A.copy()\n tmp.update(map_B)\n return tmp\n"
] |
#!/usr/bin/env python
#
# This file is part of MAD.
#
# MAD is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MAD is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MAD. If not, see <http://www.gnu.org/licenses/>.
#
import ply.lex as lex
import ply.yacc as yacc
from mad.ast.settings import *
from mad.ast.definitions import *
from mad.ast.actions import *
reserved = {
"autoscaling": "AUTOSCALING",
"client": "CLIENT",
"delay": "DELAY",
"every": "EVERY",
"fail": "FAIL",
"FIFO": "FIFO",
"ignore": "IGNORE",
"invoke": "INVOKE",
"LIFO": "LIFO",
"limit": "LIMIT",
"limits": "LIMITS",
"none": "NONE",
"operation": "OPERATION",
"period": "PERIOD",
"priority": "PRIORITY",
"queue": "QUEUE",
"query": "QUERY",
"retry": "RETRY",
"service": "SERVICE",
"settings": "SETTINGS",
"tail-drop": "TAIL_DROP",
"think": "THINK",
"throttling": "THROTTLING",
"timeout": "TIMEOUT"
}
# List of token names. This is always required
tokens = [ "CLOSE_BRACKET",
"CLOSE_CURLY_BRACKET",
"CLOSE_SQUARE_BRACKET",
"COLON",
"COMMA",
"IDENTIFIER",
"OPEN_BRACKET",
"OPEN_CURLY_BRACKET",
"OPEN_SQUARE_BRACKET",
"NUMBER",
"REAL",
"SLASH"] + list(reserved.values())
t_CLOSE_BRACKET = r"\)"
t_CLOSE_CURLY_BRACKET = r"\}"
t_CLOSE_SQUARE_BRACKET = r"\]"
t_COLON = r":"
t_COMMA = r","
t_OPEN_BRACKET = r"\("
t_OPEN_CURLY_BRACKET = r"\{"
t_OPEN_SQUARE_BRACKET = r"\["
t_SLASH = r"/"
def t_IDENTIFIER(t):
r'[a-zA-Z_][a-zA-Z0-9_\-]+'
t.type = reserved.get(t.value,'IDENTIFIER') # Check for reserved words
return t
def t_NUMBER(t):
r'[+-]?(\d+(\.\d*)?|\.\d+)([eE][+-]?\d+)?'
return t
def t_newline(t):
# Define a rule so we can track line numbers
r'\n+'
t.lexer.lineno += 1
def t_COMMENT(t):
r'\#.*'
pass
# A string containing ignored characters (spaces and tabs)
t_ignore = ' \t'
def t_error(t):
# Error handling rule
print("Illegal character '%s'" % t.value[0])
t.lexer.skip(1)
lexer = lex.lex()
# -----------------------------
# Parsing rules
def p_unit(p):
"""
unit : definition_list
"""
p[0] = p[1]
def p_definition_list(p):
"""
definition_list : definition definition_list
| definition
"""
if len(p) == 3:
p[0] = p[1] + p[2]
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production rules 'p_action_list'")
def p_definition(p):
"""
definition : define_service
| define_client
"""
p[0] = p[1]
def p_define_service(p):
"""
define_service : SERVICE IDENTIFIER OPEN_CURLY_BRACKET settings operation_list CLOSE_CURLY_BRACKET
| SERVICE IDENTIFIER OPEN_CURLY_BRACKET operation_list CLOSE_CURLY_BRACKET
"""
if len(p) == 7:
body = p[4] + p[5]
else:
body = p[4]
p[0] = DefineService(p[2], body)
def p_settings(p):
"""
settings : SETTINGS OPEN_CURLY_BRACKET setting_list CLOSE_CURLY_BRACKET
"""
p[0] = Settings(**p[3])
def p_setting_list(p):
"""
setting_list : setting setting_list
| setting
"""
if len(p) == 3:
p[0] = merge_map(p[1], p[2])
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production rules 'p_setting_list'")
def p_setting(p):
"""
setting : queue
| autoscaling
| throttling
"""
p[0] = p[1]
def p_queue(p):
"""
queue : QUEUE COLON LIFO
| QUEUE COLON FIFO
"""
if p[3] == "LIFO":
p[0] = {"queue": LIFO()}
elif p[3] == "FIFO":
p[0] = {"queue": FIFO()}
else:
raise RuntimeError("Queue discipline '%s' is not supported!" % p[1])
def p_throttling(p):
"""
throttling : THROTTLING COLON NONE
| THROTTLING COLON TAIL_DROP OPEN_BRACKET NUMBER CLOSE_BRACKET
"""
throttling = NoThrottlingSettings()
if len(p) == 7:
throttling = TailDropSettings(int(p[5]))
p[0] = {"throttling": throttling}
def p_autoscaling(p):
"""
autoscaling : AUTOSCALING OPEN_CURLY_BRACKET autoscaling_setting_list CLOSE_CURLY_BRACKET
"""
p[0] = {"autoscaling": Autoscaling(**p[3])}
def p_autoscaling_setting_list(p):
"""
autoscaling_setting_list : autoscaling_setting autoscaling_setting_list
| autoscaling_setting
"""
if len(p) == 3:
p[0] = merge_map(p[1], p[2])
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production in 'autoscaling_setting_list'")
def p_autoscaling_setting(p):
"""
autoscaling_setting : PERIOD COLON NUMBER
| LIMITS COLON OPEN_SQUARE_BRACKET NUMBER COMMA NUMBER CLOSE_SQUARE_BRACKET
"""
if len(p) == 8:
p[0] = {"limits": (int(p[4]), int(p[6]))}
elif len(p) == 4:
p[0] = {"period": int(p[3])}
else:
raise RuntimeError("Invalid product in 'autoscaling_setting'")
def p_operation_list(p):
"""
operation_list : define_operation operation_list
| define_operation
"""
if len(p) == 3:
p[0] = p[1] + p[2]
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production rules 'p_operation_list'")
def p_define_client(p):
"""
define_client : CLIENT IDENTIFIER OPEN_CURLY_BRACKET EVERY NUMBER OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET CLOSE_CURLY_BRACKET
"""
p[0] = DefineClientStub(p[2], int(p[5]), p[7])
def p_define_operation(p):
"""
define_operation : OPERATION IDENTIFIER OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET
"""
p[0] = DefineOperation(p[2], p[4])
def p_action_list(p):
"""
action_list : action action_list
| action
"""
if len(p) == 3:
p[0] = p[1] + p[2]
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production rules 'p_action_list'")
def p_action(p):
"""
action : invoke
| query
| think
| fail
| retry
| ignore
"""
p[0] = p[1]
def p_think(p):
"""
think : THINK NUMBER
"""
p[0] = Think(int(p[2]))
def p_fail(p):
"""
fail : FAIL NUMBER
| FAIL
"""
if len(p) > 2:
p[0] = Fail(float(p[2]))
else:
p[0] = Fail()
def p_query(p):
"""
query : QUERY IDENTIFIER SLASH IDENTIFIER
| QUERY IDENTIFIER SLASH IDENTIFIER OPEN_CURLY_BRACKET query_option_list CLOSE_CURLY_BRACKET
"""
parameters = {"service": p[2], "operation": p[4]}
if len(p) > 5:
parameters = merge_map(parameters, p[6])
p[0] = Query(**parameters)
def p_query_option(p):
"""
query_option : timeout
| priority
"""
p[0] = p[1]
def p_timeout(p):
"""
timeout : TIMEOUT COLON NUMBER
"""
p[0] = {"timeout": int(p[3])}
def p_priority(p):
"""
priority : PRIORITY COLON NUMBER
"""
p[0] = {"priority": int(p[3])}
def p_invoke(p):
"""
invoke : INVOKE IDENTIFIER SLASH IDENTIFIER
| INVOKE IDENTIFIER SLASH IDENTIFIER OPEN_CURLY_BRACKET PRIORITY COLON NUMBER CLOSE_CURLY_BRACKET
"""
priority = None
if len(p) > 5:
priority = int(p[8])
p[0] = Trigger(p[2], p[4], priority)
def p_retry(p):
"""
retry : RETRY OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET
| RETRY OPEN_BRACKET retry_option_list CLOSE_BRACKET OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET
"""
if len(p) == 5:
p[0] = Retry(p[3])
elif len(p) == 8:
p[0] = Retry(p[6], **p[3])
else:
raise RuntimeError("Invalid product rules for 'retry_option_list'")
def p_retry_option_list(p):
"""
retry_option_list : retry_option COMMA retry_option_list
| retry_option
"""
if len(p) == 4:
p[0] = merge_map(p[1], p[3])
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production in 'retry_option_list'")
def p_retry_option(p):
"""
retry_option : LIMIT COLON NUMBER
| DELAY COLON IDENTIFIER OPEN_BRACKET NUMBER CLOSE_BRACKET
"""
if len(p) == 4:
p[0] = {"limit": int(p[3]) }
elif len(p) == 7:
p[0] = {"delay": Delay(int(p[5]), p[3])}
else:
raise RuntimeError("Invalid production in 'retry_option'")
def p_ignore(p):
"""
ignore : IGNORE OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET
"""
p[0] = IgnoreError(p[3])
def p_error(t):
raise MADSyntaxError((t.lineno, t.lexpos), t.value)
def merge_map(map_A, map_B):
tmp = map_A.copy()
tmp.update(map_B)
return tmp
class MADSyntaxError(BaseException):
def __init__(self, position, hint):
self.position = position
self.hint = hint
@property
def line_number(self):
return self.position[0]
def __repr__(self):
return "Syntax error at line {line:d}, around '{hint}'.".format(
line=self.position[0],
hint=self.hint)
class Parser:
def __init__(self, file_system, root_file):
self.root_file = root_file
self.file_system = file_system
def parse(self, entry_rule="unit", logger=yacc.NullLogger()):
lexer.lineno = 1
text = self._content()
parser = yacc.yacc(start=entry_rule, errorlog=logger)
return parser.parse(lexer=lexer, input=text)
def _content(self):
lines = self.file_system.open_input_stream(self.root_file).readlines()
return "\n".join(lines)
|
fchauvel/MAD
|
mad/parsing.py
|
p_invoke
|
python
|
def p_invoke(p):
priority = None
if len(p) > 5:
priority = int(p[8])
p[0] = Trigger(p[2], p[4], priority)
|
invoke : INVOKE IDENTIFIER SLASH IDENTIFIER
| INVOKE IDENTIFIER SLASH IDENTIFIER OPEN_CURLY_BRACKET PRIORITY COLON NUMBER CLOSE_CURLY_BRACKET
|
train
|
https://github.com/fchauvel/MAD/blob/806d5174848b1a502e5c683894995602478c448b/mad/parsing.py#L358-L366
| null |
#!/usr/bin/env python
#
# This file is part of MAD.
#
# MAD is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MAD is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MAD. If not, see <http://www.gnu.org/licenses/>.
#
import ply.lex as lex
import ply.yacc as yacc
from mad.ast.settings import *
from mad.ast.definitions import *
from mad.ast.actions import *
reserved = {
"autoscaling": "AUTOSCALING",
"client": "CLIENT",
"delay": "DELAY",
"every": "EVERY",
"fail": "FAIL",
"FIFO": "FIFO",
"ignore": "IGNORE",
"invoke": "INVOKE",
"LIFO": "LIFO",
"limit": "LIMIT",
"limits": "LIMITS",
"none": "NONE",
"operation": "OPERATION",
"period": "PERIOD",
"priority": "PRIORITY",
"queue": "QUEUE",
"query": "QUERY",
"retry": "RETRY",
"service": "SERVICE",
"settings": "SETTINGS",
"tail-drop": "TAIL_DROP",
"think": "THINK",
"throttling": "THROTTLING",
"timeout": "TIMEOUT"
}
# List of token names. This is always required
tokens = [ "CLOSE_BRACKET",
"CLOSE_CURLY_BRACKET",
"CLOSE_SQUARE_BRACKET",
"COLON",
"COMMA",
"IDENTIFIER",
"OPEN_BRACKET",
"OPEN_CURLY_BRACKET",
"OPEN_SQUARE_BRACKET",
"NUMBER",
"REAL",
"SLASH"] + list(reserved.values())
t_CLOSE_BRACKET = r"\)"
t_CLOSE_CURLY_BRACKET = r"\}"
t_CLOSE_SQUARE_BRACKET = r"\]"
t_COLON = r":"
t_COMMA = r","
t_OPEN_BRACKET = r"\("
t_OPEN_CURLY_BRACKET = r"\{"
t_OPEN_SQUARE_BRACKET = r"\["
t_SLASH = r"/"
def t_IDENTIFIER(t):
r'[a-zA-Z_][a-zA-Z0-9_\-]+'
t.type = reserved.get(t.value,'IDENTIFIER') # Check for reserved words
return t
def t_NUMBER(t):
r'[+-]?(\d+(\.\d*)?|\.\d+)([eE][+-]?\d+)?'
return t
def t_newline(t):
# Define a rule so we can track line numbers
r'\n+'
t.lexer.lineno += 1
def t_COMMENT(t):
r'\#.*'
pass
# A string containing ignored characters (spaces and tabs)
t_ignore = ' \t'
def t_error(t):
# Error handling rule
print("Illegal character '%s'" % t.value[0])
t.lexer.skip(1)
lexer = lex.lex()
# -----------------------------
# Parsing rules
def p_unit(p):
"""
unit : definition_list
"""
p[0] = p[1]
def p_definition_list(p):
"""
definition_list : definition definition_list
| definition
"""
if len(p) == 3:
p[0] = p[1] + p[2]
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production rules 'p_action_list'")
def p_definition(p):
"""
definition : define_service
| define_client
"""
p[0] = p[1]
def p_define_service(p):
"""
define_service : SERVICE IDENTIFIER OPEN_CURLY_BRACKET settings operation_list CLOSE_CURLY_BRACKET
| SERVICE IDENTIFIER OPEN_CURLY_BRACKET operation_list CLOSE_CURLY_BRACKET
"""
if len(p) == 7:
body = p[4] + p[5]
else:
body = p[4]
p[0] = DefineService(p[2], body)
def p_settings(p):
"""
settings : SETTINGS OPEN_CURLY_BRACKET setting_list CLOSE_CURLY_BRACKET
"""
p[0] = Settings(**p[3])
def p_setting_list(p):
"""
setting_list : setting setting_list
| setting
"""
if len(p) == 3:
p[0] = merge_map(p[1], p[2])
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production rules 'p_setting_list'")
def p_setting(p):
"""
setting : queue
| autoscaling
| throttling
"""
p[0] = p[1]
def p_queue(p):
"""
queue : QUEUE COLON LIFO
| QUEUE COLON FIFO
"""
if p[3] == "LIFO":
p[0] = {"queue": LIFO()}
elif p[3] == "FIFO":
p[0] = {"queue": FIFO()}
else:
raise RuntimeError("Queue discipline '%s' is not supported!" % p[1])
def p_throttling(p):
"""
throttling : THROTTLING COLON NONE
| THROTTLING COLON TAIL_DROP OPEN_BRACKET NUMBER CLOSE_BRACKET
"""
throttling = NoThrottlingSettings()
if len(p) == 7:
throttling = TailDropSettings(int(p[5]))
p[0] = {"throttling": throttling}
def p_autoscaling(p):
"""
autoscaling : AUTOSCALING OPEN_CURLY_BRACKET autoscaling_setting_list CLOSE_CURLY_BRACKET
"""
p[0] = {"autoscaling": Autoscaling(**p[3])}
def p_autoscaling_setting_list(p):
"""
autoscaling_setting_list : autoscaling_setting autoscaling_setting_list
| autoscaling_setting
"""
if len(p) == 3:
p[0] = merge_map(p[1], p[2])
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production in 'autoscaling_setting_list'")
def p_autoscaling_setting(p):
"""
autoscaling_setting : PERIOD COLON NUMBER
| LIMITS COLON OPEN_SQUARE_BRACKET NUMBER COMMA NUMBER CLOSE_SQUARE_BRACKET
"""
if len(p) == 8:
p[0] = {"limits": (int(p[4]), int(p[6]))}
elif len(p) == 4:
p[0] = {"period": int(p[3])}
else:
raise RuntimeError("Invalid product in 'autoscaling_setting'")
def p_operation_list(p):
"""
operation_list : define_operation operation_list
| define_operation
"""
if len(p) == 3:
p[0] = p[1] + p[2]
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production rules 'p_operation_list'")
def p_define_client(p):
"""
define_client : CLIENT IDENTIFIER OPEN_CURLY_BRACKET EVERY NUMBER OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET CLOSE_CURLY_BRACKET
"""
p[0] = DefineClientStub(p[2], int(p[5]), p[7])
def p_define_operation(p):
"""
define_operation : OPERATION IDENTIFIER OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET
"""
p[0] = DefineOperation(p[2], p[4])
def p_action_list(p):
"""
action_list : action action_list
| action
"""
if len(p) == 3:
p[0] = p[1] + p[2]
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production rules 'p_action_list'")
def p_action(p):
"""
action : invoke
| query
| think
| fail
| retry
| ignore
"""
p[0] = p[1]
def p_think(p):
"""
think : THINK NUMBER
"""
p[0] = Think(int(p[2]))
def p_fail(p):
    """
    fail : FAIL NUMBER
         | FAIL
    """
    # A bare 'fail' uses the default probability; otherwise the NUMBER
    # token is converted to a float and passed along.
    p[0] = Fail(float(p[2])) if len(p) > 2 else Fail()
def p_query(p):
"""
query : QUERY IDENTIFIER SLASH IDENTIFIER
| QUERY IDENTIFIER SLASH IDENTIFIER OPEN_CURLY_BRACKET query_option_list CLOSE_CURLY_BRACKET
"""
parameters = {"service": p[2], "operation": p[4]}
if len(p) > 5:
parameters = merge_map(parameters, p[6])
p[0] = Query(**parameters)
def p_query_option_list(p):
"""
query_option_list : query_option COMMA query_option_list
| query_option
"""
if len(p) == 2:
p[0] = p[1]
elif len(p) == 4:
p[0] = merge_map(p[1], p[3])
else:
raise RuntimeError("Invalid product rules for 'query_option_list'")
def p_query_option(p):
"""
query_option : timeout
| priority
"""
p[0] = p[1]
def p_timeout(p):
"""
timeout : TIMEOUT COLON NUMBER
"""
p[0] = {"timeout": int(p[3])}
def p_priority(p):
"""
priority : PRIORITY COLON NUMBER
"""
p[0] = {"priority": int(p[3])}
def p_retry(p):
    """
    retry : RETRY OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET
          | RETRY OPEN_BRACKET retry_option_list CLOSE_BRACKET OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET
    """
    # Plain form 'retry { actions }': 4 symbols, so len(p) == 5 and the
    # action list is p[3].
    if len(p) == 5:
        p[0] = Retry(p[3])
    # Parameterised form 'retry(limit: N, ...) { actions }': the option
    # map p[3] is expanded as keyword arguments of Retry.
    elif len(p) == 8:
        p[0] = Retry(p[6], **p[3])
    else:
        # NOTE(review): message says 'retry_option_list' but this is the
        # 'retry' rule -- likely a copy/paste slip in the original text.
        raise RuntimeError("Invalid product rules for 'retry_option_list'")
def p_retry_option_list(p):
"""
retry_option_list : retry_option COMMA retry_option_list
| retry_option
"""
if len(p) == 4:
p[0] = merge_map(p[1], p[3])
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production in 'retry_option_list'")
def p_retry_option(p):
"""
retry_option : LIMIT COLON NUMBER
| DELAY COLON IDENTIFIER OPEN_BRACKET NUMBER CLOSE_BRACKET
"""
if len(p) == 4:
p[0] = {"limit": int(p[3]) }
elif len(p) == 7:
p[0] = {"delay": Delay(int(p[5]), p[3])}
else:
raise RuntimeError("Invalid production in 'retry_option'")
def p_ignore(p):
"""
ignore : IGNORE OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET
"""
p[0] = IgnoreError(p[3])
def p_error(t):
    """Translate a PLY parse error into a MADSyntaxError.

    Bug fix: PLY invokes this callback with ``t is None`` when the input
    ends unexpectedly; the original crashed with AttributeError instead
    of raising a proper syntax error.
    """
    if t is None:
        raise MADSyntaxError((0, 0), "unexpected end of input")
    raise MADSyntaxError((t.lineno, t.lexpos), t.value)
def merge_map(map_A, map_B):
tmp = map_A.copy()
tmp.update(map_B)
return tmp
class MADSyntaxError(BaseException):
def __init__(self, position, hint):
self.position = position
self.hint = hint
@property
def line_number(self):
return self.position[0]
def __repr__(self):
return "Syntax error at line {line:d}, around '{hint}'.".format(
line=self.position[0],
hint=self.hint)
class Parser:
    """Front end that reads a MAD source file and parses it into an AST."""

    def __init__(self, file_system, root_file):
        self.root_file = root_file
        self.file_system = file_system

    def parse(self, entry_rule="unit", logger=yacc.NullLogger()):
        """Build the yacc parser and run it over the file content.

        ``entry_rule`` selects the grammar start symbol; ``logger``
        silences yacc diagnostics by default.
        """
        lexer.lineno = 1  # reset so line numbers are correct across parses
        text = self._content()
        parser = yacc.yacc(start=entry_rule, errorlog=logger)
        return parser.parse(lexer=lexer, input=text)

    def _content(self):
        # Bug fix: readlines() keeps the trailing '\n' on each line, so
        # joining with '\n' doubled every line break (and skewed reported
        # line numbers).  Assumes open_input_stream returns a file-like
        # object whose readlines() keeps newlines -- TODO confirm.
        lines = self.file_system.open_input_stream(self.root_file).readlines()
        return "".join(lines)
|
fchauvel/MAD
|
mad/parsing.py
|
p_retry
|
python
|
def p_retry(p):
if len(p) == 5:
p[0] = Retry(p[3])
elif len(p) == 8:
p[0] = Retry(p[6], **p[3])
else:
raise RuntimeError("Invalid product rules for 'retry_option_list'")
|
retry : RETRY OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET
| RETRY OPEN_BRACKET retry_option_list CLOSE_BRACKET OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET
|
train
|
https://github.com/fchauvel/MAD/blob/806d5174848b1a502e5c683894995602478c448b/mad/parsing.py#L369-L379
| null |
#!/usr/bin/env python
#
# This file is part of MAD.
#
# MAD is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MAD is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MAD. If not, see <http://www.gnu.org/licenses/>.
#
import ply.lex as lex
import ply.yacc as yacc
from mad.ast.settings import *
from mad.ast.definitions import *
from mad.ast.actions import *
reserved = {
"autoscaling": "AUTOSCALING",
"client": "CLIENT",
"delay": "DELAY",
"every": "EVERY",
"fail": "FAIL",
"FIFO": "FIFO",
"ignore": "IGNORE",
"invoke": "INVOKE",
"LIFO": "LIFO",
"limit": "LIMIT",
"limits": "LIMITS",
"none": "NONE",
"operation": "OPERATION",
"period": "PERIOD",
"priority": "PRIORITY",
"queue": "QUEUE",
"query": "QUERY",
"retry": "RETRY",
"service": "SERVICE",
"settings": "SETTINGS",
"tail-drop": "TAIL_DROP",
"think": "THINK",
"throttling": "THROTTLING",
"timeout": "TIMEOUT"
}
# List of token names. This is always required
tokens = [ "CLOSE_BRACKET",
"CLOSE_CURLY_BRACKET",
"CLOSE_SQUARE_BRACKET",
"COLON",
"COMMA",
"IDENTIFIER",
"OPEN_BRACKET",
"OPEN_CURLY_BRACKET",
"OPEN_SQUARE_BRACKET",
"NUMBER",
"REAL",
"SLASH"] + list(reserved.values())
t_CLOSE_BRACKET = r"\)"
t_CLOSE_CURLY_BRACKET = r"\}"
t_CLOSE_SQUARE_BRACKET = r"\]"
t_COLON = r":"
t_COMMA = r","
t_OPEN_BRACKET = r"\("
t_OPEN_CURLY_BRACKET = r"\{"
t_OPEN_SQUARE_BRACKET = r"\["
t_SLASH = r"/"
def t_IDENTIFIER(t):
r'[a-zA-Z_][a-zA-Z0-9_\-]+'
t.type = reserved.get(t.value,'IDENTIFIER') # Check for reserved words
return t
def t_NUMBER(t):
r'[+-]?(\d+(\.\d*)?|\.\d+)([eE][+-]?\d+)?'
return t
def t_newline(t):
    r'\n+'
    # Track line numbers.  Bug fix: the pattern matches a *run* of
    # newlines as one token, so advance by the number of characters
    # matched; the original added only 1 per run, under-counting.
    t.lexer.lineno += len(t.value)
def t_COMMENT(t):
r'\#.*'
pass
# A string containing ignored characters (spaces and tabs)
t_ignore = ' \t'
def t_error(t):
# Error handling rule
print("Illegal character '%s'" % t.value[0])
t.lexer.skip(1)
lexer = lex.lex()
# -----------------------------
# Parsing rules
def p_unit(p):
"""
unit : definition_list
"""
p[0] = p[1]
def p_definition_list(p):
"""
definition_list : definition definition_list
| definition
"""
if len(p) == 3:
p[0] = p[1] + p[2]
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production rules 'p_action_list'")
def p_definition(p):
"""
definition : define_service
| define_client
"""
p[0] = p[1]
def p_define_service(p):
"""
define_service : SERVICE IDENTIFIER OPEN_CURLY_BRACKET settings operation_list CLOSE_CURLY_BRACKET
| SERVICE IDENTIFIER OPEN_CURLY_BRACKET operation_list CLOSE_CURLY_BRACKET
"""
if len(p) == 7:
body = p[4] + p[5]
else:
body = p[4]
p[0] = DefineService(p[2], body)
def p_settings(p):
"""
settings : SETTINGS OPEN_CURLY_BRACKET setting_list CLOSE_CURLY_BRACKET
"""
p[0] = Settings(**p[3])
def p_setting_list(p):
"""
setting_list : setting setting_list
| setting
"""
if len(p) == 3:
p[0] = merge_map(p[1], p[2])
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production rules 'p_setting_list'")
def p_setting(p):
"""
setting : queue
| autoscaling
| throttling
"""
p[0] = p[1]
def p_queue(p):
    """
    queue : QUEUE COLON LIFO
          | QUEUE COLON FIFO
    """
    # Map the discipline keyword (third symbol) onto its settings object.
    if p[3] == "LIFO":
        p[0] = {"queue": LIFO()}
    elif p[3] == "FIFO":
        p[0] = {"queue": FIFO()}
    else:
        # Bug fix: report the offending discipline token p[3]; the original
        # used p[1], which is always the literal keyword 'queue'.
        raise RuntimeError("Queue discipline '%s' is not supported!" % p[3])
def p_throttling(p):
"""
throttling : THROTTLING COLON NONE
| THROTTLING COLON TAIL_DROP OPEN_BRACKET NUMBER CLOSE_BRACKET
"""
throttling = NoThrottlingSettings()
if len(p) == 7:
throttling = TailDropSettings(int(p[5]))
p[0] = {"throttling": throttling}
def p_autoscaling(p):
"""
autoscaling : AUTOSCALING OPEN_CURLY_BRACKET autoscaling_setting_list CLOSE_CURLY_BRACKET
"""
p[0] = {"autoscaling": Autoscaling(**p[3])}
def p_autoscaling_setting_list(p):
"""
autoscaling_setting_list : autoscaling_setting autoscaling_setting_list
| autoscaling_setting
"""
if len(p) == 3:
p[0] = merge_map(p[1], p[2])
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production in 'autoscaling_setting_list'")
def p_autoscaling_setting(p):
"""
autoscaling_setting : PERIOD COLON NUMBER
| LIMITS COLON OPEN_SQUARE_BRACKET NUMBER COMMA NUMBER CLOSE_SQUARE_BRACKET
"""
if len(p) == 8:
p[0] = {"limits": (int(p[4]), int(p[6]))}
elif len(p) == 4:
p[0] = {"period": int(p[3])}
else:
raise RuntimeError("Invalid product in 'autoscaling_setting'")
def p_operation_list(p):
"""
operation_list : define_operation operation_list
| define_operation
"""
if len(p) == 3:
p[0] = p[1] + p[2]
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production rules 'p_operation_list'")
def p_define_client(p):
"""
define_client : CLIENT IDENTIFIER OPEN_CURLY_BRACKET EVERY NUMBER OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET CLOSE_CURLY_BRACKET
"""
p[0] = DefineClientStub(p[2], int(p[5]), p[7])
def p_define_operation(p):
"""
define_operation : OPERATION IDENTIFIER OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET
"""
p[0] = DefineOperation(p[2], p[4])
def p_action_list(p):
"""
action_list : action action_list
| action
"""
if len(p) == 3:
p[0] = p[1] + p[2]
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production rules 'p_action_list'")
def p_action(p):
"""
action : invoke
| query
| think
| fail
| retry
| ignore
"""
p[0] = p[1]
def p_think(p):
"""
think : THINK NUMBER
"""
p[0] = Think(int(p[2]))
def p_fail(p):
"""
fail : FAIL NUMBER
| FAIL
"""
if len(p) > 2:
p[0] = Fail(float(p[2]))
else:
p[0] = Fail()
def p_query(p):
"""
query : QUERY IDENTIFIER SLASH IDENTIFIER
| QUERY IDENTIFIER SLASH IDENTIFIER OPEN_CURLY_BRACKET query_option_list CLOSE_CURLY_BRACKET
"""
parameters = {"service": p[2], "operation": p[4]}
if len(p) > 5:
parameters = merge_map(parameters, p[6])
p[0] = Query(**parameters)
def p_query_option_list(p):
"""
query_option_list : query_option COMMA query_option_list
| query_option
"""
if len(p) == 2:
p[0] = p[1]
elif len(p) == 4:
p[0] = merge_map(p[1], p[3])
else:
raise RuntimeError("Invalid product rules for 'query_option_list'")
def p_query_option(p):
"""
query_option : timeout
| priority
"""
p[0] = p[1]
def p_timeout(p):
"""
timeout : TIMEOUT COLON NUMBER
"""
p[0] = {"timeout": int(p[3])}
def p_priority(p):
"""
priority : PRIORITY COLON NUMBER
"""
p[0] = {"priority": int(p[3])}
def p_invoke(p):
"""
invoke : INVOKE IDENTIFIER SLASH IDENTIFIER
| INVOKE IDENTIFIER SLASH IDENTIFIER OPEN_CURLY_BRACKET PRIORITY COLON NUMBER CLOSE_CURLY_BRACKET
"""
priority = None
if len(p) > 5:
priority = int(p[8])
p[0] = Trigger(p[2], p[4], priority)
def p_retry_option_list(p):
"""
retry_option_list : retry_option COMMA retry_option_list
| retry_option
"""
if len(p) == 4:
p[0] = merge_map(p[1], p[3])
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production in 'retry_option_list'")
def p_retry_option(p):
"""
retry_option : LIMIT COLON NUMBER
| DELAY COLON IDENTIFIER OPEN_BRACKET NUMBER CLOSE_BRACKET
"""
if len(p) == 4:
p[0] = {"limit": int(p[3]) }
elif len(p) == 7:
p[0] = {"delay": Delay(int(p[5]), p[3])}
else:
raise RuntimeError("Invalid production in 'retry_option'")
def p_ignore(p):
"""
ignore : IGNORE OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET
"""
p[0] = IgnoreError(p[3])
def p_error(t):
    """Translate a PLY parse error into a MADSyntaxError.

    Bug fix: PLY invokes this callback with ``t is None`` at unexpected
    end-of-input; the original crashed with AttributeError there.
    """
    if t is None:
        raise MADSyntaxError((0, 0), "unexpected end of input")
    raise MADSyntaxError((t.lineno, t.lexpos), t.value)
def merge_map(map_A, map_B):
tmp = map_A.copy()
tmp.update(map_B)
return tmp
class MADSyntaxError(BaseException):
def __init__(self, position, hint):
self.position = position
self.hint = hint
@property
def line_number(self):
return self.position[0]
def __repr__(self):
return "Syntax error at line {line:d}, around '{hint}'.".format(
line=self.position[0],
hint=self.hint)
class Parser:
    """Front end that reads a MAD source file and parses it into an AST."""

    def __init__(self, file_system, root_file):
        self.root_file = root_file
        self.file_system = file_system

    def parse(self, entry_rule="unit", logger=yacc.NullLogger()):
        """Build the yacc parser and run it over the file content."""
        lexer.lineno = 1  # reset so line numbers are correct across parses
        text = self._content()
        parser = yacc.yacc(start=entry_rule, errorlog=logger)
        return parser.parse(lexer=lexer, input=text)

    def _content(self):
        # Bug fix: readlines() keeps the trailing '\n' on each line, so
        # joining with '\n' doubled every line break.  Assumes
        # open_input_stream returns a file-like object whose readlines()
        # keeps newlines -- TODO confirm.
        lines = self.file_system.open_input_stream(self.root_file).readlines()
        return "".join(lines)
|
fchauvel/MAD
|
mad/parsing.py
|
p_retry_option_list
|
python
|
def p_retry_option_list(p):
if len(p) == 4:
p[0] = merge_map(p[1], p[3])
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production in 'retry_option_list'")
|
retry_option_list : retry_option COMMA retry_option_list
| retry_option
|
train
|
https://github.com/fchauvel/MAD/blob/806d5174848b1a502e5c683894995602478c448b/mad/parsing.py#L382-L392
|
[
"def merge_map(map_A, map_B):\n tmp = map_A.copy()\n tmp.update(map_B)\n return tmp\n"
] |
#!/usr/bin/env python
#
# This file is part of MAD.
#
# MAD is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MAD is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MAD. If not, see <http://www.gnu.org/licenses/>.
#
import ply.lex as lex
import ply.yacc as yacc
from mad.ast.settings import *
from mad.ast.definitions import *
from mad.ast.actions import *
reserved = {
"autoscaling": "AUTOSCALING",
"client": "CLIENT",
"delay": "DELAY",
"every": "EVERY",
"fail": "FAIL",
"FIFO": "FIFO",
"ignore": "IGNORE",
"invoke": "INVOKE",
"LIFO": "LIFO",
"limit": "LIMIT",
"limits": "LIMITS",
"none": "NONE",
"operation": "OPERATION",
"period": "PERIOD",
"priority": "PRIORITY",
"queue": "QUEUE",
"query": "QUERY",
"retry": "RETRY",
"service": "SERVICE",
"settings": "SETTINGS",
"tail-drop": "TAIL_DROP",
"think": "THINK",
"throttling": "THROTTLING",
"timeout": "TIMEOUT"
}
# List of token names. This is always required
tokens = [ "CLOSE_BRACKET",
"CLOSE_CURLY_BRACKET",
"CLOSE_SQUARE_BRACKET",
"COLON",
"COMMA",
"IDENTIFIER",
"OPEN_BRACKET",
"OPEN_CURLY_BRACKET",
"OPEN_SQUARE_BRACKET",
"NUMBER",
"REAL",
"SLASH"] + list(reserved.values())
t_CLOSE_BRACKET = r"\)"
t_CLOSE_CURLY_BRACKET = r"\}"
t_CLOSE_SQUARE_BRACKET = r"\]"
t_COLON = r":"
t_COMMA = r","
t_OPEN_BRACKET = r"\("
t_OPEN_CURLY_BRACKET = r"\{"
t_OPEN_SQUARE_BRACKET = r"\["
t_SLASH = r"/"
def t_IDENTIFIER(t):
r'[a-zA-Z_][a-zA-Z0-9_\-]+'
t.type = reserved.get(t.value,'IDENTIFIER') # Check for reserved words
return t
def t_NUMBER(t):
r'[+-]?(\d+(\.\d*)?|\.\d+)([eE][+-]?\d+)?'
return t
def t_newline(t):
    r'\n+'
    # Track line numbers.  Bug fix: advance by the number of newlines
    # actually matched; the original added only 1 per run.
    t.lexer.lineno += len(t.value)
def t_COMMENT(t):
r'\#.*'
pass
# A string containing ignored characters (spaces and tabs)
t_ignore = ' \t'
def t_error(t):
# Error handling rule
print("Illegal character '%s'" % t.value[0])
t.lexer.skip(1)
lexer = lex.lex()
# -----------------------------
# Parsing rules
def p_unit(p):
"""
unit : definition_list
"""
p[0] = p[1]
def p_definition_list(p):
"""
definition_list : definition definition_list
| definition
"""
if len(p) == 3:
p[0] = p[1] + p[2]
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production rules 'p_action_list'")
def p_definition(p):
"""
definition : define_service
| define_client
"""
p[0] = p[1]
def p_define_service(p):
"""
define_service : SERVICE IDENTIFIER OPEN_CURLY_BRACKET settings operation_list CLOSE_CURLY_BRACKET
| SERVICE IDENTIFIER OPEN_CURLY_BRACKET operation_list CLOSE_CURLY_BRACKET
"""
if len(p) == 7:
body = p[4] + p[5]
else:
body = p[4]
p[0] = DefineService(p[2], body)
def p_settings(p):
"""
settings : SETTINGS OPEN_CURLY_BRACKET setting_list CLOSE_CURLY_BRACKET
"""
p[0] = Settings(**p[3])
def p_setting_list(p):
"""
setting_list : setting setting_list
| setting
"""
if len(p) == 3:
p[0] = merge_map(p[1], p[2])
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production rules 'p_setting_list'")
def p_setting(p):
"""
setting : queue
| autoscaling
| throttling
"""
p[0] = p[1]
def p_queue(p):
    """
    queue : QUEUE COLON LIFO
          | QUEUE COLON FIFO
    """
    # Map the discipline keyword (third symbol) onto its settings object.
    if p[3] == "LIFO":
        p[0] = {"queue": LIFO()}
    elif p[3] == "FIFO":
        p[0] = {"queue": FIFO()}
    else:
        # Bug fix: report the offending discipline token p[3], not the
        # literal keyword 'queue' held in p[1].
        raise RuntimeError("Queue discipline '%s' is not supported!" % p[3])
def p_throttling(p):
"""
throttling : THROTTLING COLON NONE
| THROTTLING COLON TAIL_DROP OPEN_BRACKET NUMBER CLOSE_BRACKET
"""
throttling = NoThrottlingSettings()
if len(p) == 7:
throttling = TailDropSettings(int(p[5]))
p[0] = {"throttling": throttling}
def p_autoscaling(p):
"""
autoscaling : AUTOSCALING OPEN_CURLY_BRACKET autoscaling_setting_list CLOSE_CURLY_BRACKET
"""
p[0] = {"autoscaling": Autoscaling(**p[3])}
def p_autoscaling_setting_list(p):
"""
autoscaling_setting_list : autoscaling_setting autoscaling_setting_list
| autoscaling_setting
"""
if len(p) == 3:
p[0] = merge_map(p[1], p[2])
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production in 'autoscaling_setting_list'")
def p_autoscaling_setting(p):
"""
autoscaling_setting : PERIOD COLON NUMBER
| LIMITS COLON OPEN_SQUARE_BRACKET NUMBER COMMA NUMBER CLOSE_SQUARE_BRACKET
"""
if len(p) == 8:
p[0] = {"limits": (int(p[4]), int(p[6]))}
elif len(p) == 4:
p[0] = {"period": int(p[3])}
else:
raise RuntimeError("Invalid product in 'autoscaling_setting'")
def p_operation_list(p):
"""
operation_list : define_operation operation_list
| define_operation
"""
if len(p) == 3:
p[0] = p[1] + p[2]
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production rules 'p_operation_list'")
def p_define_client(p):
"""
define_client : CLIENT IDENTIFIER OPEN_CURLY_BRACKET EVERY NUMBER OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET CLOSE_CURLY_BRACKET
"""
p[0] = DefineClientStub(p[2], int(p[5]), p[7])
def p_define_operation(p):
"""
define_operation : OPERATION IDENTIFIER OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET
"""
p[0] = DefineOperation(p[2], p[4])
def p_action_list(p):
"""
action_list : action action_list
| action
"""
if len(p) == 3:
p[0] = p[1] + p[2]
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production rules 'p_action_list'")
def p_action(p):
"""
action : invoke
| query
| think
| fail
| retry
| ignore
"""
p[0] = p[1]
def p_think(p):
"""
think : THINK NUMBER
"""
p[0] = Think(int(p[2]))
def p_fail(p):
"""
fail : FAIL NUMBER
| FAIL
"""
if len(p) > 2:
p[0] = Fail(float(p[2]))
else:
p[0] = Fail()
def p_query(p):
"""
query : QUERY IDENTIFIER SLASH IDENTIFIER
| QUERY IDENTIFIER SLASH IDENTIFIER OPEN_CURLY_BRACKET query_option_list CLOSE_CURLY_BRACKET
"""
parameters = {"service": p[2], "operation": p[4]}
if len(p) > 5:
parameters = merge_map(parameters, p[6])
p[0] = Query(**parameters)
def p_query_option_list(p):
"""
query_option_list : query_option COMMA query_option_list
| query_option
"""
if len(p) == 2:
p[0] = p[1]
elif len(p) == 4:
p[0] = merge_map(p[1], p[3])
else:
raise RuntimeError("Invalid product rules for 'query_option_list'")
def p_query_option(p):
"""
query_option : timeout
| priority
"""
p[0] = p[1]
def p_timeout(p):
"""
timeout : TIMEOUT COLON NUMBER
"""
p[0] = {"timeout": int(p[3])}
def p_priority(p):
"""
priority : PRIORITY COLON NUMBER
"""
p[0] = {"priority": int(p[3])}
def p_invoke(p):
"""
invoke : INVOKE IDENTIFIER SLASH IDENTIFIER
| INVOKE IDENTIFIER SLASH IDENTIFIER OPEN_CURLY_BRACKET PRIORITY COLON NUMBER CLOSE_CURLY_BRACKET
"""
priority = None
if len(p) > 5:
priority = int(p[8])
p[0] = Trigger(p[2], p[4], priority)
def p_retry(p):
"""
retry : RETRY OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET
| RETRY OPEN_BRACKET retry_option_list CLOSE_BRACKET OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET
"""
if len(p) == 5:
p[0] = Retry(p[3])
elif len(p) == 8:
p[0] = Retry(p[6], **p[3])
else:
raise RuntimeError("Invalid product rules for 'retry_option_list'")
def p_retry_option(p):
"""
retry_option : LIMIT COLON NUMBER
| DELAY COLON IDENTIFIER OPEN_BRACKET NUMBER CLOSE_BRACKET
"""
if len(p) == 4:
p[0] = {"limit": int(p[3]) }
elif len(p) == 7:
p[0] = {"delay": Delay(int(p[5]), p[3])}
else:
raise RuntimeError("Invalid production in 'retry_option'")
def p_ignore(p):
"""
ignore : IGNORE OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET
"""
p[0] = IgnoreError(p[3])
def p_error(t):
    """Translate a PLY parse error into a MADSyntaxError.

    Bug fix: PLY invokes this callback with ``t is None`` at unexpected
    end-of-input; the original crashed with AttributeError there.
    """
    if t is None:
        raise MADSyntaxError((0, 0), "unexpected end of input")
    raise MADSyntaxError((t.lineno, t.lexpos), t.value)
def merge_map(map_A, map_B):
tmp = map_A.copy()
tmp.update(map_B)
return tmp
class MADSyntaxError(BaseException):
def __init__(self, position, hint):
self.position = position
self.hint = hint
@property
def line_number(self):
return self.position[0]
def __repr__(self):
return "Syntax error at line {line:d}, around '{hint}'.".format(
line=self.position[0],
hint=self.hint)
class Parser:
    """Front end that reads a MAD source file and parses it into an AST."""

    def __init__(self, file_system, root_file):
        self.root_file = root_file
        self.file_system = file_system

    def parse(self, entry_rule="unit", logger=yacc.NullLogger()):
        """Build the yacc parser and run it over the file content."""
        lexer.lineno = 1  # reset so line numbers are correct across parses
        text = self._content()
        parser = yacc.yacc(start=entry_rule, errorlog=logger)
        return parser.parse(lexer=lexer, input=text)

    def _content(self):
        # Bug fix: readlines() keeps the trailing '\n' on each line, so
        # joining with '\n' doubled every line break.  Assumes
        # open_input_stream returns a file-like object whose readlines()
        # keeps newlines -- TODO confirm.
        lines = self.file_system.open_input_stream(self.root_file).readlines()
        return "".join(lines)
|
fchauvel/MAD
|
mad/parsing.py
|
p_retry_option
|
python
|
def p_retry_option(p):
if len(p) == 4:
p[0] = {"limit": int(p[3]) }
elif len(p) == 7:
p[0] = {"delay": Delay(int(p[5]), p[3])}
else:
raise RuntimeError("Invalid production in 'retry_option'")
|
retry_option : LIMIT COLON NUMBER
| DELAY COLON IDENTIFIER OPEN_BRACKET NUMBER CLOSE_BRACKET
|
train
|
https://github.com/fchauvel/MAD/blob/806d5174848b1a502e5c683894995602478c448b/mad/parsing.py#L395-L405
| null |
#!/usr/bin/env python
#
# This file is part of MAD.
#
# MAD is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MAD is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MAD. If not, see <http://www.gnu.org/licenses/>.
#
import ply.lex as lex
import ply.yacc as yacc
from mad.ast.settings import *
from mad.ast.definitions import *
from mad.ast.actions import *
reserved = {
"autoscaling": "AUTOSCALING",
"client": "CLIENT",
"delay": "DELAY",
"every": "EVERY",
"fail": "FAIL",
"FIFO": "FIFO",
"ignore": "IGNORE",
"invoke": "INVOKE",
"LIFO": "LIFO",
"limit": "LIMIT",
"limits": "LIMITS",
"none": "NONE",
"operation": "OPERATION",
"period": "PERIOD",
"priority": "PRIORITY",
"queue": "QUEUE",
"query": "QUERY",
"retry": "RETRY",
"service": "SERVICE",
"settings": "SETTINGS",
"tail-drop": "TAIL_DROP",
"think": "THINK",
"throttling": "THROTTLING",
"timeout": "TIMEOUT"
}
# List of token names. This is always required
tokens = [ "CLOSE_BRACKET",
"CLOSE_CURLY_BRACKET",
"CLOSE_SQUARE_BRACKET",
"COLON",
"COMMA",
"IDENTIFIER",
"OPEN_BRACKET",
"OPEN_CURLY_BRACKET",
"OPEN_SQUARE_BRACKET",
"NUMBER",
"REAL",
"SLASH"] + list(reserved.values())
t_CLOSE_BRACKET = r"\)"
t_CLOSE_CURLY_BRACKET = r"\}"
t_CLOSE_SQUARE_BRACKET = r"\]"
t_COLON = r":"
t_COMMA = r","
t_OPEN_BRACKET = r"\("
t_OPEN_CURLY_BRACKET = r"\{"
t_OPEN_SQUARE_BRACKET = r"\["
t_SLASH = r"/"
def t_IDENTIFIER(t):
    r'[a-zA-Z_][a-zA-Z0-9_\-]*'
    # Generalized: the original tail quantifier was '+', which rejected
    # single-character identifiers such as 'x'; '*' accepts a strict
    # superset of the old inputs (all reserved words are >= 2 chars, so
    # keyword recognition is unaffected).
    t.type = reserved.get(t.value, 'IDENTIFIER')  # check for reserved words
    return t
def t_NUMBER(t):
r'[+-]?(\d+(\.\d*)?|\.\d+)([eE][+-]?\d+)?'
return t
def t_newline(t):
    r'\n+'
    # Track line numbers.  Bug fix: advance by the number of newlines
    # actually matched; the original added only 1 per run.
    t.lexer.lineno += len(t.value)
def t_COMMENT(t):
r'\#.*'
pass
# A string containing ignored characters (spaces and tabs)
t_ignore = ' \t'
def t_error(t):
# Error handling rule
print("Illegal character '%s'" % t.value[0])
t.lexer.skip(1)
lexer = lex.lex()
# -----------------------------
# Parsing rules
def p_unit(p):
"""
unit : definition_list
"""
p[0] = p[1]
def p_definition_list(p):
"""
definition_list : definition definition_list
| definition
"""
if len(p) == 3:
p[0] = p[1] + p[2]
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production rules 'p_action_list'")
def p_definition(p):
"""
definition : define_service
| define_client
"""
p[0] = p[1]
def p_define_service(p):
"""
define_service : SERVICE IDENTIFIER OPEN_CURLY_BRACKET settings operation_list CLOSE_CURLY_BRACKET
| SERVICE IDENTIFIER OPEN_CURLY_BRACKET operation_list CLOSE_CURLY_BRACKET
"""
if len(p) == 7:
body = p[4] + p[5]
else:
body = p[4]
p[0] = DefineService(p[2], body)
def p_settings(p):
"""
settings : SETTINGS OPEN_CURLY_BRACKET setting_list CLOSE_CURLY_BRACKET
"""
p[0] = Settings(**p[3])
def p_setting_list(p):
"""
setting_list : setting setting_list
| setting
"""
if len(p) == 3:
p[0] = merge_map(p[1], p[2])
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production rules 'p_setting_list'")
def p_setting(p):
"""
setting : queue
| autoscaling
| throttling
"""
p[0] = p[1]
def p_queue(p):
    """
    queue : QUEUE COLON LIFO
          | QUEUE COLON FIFO
    """
    # Map the discipline keyword (third symbol) onto its settings object.
    if p[3] == "LIFO":
        p[0] = {"queue": LIFO()}
    elif p[3] == "FIFO":
        p[0] = {"queue": FIFO()}
    else:
        # Bug fix: report the offending discipline token p[3], not the
        # literal keyword 'queue' held in p[1].
        raise RuntimeError("Queue discipline '%s' is not supported!" % p[3])
def p_throttling(p):
"""
throttling : THROTTLING COLON NONE
| THROTTLING COLON TAIL_DROP OPEN_BRACKET NUMBER CLOSE_BRACKET
"""
throttling = NoThrottlingSettings()
if len(p) == 7:
throttling = TailDropSettings(int(p[5]))
p[0] = {"throttling": throttling}
def p_autoscaling(p):
"""
autoscaling : AUTOSCALING OPEN_CURLY_BRACKET autoscaling_setting_list CLOSE_CURLY_BRACKET
"""
p[0] = {"autoscaling": Autoscaling(**p[3])}
def p_autoscaling_setting_list(p):
"""
autoscaling_setting_list : autoscaling_setting autoscaling_setting_list
| autoscaling_setting
"""
if len(p) == 3:
p[0] = merge_map(p[1], p[2])
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production in 'autoscaling_setting_list'")
def p_autoscaling_setting(p):
"""
autoscaling_setting : PERIOD COLON NUMBER
| LIMITS COLON OPEN_SQUARE_BRACKET NUMBER COMMA NUMBER CLOSE_SQUARE_BRACKET
"""
if len(p) == 8:
p[0] = {"limits": (int(p[4]), int(p[6]))}
elif len(p) == 4:
p[0] = {"period": int(p[3])}
else:
raise RuntimeError("Invalid product in 'autoscaling_setting'")
def p_operation_list(p):
"""
operation_list : define_operation operation_list
| define_operation
"""
if len(p) == 3:
p[0] = p[1] + p[2]
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production rules 'p_operation_list'")
def p_define_client(p):
"""
define_client : CLIENT IDENTIFIER OPEN_CURLY_BRACKET EVERY NUMBER OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET CLOSE_CURLY_BRACKET
"""
p[0] = DefineClientStub(p[2], int(p[5]), p[7])
def p_define_operation(p):
"""
define_operation : OPERATION IDENTIFIER OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET
"""
p[0] = DefineOperation(p[2], p[4])
def p_action_list(p):
"""
action_list : action action_list
| action
"""
if len(p) == 3:
p[0] = p[1] + p[2]
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production rules 'p_action_list'")
def p_action(p):
"""
action : invoke
| query
| think
| fail
| retry
| ignore
"""
p[0] = p[1]
def p_think(p):
"""
think : THINK NUMBER
"""
p[0] = Think(int(p[2]))
def p_fail(p):
"""
fail : FAIL NUMBER
| FAIL
"""
if len(p) > 2:
p[0] = Fail(float(p[2]))
else:
p[0] = Fail()
def p_query(p):
"""
query : QUERY IDENTIFIER SLASH IDENTIFIER
| QUERY IDENTIFIER SLASH IDENTIFIER OPEN_CURLY_BRACKET query_option_list CLOSE_CURLY_BRACKET
"""
parameters = {"service": p[2], "operation": p[4]}
if len(p) > 5:
parameters = merge_map(parameters, p[6])
p[0] = Query(**parameters)
def p_query_option_list(p):
"""
query_option_list : query_option COMMA query_option_list
| query_option
"""
if len(p) == 2:
p[0] = p[1]
elif len(p) == 4:
p[0] = merge_map(p[1], p[3])
else:
raise RuntimeError("Invalid product rules for 'query_option_list'")
def p_query_option(p):
"""
query_option : timeout
| priority
"""
p[0] = p[1]
def p_timeout(p):
"""
timeout : TIMEOUT COLON NUMBER
"""
p[0] = {"timeout": int(p[3])}
def p_priority(p):
"""
priority : PRIORITY COLON NUMBER
"""
p[0] = {"priority": int(p[3])}
def p_invoke(p):
"""
invoke : INVOKE IDENTIFIER SLASH IDENTIFIER
| INVOKE IDENTIFIER SLASH IDENTIFIER OPEN_CURLY_BRACKET PRIORITY COLON NUMBER CLOSE_CURLY_BRACKET
"""
priority = None
if len(p) > 5:
priority = int(p[8])
p[0] = Trigger(p[2], p[4], priority)
def p_retry(p):
"""
retry : RETRY OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET
| RETRY OPEN_BRACKET retry_option_list CLOSE_BRACKET OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET
"""
if len(p) == 5:
p[0] = Retry(p[3])
elif len(p) == 8:
p[0] = Retry(p[6], **p[3])
else:
raise RuntimeError("Invalid product rules for 'retry_option_list'")
def p_retry_option_list(p):
"""
retry_option_list : retry_option COMMA retry_option_list
| retry_option
"""
if len(p) == 4:
p[0] = merge_map(p[1], p[3])
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production in 'retry_option_list'")
def p_ignore(p):
"""
ignore : IGNORE OPEN_CURLY_BRACKET action_list CLOSE_CURLY_BRACKET
"""
p[0] = IgnoreError(p[3])
def p_error(t):
    """Translate a PLY parse error into a MADSyntaxError.

    Bug fix: PLY invokes this callback with ``t is None`` at unexpected
    end-of-input; the original crashed with AttributeError there.
    """
    if t is None:
        raise MADSyntaxError((0, 0), "unexpected end of input")
    raise MADSyntaxError((t.lineno, t.lexpos), t.value)
def merge_map(map_A, map_B):
    """Return a new dict holding the entries of both maps.

    Entries of map_B override those of map_A on key collision; neither
    argument is modified.
    """
    return {**map_A, **map_B}
class MADSyntaxError(BaseException):
    """Signals a parse failure; carries a (line, lexpos) pair and the
    offending text as a hint."""

    def __init__(self, position, hint):
        self.position = position
        self.hint = hint

    @property
    def line_number(self):
        line, _ = self.position
        return line

    def __repr__(self):
        line, _ = self.position
        return "Syntax error at line {line:d}, around '{hint}'.".format(
            line=line, hint=self.hint)
class Parser:
def __init__(self, file_system, root_file):
self.root_file = root_file
self.file_system = file_system
def parse(self, entry_rule="unit", logger=yacc.NullLogger()):
lexer.lineno = 1
text = self._content()
parser = yacc.yacc(start=entry_rule, errorlog=logger)
return parser.parse(lexer=lexer, input=text)
def _content(self):
lines = self.file_system.open_input_stream(self.root_file).readlines()
return "\n".join(lines)
|
DomBennett/TaxonNamesResolver
|
taxon_names_resolver/manip_tools.py
|
stringClade
|
python
|
def stringClade(taxrefs, name, at):
    """Build a Newick clade string from a list of TaxRefs.

    Each ref contributes 'ident:distance', where distance is the gap
    between the current tree-growth level ``at`` and the ref's level.
    Whitespace in idents is replaced with '_' because Newick trees
    cannot contain spaces.
    """
    entries = [
        '{0}:{1}'.format(re.sub(r"\s", "_", ref.ident), float(at - ref.level))
        for ref in taxrefs
    ]
    return '({0}){1}'.format(','.join(entries), name)
|
Return a Newick string from a list of TaxRefs
|
train
|
https://github.com/DomBennett/TaxonNamesResolver/blob/a2556cc0f8b7442d83990715c92fdf6f787e1f41/taxon_names_resolver/manip_tools.py#L192-L205
| null |
#! /usr/bin/env python
# D.J. Bennett
# 24/03/2014
"""
misc tools
"""
from __future__ import absolute_import
# PACKAGES
import re
from six.moves import range
# GLOBALS
# nodes on a taxonomic tree
default_taxonomy = ['subspecies', 'species', 'subgenus', 'genus', 'tribe',
'subfamily', 'family', 'superfamily', 'parvorder',
'infraorder', 'suborder', 'order', 'superorder',
'parvclass', 'infraclass', 'subclass', 'class',
'superclass', 'subphylum', 'phylum', 'kingdom',
'superkingdom']
# CLASSES
class TaxRef(object):
    '''Reference for taxonimic identities'''
    def __init__(self, ident, rank, taxonomy=default_taxonomy):
        # Assign through object.__setattr__ to bypass the validating
        # __setattr__ below (which rejects 'taxonomy' outright).
        super(TaxRef, self).__setattr__('taxonomy', taxonomy)
        super(TaxRef, self).__setattr__('ident', ident)
        super(TaxRef, self).__setattr__('rank', rank)
        # level is a numerical value for rank in taxonomy
        super(TaxRef, self).__setattr__('level',
                                        self._getLevel(rank, taxonomy))
        super(TaxRef, self).__setattr__('counter', 0)  # count ident changes
    def change(self, ident, rank=None):
        '''Change ident'''
        self.ident = ident
        if rank:
            self.rank = rank
            self.level = self._getLevel(rank, self.taxonomy)
        # count changes made to instance
        self.counter += 1
    def _getLevel(self, rank, taxonomy):
        # Return the index of `rank` in `taxonomy`; when absent, probe the
        # default taxonomy outwards from the rank's default position for
        # the nearest rank that does occur in `taxonomy`.
        if rank in taxonomy:
            return taxonomy.index(rank)
        # else find its closest by using the default taxonomy
        dlevel = default_taxonomy.index(rank)
        i = 1
        d = dlevel + i
        up = True
        # Alternate above (+i) and below (-i) dlevel, widening i by one
        # each full up/down cycle.
        while i <= len(default_taxonomy):
            if d > 0:
                try:
                    drank = default_taxonomy[d]
                except IndexError:
                    pass
                # NOTE(review): if the very first lookup raises IndexError,
                # 'drank' is unbound on the next line (NameError) -- confirm
                # this cannot happen for real taxonomies.
                if drank in taxonomy:
                    return taxonomy.index(drank)
            if up:
                d = dlevel - i
                up = False
            else:
                i += 1
                d = dlevel + i
                up = True
        # NOTE(review): implicitly returns None when no match is found --
        # verify callers (e.g. __init__'s level assignment, which requires
        # an int) tolerate this.
    def __repr__(self):
        return self.ident
    def __str__(self):
        return '{0} -- {1}@{2}'.format(self.ident, self.rank, self.level)
    def __setattr__(self, name, value):
        # Validate on every assignment: only the four attributes below may
        # be set after construction, each with a type check.
        if name in ['ident', 'rank']:
            if not isinstance(value, str):
                raise ValueError('[{0}] must be a string'.format(name))
            super(TaxRef, self).__setattr__(name, value)
        elif name in ['level', 'counter']:
            if not isinstance(value, int):
                raise ValueError('[{0}] must be an integer'.format(name))
            super(TaxRef, self).__setattr__(name, value)
        else:
            raise AttributeError(name)
class TaxDict(dict):
    '''Taxonomic Dictionary : hold and return taxonomic information.

    Maps each ident (the unique name of a taxonomic entity, e.g. a query
    name) to a dict with keys: 'lineage' (names ordered by ``self.taxonomy``,
    '' where missing), 'taxref' (a TaxRef), 'cident' (contextual ident,
    filled in by ``_contextualise``), 'rank' (lowest resolved rank) and
    'ident' (the lineage entry at the TaxRef's level).
    '''

    def __init__(self, idents, ranks, lineages, taxonomy=default_taxonomy,
                 **kwargs):
        # add entry for each ident of lineages ordered by taxonomy
        # ranks without corresponding lineage are given ''
        # 'ident' is the unique name for a taxonomic entity (e.g. query name)
        # 'ranks' must be the names of the corresponding ranks in lineages
        # (e.g. classification_path_ranks)
        # 'lineages' is the names for each of the ranks (e.g.
        # classification_path or classification_path_ids)
        if taxonomy:
            self.taxonomy = taxonomy
        else:
            # NOTE(review): unreachable with the current default argument --
            # 'taxonomy' already defaults to default_taxonomy above.
            self.taxonomy = default_taxonomy
        for i in range(len(idents)):
            # extract lineage according to given taxonomy
            lineage = [lineages[i][ranks[i].index(e)] if e in ranks[i] else ''
                       for e in self.taxonomy]
            # create taxref
            taxref = TaxRef(ident=idents[i], rank=ranks[i][-1],
                            taxonomy=self.taxonomy)
            # create key for ident and insert a dictionary of:
            # lineage, taxref, cident, ident and rank
            self[idents[i]] = {'lineage': lineage, 'taxref': taxref,
                               'cident': None, 'rank': ranks[i][-1],
                               'ident': lineage[taxref.level]}
        # add additional optional slots from **kwargs
        self._additional(idents, kwargs)
        # gen hierarchy
        self._hierarchy()
        # contextualise
        self._contextualise()

    def _additional(self, idents, kwargs):
        '''Add additional data slots from **kwargs.

        Each keyword value must be a list parallel to ``idents``; element i
        is stored under the keyword name in ``self[idents[i]]``.
        '''
        if kwargs:
            for name, value in list(kwargs.items()):
                if not isinstance(value, list):
                    raise ValueError('Additional arguments must be lists of \
same length as idents')
                for i in range(len(value)):
                    self[idents[i]][name] = value[i]

    def _slice(self, level):
        '''Return list of tuples of ident and lineage ident for given level
(numbered rank)'''
        if level >= len(self.taxonomy):
            raise IndexError('Level greater than size of taxonomy')
        res = []
        # sorted keys give a deterministic slice order
        for ident in sorted(list(self.keys())):
            res.append((self[ident]['taxref'], self[ident]['lineage'][level]))
        return res

    def _group(self, taxslice):
        '''Return list of lists of idents grouped by shared rank.

        Consumes *taxslice* destructively (pops every element).
        '''
        res = []
        while taxslice:
            taxref, lident = taxslice.pop()
            if lident == '':
                # unnamed at this rank: keep as its own singleton group
                res.append(([taxref], lident))
            else:
                # identify idents in the same group and pop from taxslice
                i = 0
                group = []
                while i < len(taxslice):
                    if taxslice[i][1] == lident:
                        group.append(taxslice.pop(i)[0])
                    else:
                        i += 1
                group.append(taxref)
                res.append((group, lident))
        return res

    def _hierarchy(self):
        '''Generate dictionary of referenced idents grouped by shared rank'''
        self.hierarchy = {}
        for rank in self.taxonomy:
            # extract lineage idents for this rank
            taxslice = self._slice(level=self.taxonomy.index(rank))
            # group idents by shared group at this rank
            self.hierarchy[rank] = self._group(taxslice)

    def _contextualise(self):
        '''Determine contextual idents (cidents)'''
        # loop through hierarchy identifying unique lineages,
        # walking from the broadest rank down to the narrowest
        # TODO: gain other contextual information, not just ident
        deja_vues = []
        for rank in reversed(self.taxonomy):
            # return named clades -- '' are ignored
            clades = [e for e in self.hierarchy[rank] if e[1]]
            # get unique lineages at this level
            uniques = [e for e in clades if len(e[0]) == 1]
            # remove those already seen
            uniques = [e for e in uniques if e[0][0].ident not in deja_vues]
            # add each to self[ident]['cident']
            for e in uniques:
                ident = e[0][0].ident
                self[ident]['cident'] = e[1]
                deja_vues.append(ident)
# FUNCTIONS
def taxTree(taxdict):
    """Return taxonomic Newick tree (terminated with ';').

    Consumes *taxdict* destructively: merged TaxRefs are rewritten in place
    (the first ref of a clade becomes the growing Newick substring, the
    others become ''), so the TaxDict should not be reused afterwards.
    """
    # the taxonomic dictionary holds the lineage of each ident in
    # the same order as the taxonomy
    # use hierarchy to construct a taxonomic tree
    for rank in taxdict.taxonomy:
        current_level = float(taxdict.taxonomy.index(rank))
        # get clades at this rank in hierarchy
        clades = taxdict.hierarchy[rank]
        # merge those that are in the same clade into a cladestring
        for clade in clades:
            # unpack the identities in this clade and its clade name
            cladeidents, cladename = clade
            # Remove '' TaxRefs -- in cladestring already
            cladeidents = [e for e in cladeidents if e.ident]
            # only create cladestring if more than one ident in clade
            if len(cladeidents) < 2:
                continue
            # label node by 'clade'_'rank'
            cladename = '{0}_{1}'.format(cladename, rank)
            cladestring = stringClade(cladeidents, cladename, current_level)
            # replace first TaxRef in cladeidents with cladestring
            cladeidents[0].change(ident=cladestring, rank=rank)
            # replace all other TaxRefs with ''
            for e in cladeidents[1:]:
                e.change(ident='', rank=rank)
    # join any remaining strands into tree
    if len(taxdict.hierarchy[taxdict.taxonomy[-1]]) > 1:
        # unlist first
        clade = [e[0] for e in taxdict.hierarchy[taxdict.taxonomy[-1]]]
        cladeidents = sum(clade, [])
        cladeidents = [e for e in cladeidents if e.ident]
        cladestring = stringClade(cladeidents, 'life', current_level+1)
    # NOTE(review): if the 'if' above is False this returns the cladestring
    # left over from the last loop iteration -- presumably that already holds
    # the whole tree; confirm. Raises NameError if no clade was ever merged.
    return cladestring + ';'
|
DomBennett/TaxonNamesResolver
|
taxon_names_resolver/manip_tools.py
|
taxTree
|
python
|
def taxTree(taxdict):
# the taxonomic dictionary holds the lineage of each ident in
# the same order as the taxonomy
# use hierarchy to construct a taxonomic tree
for rank in taxdict.taxonomy:
current_level = float(taxdict.taxonomy.index(rank))
# get clades at this rank in hierarchy
clades = taxdict.hierarchy[rank]
# merge those that are in the same clade into a cladestring
for clade in clades:
# unpack the identities in this clade and its clade name
cladeidents, cladename = clade
# Remove '' TaxRefs -- in cladestring already
cladeidents = [e for e in cladeidents if e.ident]
# only create cladestring if more than one ident in clade
if len(cladeidents) < 2:
continue
# label node by 'clade'_'rank'
cladename = '{0}_{1}'.format(cladename, rank)
cladestring = stringClade(cladeidents, cladename, current_level)
# replace first TaxRef in cladeidents with cladestring
cladeidents[0].change(ident=cladestring, rank=rank)
# replace all other TaxRefs with ''
for e in cladeidents[1:]:
e.change(ident='', rank=rank)
# join any remaining strands into tree
if len(taxdict.hierarchy[taxdict.taxonomy[-1]]) > 1:
# unlist first
clade = [e[0] for e in taxdict.hierarchy[taxdict.taxonomy[-1]]]
cladeidents = sum(clade, [])
cladeidents = [e for e in cladeidents if e.ident]
cladestring = stringClade(cladeidents, 'life', current_level+1)
return cladestring + ';'
|
Return taxonomic Newick tree
|
train
|
https://github.com/DomBennett/TaxonNamesResolver/blob/a2556cc0f8b7442d83990715c92fdf6f787e1f41/taxon_names_resolver/manip_tools.py#L208-L241
|
[
"def stringClade(taxrefs, name, at):\n '''Return a Newick string from a list of TaxRefs'''\n string = []\n for ref in taxrefs:\n # distance is the difference between the taxonomic level of the ref\n # and the current level of the tree growth\n d = float(at-ref.level)\n # ensure no spaces in ident, Newick tree cannot have spaces\n ident = re.sub(\"\\s\", \"_\", ref.ident)\n string.append('{0}:{1}'.format(ident, d))\n # join into single string with a name for the clade\n string = ','.join(string)\n string = '({0}){1}'.format(string, name)\n return string\n"
] |
#! /usr/bin/env python
# D.J. Bennett
# 24/03/2014
"""
misc tools
"""
from __future__ import absolute_import
# PACKAGES
import re
from six.moves import range
# GLOBALS
# nodes on a taxonomic tree
default_taxonomy = ['subspecies', 'species', 'subgenus', 'genus', 'tribe',
'subfamily', 'family', 'superfamily', 'parvorder',
'infraorder', 'suborder', 'order', 'superorder',
'parvclass', 'infraclass', 'subclass', 'class',
'superclass', 'subphylum', 'phylum', 'kingdom',
'superkingdom']
# CLASSES
class TaxRef(object):
'''Reference for taxonimic identities'''
def __init__(self, ident, rank, taxonomy=default_taxonomy):
super(TaxRef, self).__setattr__('taxonomy', taxonomy)
super(TaxRef, self).__setattr__('ident', ident)
super(TaxRef, self).__setattr__('rank', rank)
# level is a numerical value for rank in taxonomy
super(TaxRef, self).__setattr__('level',
self._getLevel(rank, taxonomy))
super(TaxRef, self).__setattr__('counter', 0) # count ident changes
def change(self, ident, rank=None):
'''Change ident'''
self.ident = ident
if rank:
self.rank = rank
self.level = self._getLevel(rank, self.taxonomy)
# count changes made to instance
self.counter += 1
def _getLevel(self, rank, taxonomy):
if rank in taxonomy:
return taxonomy.index(rank)
# else find its closest by using the default taxonomy
dlevel = default_taxonomy.index(rank)
i = 1
d = dlevel + i
up = True
while i <= len(default_taxonomy):
if d > 0:
try:
drank = default_taxonomy[d]
except IndexError:
pass
if drank in taxonomy:
return taxonomy.index(drank)
if up:
d = dlevel - i
up = False
else:
i += 1
d = dlevel + i
up = True
def __repr__(self):
return self.ident
def __str__(self):
return '{0} -- {1}@{2}'.format(self.ident, self.rank, self.level)
def __setattr__(self, name, value):
if name in ['ident', 'rank']:
if not isinstance(value, str):
raise ValueError('[{0}] must be a string'.format(name))
super(TaxRef, self).__setattr__(name, value)
elif name in ['level', 'counter']:
if not isinstance(value, int):
raise ValueError('[{0}] must be an integer'.format(name))
super(TaxRef, self).__setattr__(name, value)
else:
raise AttributeError(name)
class TaxDict(dict):
'''Taxonomic Dictionary : hold and return taxonomic information'''
def __init__(self, idents, ranks, lineages, taxonomy=default_taxonomy,
**kwargs):
# add entry for each ident of lineages ordered by taxonomy
# ranks without corresponding lineage are given ''
# 'ident' is the unique name for a taxonomic entity (e.g. query name)
# 'ranks' must be the names of the corresponding ranks in lineages
# (e.g. classification_path_ranks)
# 'lineages' is the names for each of the ranks (e.g.
# classification_path or classification_path_ids)
if taxonomy:
self.taxonomy = taxonomy
else:
self.taxonomy = default_taxonomy
for i in range(len(idents)):
# extract lineage according to given taxonomy
lineage = [lineages[i][ranks[i].index(e)] if e in ranks[i] else ''
for e in self.taxonomy]
# create taxref
taxref = TaxRef(ident=idents[i], rank=ranks[i][-1],
taxonomy=self.taxonomy)
# create key for ident and insert a dictionary of:
# lineage, taxref, cident, ident and rank
self[idents[i]] = {'lineage': lineage, 'taxref': taxref,
'cident': None, 'rank': ranks[i][-1],
'ident': lineage[taxref.level]}
# add addtional optional slots from **kwargs
self._additional(idents, kwargs)
# gen hierarchy
self._hierarchy()
# contexualise
self._contextualise()
def _additional(self, idents, kwargs):
'''Add additional data slots from **kwargs'''
if kwargs:
for name, value in list(kwargs.items()):
if not isinstance(value, list):
raise ValueError('Additional arguments must be lists of \
same length as idents')
for i in range(len(value)):
self[idents[i]][name] = value[i]
def _slice(self, level):
'''Return list of tuples of ident and lineage ident for given level
(numbered rank)'''
if level >= len(self.taxonomy):
raise IndexError('Level greater than size of taxonomy')
res = []
for ident in sorted(list(self.keys())):
res.append((self[ident]['taxref'], self[ident]['lineage'][level]))
return res
def _group(self, taxslice):
'''Return list of lists of idents grouped by shared rank'''
res = []
while taxslice:
taxref, lident = taxslice.pop()
if lident == '':
res.append(([taxref], lident))
else:
# identify idents in the same group and pop from taxslice
i = 0
group = []
while i < len(taxslice):
if taxslice[i][1] == lident:
group.append(taxslice.pop(i)[0])
else:
i += 1
group.append(taxref)
res.append((group, lident))
return res
def _hierarchy(self):
'''Generate dictionary of referenced idents grouped by shared rank'''
self.hierarchy = {}
for rank in self.taxonomy:
# extract lineage idents for this rank
taxslice = self._slice(level=self.taxonomy.index(rank))
# group idents by shared group at this rank
self.hierarchy[rank] = self._group(taxslice)
def _contextualise(self):
'''Determine contextual idents (cidents)'''
# loop through hierarchy identifying unique lineages
# TODO: gain other contextual information, not just ident
deja_vues = []
for rank in reversed(self.taxonomy):
# return named clades -- '' are ignored
clades = [e for e in self.hierarchy[rank] if e[1]]
# print 'Rank: {0} - {1}'.format(rank, len(clades))
# get unique lineages at this level
uniques = [e for e in clades if len(e[0]) == 1]
# removed those already seen
uniques = [e for e in uniques if e[0][0].ident not in deja_vues]
# add each to self[ident]['cident']
for e in uniques:
ident = e[0][0].ident
self[ident]['cident'] = e[1]
deja_vues.append(ident)
# FUNCTIONS
def stringClade(taxrefs, name, at):
'''Return a Newick string from a list of TaxRefs'''
string = []
for ref in taxrefs:
# distance is the difference between the taxonomic level of the ref
# and the current level of the tree growth
d = float(at-ref.level)
# ensure no spaces in ident, Newick tree cannot have spaces
ident = re.sub("\s", "_", ref.ident)
string.append('{0}:{1}'.format(ident, d))
# join into single string with a name for the clade
string = ','.join(string)
string = '({0}){1}'.format(string, name)
return string
|
DomBennett/TaxonNamesResolver
|
taxon_names_resolver/manip_tools.py
|
TaxRef.change
|
python
|
def change(self, ident, rank=None):
    '''Change ident (and optionally rank), bumping the change counter.'''
    self.ident = ident
    if rank:
        # a new rank also moves the numerical level within the taxonomy
        self.rank = rank
        self.level = self._getLevel(rank, self.taxonomy)
    # count changes made to instance
    self.counter += 1
|
Change ident
|
train
|
https://github.com/DomBennett/TaxonNamesResolver/blob/a2556cc0f8b7442d83990715c92fdf6f787e1f41/taxon_names_resolver/manip_tools.py#L35-L42
|
[
"def _getLevel(self, rank, taxonomy):\n if rank in taxonomy:\n return taxonomy.index(rank)\n # else find its closest by using the default taxonomy\n dlevel = default_taxonomy.index(rank)\n i = 1\n d = dlevel + i\n up = True\n while i <= len(default_taxonomy):\n if d > 0:\n try:\n drank = default_taxonomy[d]\n except IndexError:\n pass\n if drank in taxonomy:\n return taxonomy.index(drank)\n if up:\n d = dlevel - i\n up = False\n else:\n i += 1\n d = dlevel + i\n up = True\n"
] |
class TaxRef(object):
'''Reference for taxonimic identities'''
def __init__(self, ident, rank, taxonomy=default_taxonomy):
super(TaxRef, self).__setattr__('taxonomy', taxonomy)
super(TaxRef, self).__setattr__('ident', ident)
super(TaxRef, self).__setattr__('rank', rank)
# level is a numerical value for rank in taxonomy
super(TaxRef, self).__setattr__('level',
self._getLevel(rank, taxonomy))
super(TaxRef, self).__setattr__('counter', 0) # count ident changes
def _getLevel(self, rank, taxonomy):
if rank in taxonomy:
return taxonomy.index(rank)
# else find its closest by using the default taxonomy
dlevel = default_taxonomy.index(rank)
i = 1
d = dlevel + i
up = True
while i <= len(default_taxonomy):
if d > 0:
try:
drank = default_taxonomy[d]
except IndexError:
pass
if drank in taxonomy:
return taxonomy.index(drank)
if up:
d = dlevel - i
up = False
else:
i += 1
d = dlevel + i
up = True
def __repr__(self):
return self.ident
def __str__(self):
return '{0} -- {1}@{2}'.format(self.ident, self.rank, self.level)
def __setattr__(self, name, value):
if name in ['ident', 'rank']:
if not isinstance(value, str):
raise ValueError('[{0}] must be a string'.format(name))
super(TaxRef, self).__setattr__(name, value)
elif name in ['level', 'counter']:
if not isinstance(value, int):
raise ValueError('[{0}] must be an integer'.format(name))
super(TaxRef, self).__setattr__(name, value)
else:
raise AttributeError(name)
|
DomBennett/TaxonNamesResolver
|
taxon_names_resolver/manip_tools.py
|
TaxDict._additional
|
python
|
def _additional(self, idents, kwargs):
'''Add additional data slots from **kwargs'''
if kwargs:
for name, value in list(kwargs.items()):
if not isinstance(value, list):
raise ValueError('Additional arguments must be lists of \
same length as idents')
for i in range(len(value)):
self[idents[i]][name] = value[i]
|
Add additional data slots from **kwargs
|
train
|
https://github.com/DomBennett/TaxonNamesResolver/blob/a2556cc0f8b7442d83990715c92fdf6f787e1f41/taxon_names_resolver/manip_tools.py#L122-L130
| null |
class TaxDict(dict):
'''Taxonomic Dictionary : hold and return taxonomic information'''
def __init__(self, idents, ranks, lineages, taxonomy=default_taxonomy,
**kwargs):
# add entry for each ident of lineages ordered by taxonomy
# ranks without corresponding lineage are given ''
# 'ident' is the unique name for a taxonomic entity (e.g. query name)
# 'ranks' must be the names of the corresponding ranks in lineages
# (e.g. classification_path_ranks)
# 'lineages' is the names for each of the ranks (e.g.
# classification_path or classification_path_ids)
if taxonomy:
self.taxonomy = taxonomy
else:
self.taxonomy = default_taxonomy
for i in range(len(idents)):
# extract lineage according to given taxonomy
lineage = [lineages[i][ranks[i].index(e)] if e in ranks[i] else ''
for e in self.taxonomy]
# create taxref
taxref = TaxRef(ident=idents[i], rank=ranks[i][-1],
taxonomy=self.taxonomy)
# create key for ident and insert a dictionary of:
# lineage, taxref, cident, ident and rank
self[idents[i]] = {'lineage': lineage, 'taxref': taxref,
'cident': None, 'rank': ranks[i][-1],
'ident': lineage[taxref.level]}
# add addtional optional slots from **kwargs
self._additional(idents, kwargs)
# gen hierarchy
self._hierarchy()
# contexualise
self._contextualise()
def _slice(self, level):
'''Return list of tuples of ident and lineage ident for given level
(numbered rank)'''
if level >= len(self.taxonomy):
raise IndexError('Level greater than size of taxonomy')
res = []
for ident in sorted(list(self.keys())):
res.append((self[ident]['taxref'], self[ident]['lineage'][level]))
return res
def _group(self, taxslice):
'''Return list of lists of idents grouped by shared rank'''
res = []
while taxslice:
taxref, lident = taxslice.pop()
if lident == '':
res.append(([taxref], lident))
else:
# identify idents in the same group and pop from taxslice
i = 0
group = []
while i < len(taxslice):
if taxslice[i][1] == lident:
group.append(taxslice.pop(i)[0])
else:
i += 1
group.append(taxref)
res.append((group, lident))
return res
def _hierarchy(self):
'''Generate dictionary of referenced idents grouped by shared rank'''
self.hierarchy = {}
for rank in self.taxonomy:
# extract lineage idents for this rank
taxslice = self._slice(level=self.taxonomy.index(rank))
# group idents by shared group at this rank
self.hierarchy[rank] = self._group(taxslice)
def _contextualise(self):
'''Determine contextual idents (cidents)'''
# loop through hierarchy identifying unique lineages
# TODO: gain other contextual information, not just ident
deja_vues = []
for rank in reversed(self.taxonomy):
# return named clades -- '' are ignored
clades = [e for e in self.hierarchy[rank] if e[1]]
# print 'Rank: {0} - {1}'.format(rank, len(clades))
# get unique lineages at this level
uniques = [e for e in clades if len(e[0]) == 1]
# removed those already seen
uniques = [e for e in uniques if e[0][0].ident not in deja_vues]
# add each to self[ident]['cident']
for e in uniques:
ident = e[0][0].ident
self[ident]['cident'] = e[1]
deja_vues.append(ident)
|
DomBennett/TaxonNamesResolver
|
taxon_names_resolver/manip_tools.py
|
TaxDict._slice
|
python
|
def _slice(self, level):
'''Return list of tuples of ident and lineage ident for given level
(numbered rank)'''
if level >= len(self.taxonomy):
raise IndexError('Level greater than size of taxonomy')
res = []
for ident in sorted(list(self.keys())):
res.append((self[ident]['taxref'], self[ident]['lineage'][level]))
return res
|
Return list of tuples of ident and lineage ident for given level
(numbered rank)
|
train
|
https://github.com/DomBennett/TaxonNamesResolver/blob/a2556cc0f8b7442d83990715c92fdf6f787e1f41/taxon_names_resolver/manip_tools.py#L132-L140
| null |
class TaxDict(dict):
'''Taxonomic Dictionary : hold and return taxonomic information'''
def __init__(self, idents, ranks, lineages, taxonomy=default_taxonomy,
**kwargs):
# add entry for each ident of lineages ordered by taxonomy
# ranks without corresponding lineage are given ''
# 'ident' is the unique name for a taxonomic entity (e.g. query name)
# 'ranks' must be the names of the corresponding ranks in lineages
# (e.g. classification_path_ranks)
# 'lineages' is the names for each of the ranks (e.g.
# classification_path or classification_path_ids)
if taxonomy:
self.taxonomy = taxonomy
else:
self.taxonomy = default_taxonomy
for i in range(len(idents)):
# extract lineage according to given taxonomy
lineage = [lineages[i][ranks[i].index(e)] if e in ranks[i] else ''
for e in self.taxonomy]
# create taxref
taxref = TaxRef(ident=idents[i], rank=ranks[i][-1],
taxonomy=self.taxonomy)
# create key for ident and insert a dictionary of:
# lineage, taxref, cident, ident and rank
self[idents[i]] = {'lineage': lineage, 'taxref': taxref,
'cident': None, 'rank': ranks[i][-1],
'ident': lineage[taxref.level]}
# add addtional optional slots from **kwargs
self._additional(idents, kwargs)
# gen hierarchy
self._hierarchy()
# contexualise
self._contextualise()
def _additional(self, idents, kwargs):
'''Add additional data slots from **kwargs'''
if kwargs:
for name, value in list(kwargs.items()):
if not isinstance(value, list):
raise ValueError('Additional arguments must be lists of \
same length as idents')
for i in range(len(value)):
self[idents[i]][name] = value[i]
def _group(self, taxslice):
'''Return list of lists of idents grouped by shared rank'''
res = []
while taxslice:
taxref, lident = taxslice.pop()
if lident == '':
res.append(([taxref], lident))
else:
# identify idents in the same group and pop from taxslice
i = 0
group = []
while i < len(taxslice):
if taxslice[i][1] == lident:
group.append(taxslice.pop(i)[0])
else:
i += 1
group.append(taxref)
res.append((group, lident))
return res
def _hierarchy(self):
'''Generate dictionary of referenced idents grouped by shared rank'''
self.hierarchy = {}
for rank in self.taxonomy:
# extract lineage idents for this rank
taxslice = self._slice(level=self.taxonomy.index(rank))
# group idents by shared group at this rank
self.hierarchy[rank] = self._group(taxslice)
def _contextualise(self):
'''Determine contextual idents (cidents)'''
# loop through hierarchy identifying unique lineages
# TODO: gain other contextual information, not just ident
deja_vues = []
for rank in reversed(self.taxonomy):
# return named clades -- '' are ignored
clades = [e for e in self.hierarchy[rank] if e[1]]
# print 'Rank: {0} - {1}'.format(rank, len(clades))
# get unique lineages at this level
uniques = [e for e in clades if len(e[0]) == 1]
# removed those already seen
uniques = [e for e in uniques if e[0][0].ident not in deja_vues]
# add each to self[ident]['cident']
for e in uniques:
ident = e[0][0].ident
self[ident]['cident'] = e[1]
deja_vues.append(ident)
|
DomBennett/TaxonNamesResolver
|
taxon_names_resolver/manip_tools.py
|
TaxDict._group
|
python
|
def _group(self, taxslice):
'''Return list of lists of idents grouped by shared rank'''
res = []
while taxslice:
taxref, lident = taxslice.pop()
if lident == '':
res.append(([taxref], lident))
else:
# identify idents in the same group and pop from taxslice
i = 0
group = []
while i < len(taxslice):
if taxslice[i][1] == lident:
group.append(taxslice.pop(i)[0])
else:
i += 1
group.append(taxref)
res.append((group, lident))
return res
|
Return list of lists of idents grouped by shared rank
|
train
|
https://github.com/DomBennett/TaxonNamesResolver/blob/a2556cc0f8b7442d83990715c92fdf6f787e1f41/taxon_names_resolver/manip_tools.py#L142-L160
| null |
class TaxDict(dict):
'''Taxonomic Dictionary : hold and return taxonomic information'''
def __init__(self, idents, ranks, lineages, taxonomy=default_taxonomy,
**kwargs):
# add entry for each ident of lineages ordered by taxonomy
# ranks without corresponding lineage are given ''
# 'ident' is the unique name for a taxonomic entity (e.g. query name)
# 'ranks' must be the names of the corresponding ranks in lineages
# (e.g. classification_path_ranks)
# 'lineages' is the names for each of the ranks (e.g.
# classification_path or classification_path_ids)
if taxonomy:
self.taxonomy = taxonomy
else:
self.taxonomy = default_taxonomy
for i in range(len(idents)):
# extract lineage according to given taxonomy
lineage = [lineages[i][ranks[i].index(e)] if e in ranks[i] else ''
for e in self.taxonomy]
# create taxref
taxref = TaxRef(ident=idents[i], rank=ranks[i][-1],
taxonomy=self.taxonomy)
# create key for ident and insert a dictionary of:
# lineage, taxref, cident, ident and rank
self[idents[i]] = {'lineage': lineage, 'taxref': taxref,
'cident': None, 'rank': ranks[i][-1],
'ident': lineage[taxref.level]}
# add addtional optional slots from **kwargs
self._additional(idents, kwargs)
# gen hierarchy
self._hierarchy()
# contexualise
self._contextualise()
def _additional(self, idents, kwargs):
'''Add additional data slots from **kwargs'''
if kwargs:
for name, value in list(kwargs.items()):
if not isinstance(value, list):
raise ValueError('Additional arguments must be lists of \
same length as idents')
for i in range(len(value)):
self[idents[i]][name] = value[i]
def _slice(self, level):
'''Return list of tuples of ident and lineage ident for given level
(numbered rank)'''
if level >= len(self.taxonomy):
raise IndexError('Level greater than size of taxonomy')
res = []
for ident in sorted(list(self.keys())):
res.append((self[ident]['taxref'], self[ident]['lineage'][level]))
return res
def _hierarchy(self):
'''Generate dictionary of referenced idents grouped by shared rank'''
self.hierarchy = {}
for rank in self.taxonomy:
# extract lineage idents for this rank
taxslice = self._slice(level=self.taxonomy.index(rank))
# group idents by shared group at this rank
self.hierarchy[rank] = self._group(taxslice)
def _contextualise(self):
'''Determine contextual idents (cidents)'''
# loop through hierarchy identifying unique lineages
# TODO: gain other contextual information, not just ident
deja_vues = []
for rank in reversed(self.taxonomy):
# return named clades -- '' are ignored
clades = [e for e in self.hierarchy[rank] if e[1]]
# print 'Rank: {0} - {1}'.format(rank, len(clades))
# get unique lineages at this level
uniques = [e for e in clades if len(e[0]) == 1]
# removed those already seen
uniques = [e for e in uniques if e[0][0].ident not in deja_vues]
# add each to self[ident]['cident']
for e in uniques:
ident = e[0][0].ident
self[ident]['cident'] = e[1]
deja_vues.append(ident)
|
DomBennett/TaxonNamesResolver
|
taxon_names_resolver/manip_tools.py
|
TaxDict._hierarchy
|
python
|
def _hierarchy(self):
'''Generate dictionary of referenced idents grouped by shared rank'''
self.hierarchy = {}
for rank in self.taxonomy:
# extract lineage idents for this rank
taxslice = self._slice(level=self.taxonomy.index(rank))
# group idents by shared group at this rank
self.hierarchy[rank] = self._group(taxslice)
|
Generate dictionary of referenced idents grouped by shared rank
|
train
|
https://github.com/DomBennett/TaxonNamesResolver/blob/a2556cc0f8b7442d83990715c92fdf6f787e1f41/taxon_names_resolver/manip_tools.py#L162-L169
|
[
" def _slice(self, level):\n '''Return list of tuples of ident and lineage ident for given level\n(numbered rank)'''\n if level >= len(self.taxonomy):\n raise IndexError('Level greater than size of taxonomy')\n res = []\n for ident in sorted(list(self.keys())):\n res.append((self[ident]['taxref'], self[ident]['lineage'][level]))\n return res\n",
"def _group(self, taxslice):\n '''Return list of lists of idents grouped by shared rank'''\n res = []\n while taxslice:\n taxref, lident = taxslice.pop()\n if lident == '':\n res.append(([taxref], lident))\n else:\n # identify idents in the same group and pop from taxslice\n i = 0\n group = []\n while i < len(taxslice):\n if taxslice[i][1] == lident:\n group.append(taxslice.pop(i)[0])\n else:\n i += 1\n group.append(taxref)\n res.append((group, lident))\n return res\n"
] |
class TaxDict(dict):
'''Taxonomic Dictionary : hold and return taxonomic information'''
def __init__(self, idents, ranks, lineages, taxonomy=default_taxonomy,
**kwargs):
# add entry for each ident of lineages ordered by taxonomy
# ranks without corresponding lineage are given ''
# 'ident' is the unique name for a taxonomic entity (e.g. query name)
# 'ranks' must be the names of the corresponding ranks in lineages
# (e.g. classification_path_ranks)
# 'lineages' is the names for each of the ranks (e.g.
# classification_path or classification_path_ids)
if taxonomy:
self.taxonomy = taxonomy
else:
self.taxonomy = default_taxonomy
for i in range(len(idents)):
# extract lineage according to given taxonomy
lineage = [lineages[i][ranks[i].index(e)] if e in ranks[i] else ''
for e in self.taxonomy]
# create taxref
taxref = TaxRef(ident=idents[i], rank=ranks[i][-1],
taxonomy=self.taxonomy)
# create key for ident and insert a dictionary of:
# lineage, taxref, cident, ident and rank
self[idents[i]] = {'lineage': lineage, 'taxref': taxref,
'cident': None, 'rank': ranks[i][-1],
'ident': lineage[taxref.level]}
# add addtional optional slots from **kwargs
self._additional(idents, kwargs)
# gen hierarchy
self._hierarchy()
# contexualise
self._contextualise()
def _additional(self, idents, kwargs):
'''Add additional data slots from **kwargs'''
if kwargs:
for name, value in list(kwargs.items()):
if not isinstance(value, list):
raise ValueError('Additional arguments must be lists of \
same length as idents')
for i in range(len(value)):
self[idents[i]][name] = value[i]
def _slice(self, level):
'''Return list of tuples of ident and lineage ident for given level
(numbered rank)'''
if level >= len(self.taxonomy):
raise IndexError('Level greater than size of taxonomy')
res = []
for ident in sorted(list(self.keys())):
res.append((self[ident]['taxref'], self[ident]['lineage'][level]))
return res
def _group(self, taxslice):
'''Return list of lists of idents grouped by shared rank'''
res = []
while taxslice:
taxref, lident = taxslice.pop()
if lident == '':
res.append(([taxref], lident))
else:
# identify idents in the same group and pop from taxslice
i = 0
group = []
while i < len(taxslice):
if taxslice[i][1] == lident:
group.append(taxslice.pop(i)[0])
else:
i += 1
group.append(taxref)
res.append((group, lident))
return res
    def _contextualise(self):
        '''Determine contextual idents (cidents).

        Walks the taxonomy from lowest to highest rank; the first rank at
        which an ident sits alone in a *named* clade supplies its cident.
        '''
        # TODO: gain other contextual information, not just ident
        deja_vues = []
        for rank in reversed(self.taxonomy):
            # named clades only -- '' entries are ignored
            clades = [e for e in self.hierarchy[rank] if e[1]]
            # unique lineages at this level: groups of exactly one taxref
            uniques = [e for e in clades if len(e[0]) == 1]
            # drop idents already assigned a cident at a lower rank
            uniques = [e for e in uniques if e[0][0].ident not in deja_vues]
            # record the clade name as the cident for each remaining ident
            for e in uniques:
                ident = e[0][0].ident
                self[ident]['cident'] = e[1]
                deja_vues.append(ident)
|
DomBennett/TaxonNamesResolver
|
taxon_names_resolver/manip_tools.py
|
TaxDict._contextualise
|
python
|
def _contextualise(self):
    '''Determine contextual idents (cidents).

    Walks the taxonomy from lowest to highest rank; the first rank at
    which an ident sits alone in a *named* clade supplies its cident.
    '''
    # TODO: gain other contextual information, not just ident
    deja_vues = []
    for rank in reversed(self.taxonomy):
        # named clades only -- '' entries are ignored
        clades = [e for e in self.hierarchy[rank] if e[1]]
        # unique lineages at this level: groups of exactly one taxref
        uniques = [e for e in clades if len(e[0]) == 1]
        # drop idents already assigned a cident at a lower rank
        uniques = [e for e in uniques if e[0][0].ident not in deja_vues]
        # record the clade name as the cident for each remaining ident
        for e in uniques:
            ident = e[0][0].ident
            self[ident]['cident'] = e[1]
            deja_vues.append(ident)
|
Determine contextual idents (cidents)
|
train
|
https://github.com/DomBennett/TaxonNamesResolver/blob/a2556cc0f8b7442d83990715c92fdf6f787e1f41/taxon_names_resolver/manip_tools.py#L171-L188
| null |
class TaxDict(dict):
    '''Taxonomic Dictionary : hold and return taxonomic information.

    Maps each ident to a dict with keys: 'lineage' (names ordered by the
    taxonomy), 'taxref' (a TaxRef instance), 'cident' (contextual ident,
    filled in by _contextualise), 'rank' and 'ident'.
    '''
    def __init__(self, idents, ranks, lineages, taxonomy=default_taxonomy,
                 **kwargs):
        # add entry for each ident of lineages ordered by taxonomy;
        # ranks without corresponding lineage are given ''
        # 'idents' are the unique names for taxonomic entities (e.g. query
        # names)
        # 'ranks' must be the names of the corresponding ranks in lineages
        # (e.g. classification_path_ranks)
        # 'lineages' are the names for each of the ranks (e.g.
        # classification_path or classification_path_ids)
        if taxonomy:
            self.taxonomy = taxonomy
        else:
            # falsy taxonomy (None, []) falls back to the module default
            self.taxonomy = default_taxonomy
        for i in range(len(idents)):
            # extract lineage according to given taxonomy
            lineage = [lineages[i][ranks[i].index(e)] if e in ranks[i] else ''
                       for e in self.taxonomy]
            # create taxref for this ident at its lowest rank
            taxref = TaxRef(ident=idents[i], rank=ranks[i][-1],
                            taxonomy=self.taxonomy)
            # create key for ident and insert a dictionary of:
            # lineage, taxref, cident, ident and rank
            self[idents[i]] = {'lineage': lineage, 'taxref': taxref,
                               'cident': None, 'rank': ranks[i][-1],
                               'ident': lineage[taxref.level]}
        # add additional optional slots from **kwargs
        self._additional(idents, kwargs)
        # generate hierarchy (idents grouped by shared clade per rank)
        self._hierarchy()
        # contextualise (fill in 'cident' slots)
        self._contextualise()
    def _additional(self, idents, kwargs):
        '''Add additional data slots from **kwargs (lists parallel to
        idents).'''
        if kwargs:
            for name, value in list(kwargs.items()):
                if not isinstance(value, list):
                    raise ValueError('Additional arguments must be lists of \
same length as idents')
                for i in range(len(value)):
                    self[idents[i]][name] = value[i]
    def _slice(self, level):
        '''Return a list of (taxref, lineage ident) tuples for the given
        level (numbered rank), ordered alphabetically by ident key.'''
        if level >= len(self.taxonomy):
            raise IndexError('Level greater than size of taxonomy')
        res = []
        for ident in sorted(list(self.keys())):
            res.append((self[ident]['taxref'], self[ident]['lineage'][level]))
        return res
    def _group(self, taxslice):
        '''Return list of (taxrefs, lineage ident) tuples grouped by shared
        lineage ident; consumes taxslice.'''
        res = []
        while taxslice:
            taxref, lident = taxslice.pop()
            if lident == '':
                # unnamed at this rank: singleton group
                res.append(([taxref], lident))
            else:
                # identify idents in the same group and pop from taxslice
                i = 0
                group = []
                while i < len(taxslice):
                    if taxslice[i][1] == lident:
                        group.append(taxslice.pop(i)[0])
                    else:
                        i += 1
                group.append(taxref)
                res.append((group, lident))
        return res
    def _hierarchy(self):
        '''Generate dictionary of referenced idents grouped by shared rank'''
        self.hierarchy = {}
        for rank in self.taxonomy:
            # extract lineage idents for this rank
            taxslice = self._slice(level=self.taxonomy.index(rank))
            # group idents by shared clade at this rank
            self.hierarchy[rank] = self._group(taxslice)
|
DomBennett/TaxonNamesResolver
|
taxon_names_resolver/gnr_tools.py
|
safeReadJSON
|
python
|
def safeReadJSON(url, logger, max_check=6, waittime=30):
    '''Return JSON object fetched from *url*, or None.

    Retries up to *max_check* times, sleeping *waittime* seconds after
    each failure; any exception is logged and treated as a retryable
    error. Returns None once all attempts are exhausted.
    '''
    for _attempt in range(max_check):
        try:
            with contextlib.closing(urllib.request.urlopen(url)) as response:
                payload = response.read().decode('utf8')
            return json.loads(payload)
        except Exception as errmsg:
            # server hiccup or bad response -- log, wait, retry
            logger.info('----- GNR error [{0}] : retrying ----'.format(errmsg))
            time.sleep(waittime)
    logger.error('----- Returning nothing : GNR server may be down -----')
    return None
|
Return JSON object from URL
|
train
|
https://github.com/DomBennett/TaxonNamesResolver/blob/a2556cc0f8b7442d83990715c92fdf6f787e1f41/taxon_names_resolver/gnr_tools.py#L18-L32
| null |
#! /usr/bin/env python
# D. J. Bennett
# 16/05/2014
"""
Tools for interacting with the GNR.
"""
from __future__ import absolute_import
import time
import contextlib
import json
import os
import six
from six.moves import urllib
# FUNCTIONS
# CLASSES
class GnrDataSources(object):
    """GNR data sources class: extract IDs for specified data sources."""
    def __init__(self, logger):
        # fetch the catalogue of all GNR data sources once, up front
        catalogue_url = 'http://resolver.globalnames.org/data_sources.json'
        self.available = safeReadJSON(catalogue_url, logger)
    def summary(self):
        """Return a list of {'id', 'title'} dicts, one per data source."""
        overview = []
        for source in self.available:
            overview.append({'id': source['id'], 'title': source['title']})
        return overview
    def byName(self, names, invert=False):
        """Return ids of sources whose titles are in *names* (or, with
        invert=True, of every source whose title is NOT in *names*)."""
        if invert:
            return [source['id'] for source in self.available
                    if source['title'] not in names]
        return [source['id'] for source in self.available
                if source['title'] in names]
class GnrResolver(object):
    """GNR resolver class: search the GNR (Global Names Resolver)."""
    def __init__(self, logger, datasource='NCBI'):
        self.logger = logger
        ds = GnrDataSources(logger)
        # counter used to number the raw-results JSON files written out
        self.write_counter = 1
        # id(s) of the primary datasource, and of every other datasource
        self.Id = ds.byName(datasource)
        self.otherIds = ds.byName(datasource, invert=True)
        self.waittime = 600  # wait ten minutes if server fail
        self.max_check = 6  # search for up to an hour
    def search(self, terms, prelim=True):
        """Search terms against GNR.

        If prelim is False, search the other datasources for alternative
        names (i.e. synonyms) with which to re-search the main
        datasource. Returns a list of JSON-derived records, or False when
        no alternative names were found.
        """
        # TODO: There are now lots of additional data sources, make additional
        # searching optional (11/01/2017)
        if prelim:  # preliminary search against the primary datasource
            res = self._resolve(terms, self.Id)
            self._write(res)
            return res
        else:  # search other DSs for alt names, search DS with these
            # quick fix: https://github.com/DomBennett/TaxonNamesResolver/issues/5
            # seems to be due to limit on number of ids in single request
            # switching to a for loop for each data source
            # appending all results into single res
            res = []
            for ds_id in self.otherIds:
                tmp = self._resolve(terms, [ds_id])
                res.append(tmp[0])
            self._write(res)
            alt_terms = self._parseNames(res)
            if len(alt_terms) == 0:
                # no synonyms found anywhere
                return False
            else:
                # search the main source again with alt_terms
                # replace names in json
                terms = [each[1] for each in alt_terms]  # unzip
                res = self._resolve(terms, self.Id)
                self._write(res)
                alt_res = self._replaceSupStrNames(res, alt_terms)
                return alt_res
    def _parseNames(self, jobj):
        # return a list of tuples (term, name) from second search
        # TODO(07/06/2013): record DSs used
        alt_terms = []
        for record in jobj:
            if 'results' not in list(record.keys()):
                pass
            else:
                term = record['supplied_name_string']
                results = record['results']
                for result in results:
                    r_name = result['canonical_form']
                    # skip results identical to the query, or with no name
                    if r_name == term or r_name is None:
                        continue
                    alt_terms.append((term, r_name))
        # de-duplicate (ordering is not preserved by set())
        alt_terms = list(set(alt_terms))
        return alt_terms
    def _replaceSupStrNames(self, jobj, alt_terms):
        # replace supplied name in jobj with the original query terms
        for record in jobj:
            sup_name = record['supplied_name_string']
            # find original name in alt_terms
            term = [i for i, each in enumerate(alt_terms) if each[1] ==
                    sup_name]
            # pop from alt_terms and rename json entry; use index 0 to
            # avoid the possibility of having the same term with >1 r_names
            term = alt_terms.pop(term[0])[0]
            record['supplied_name_string'] = term
        return jobj
    def _resolve(self, terms, ds_id):
        # Query server in chunks of 100 names
        chunk_size = 100
        res = []
        lower = 0
        while lower < len(terms):
            upper = min(len(terms), lower + chunk_size)
            self.logger.info('Querying [{0}] to [{1}] of [{2}]'.
                             format(lower, upper, len(terms)))
            query = self._query(terms[lower:upper], ds_id)
            res.append(query)
            lower = upper
        # flatten the per-chunk responses into one list of records
        res = [record for search in res for record in search['data']]
        return(res)
    def _query(self, terms, data_source_ids):
        # build and fetch a GNR name_resolvers.json request
        ds_ids = [str(id) for id in data_source_ids]
        terms = [urllib.parse.quote(six.text_type(t).encode('utf8')) for t in terms]
        url = ('http://resolver.globalnames.org/name_resolvers.json?' +
               'data_source_ids=' + '|'.join(ds_ids) + '&' +
               'resolve_once=false&' + 'names=' + '|'.join(terms))
        return safeReadJSON(url, self.logger)
    def _write(self, jobj):
        # dump raw JSON response to resolved_names/<n>_raw_results.json
        directory = os.path.join(os.getcwd(), 'resolved_names')
        filename = "{0}_raw_results.json".format(self.write_counter)
        jobj_file = os.path.join(directory, filename)
        with open(jobj_file, 'w') as outfile:
            json.dump(jobj, outfile)
        self.write_counter += 1
class GnrStore(dict):
    """GNR store class: acts like a dictionary for GNR JSON format.

    Keys are the supplied search terms; values are lists of GNR result
    records. Results can optionally be restricted to one or more
    taxonomic groups (classification-path ids) given at construction.
    """
    def __init__(self, terms, logger, tax_group=None):
        self.logger = logger
        # Issue 6: suggest multiple tax_groups, not just one
        if not tax_group:
            self.tax_group = tax_group
        else:
            if not isinstance(tax_group, list):
                tax_group = [tax_group]
            # ensure strings, for comparison with classification_path_ids
            self.tax_group = [str(e) for e in tax_group]
        for term in terms:
            self[term] = []
    def _filter(self, results):
        """Drop results whose classification path contains none of the
        requested tax_group ids; pass everything through when no
        tax_group was set."""
        if not self.tax_group:
            return results
        filtered = []
        for result in results:
            classids = result['classification_path_ids'].split('|')
            if any([True if e in classids else False for e in self.tax_group]):
                filtered.append(result)
        return filtered
    def add(self, jobj):
        """Extend stored results with records from a GNR JSON object.

        `jobj` may be a bool (a failed search), in which case nothing
        happens. Records for unknown terms are logged and skipped."""
        if not isinstance(jobj, bool):
            for record in jobj:
                term = record['supplied_name_string']
                try:
                    if 'results' in list(record.keys()):
                        results = self._filter(record['results'])
                        self[term].extend(results)
                except KeyError:
                    # BUG FIX: message previously said "not in self.logger"
                    # (copy-paste error); now matches replace() below.
                    self.logger.debug('JSON object contains terms not in GnrStore')
    def replace(self, jobj):
        """Overwrite stored results for each term in a GNR JSON object.

        Terms whose record has no 'results' key are reset to []."""
        for record in jobj:
            term = record['supplied_name_string']
            try:
                if 'results' in list(record.keys()):
                    results = self._filter(record['results'])
                    self[term] = results
                else:
                    self[term] = []
            except KeyError:
                self.logger.debug('JSON object contains terms not in GnrStore')
|
DomBennett/TaxonNamesResolver
|
taxon_names_resolver/gnr_tools.py
|
GnrResolver.search
|
python
|
def search(self, terms, prelim=True):
    """Search terms against GNR.

    If prelim is False, search the other datasources for alternative
    names (i.e. synonyms) with which to re-search the main datasource.
    Returns a list of JSON-derived records, or False when no
    alternative names were found.
    """
    # TODO: There are now lots of additional data sources, make additional
    # searching optional (11/01/2017)
    if prelim:  # preliminary search against the primary datasource
        res = self._resolve(terms, self.Id)
        self._write(res)
        return res
    else:  # search other DSs for alt names, search DS with these
        # quick fix: https://github.com/DomBennett/TaxonNamesResolver/issues/5
        # seems to be due to limit on number of ids in single request
        # switching to a for loop for each data source
        # appending all results into single res
        res = []
        for ds_id in self.otherIds:
            tmp = self._resolve(terms, [ds_id])
            res.append(tmp[0])
        self._write(res)
        alt_terms = self._parseNames(res)
        if len(alt_terms) == 0:
            # no synonyms found anywhere
            return False
        else:
            # search the main source again with alt_terms
            # replace names in json
            terms = [each[1] for each in alt_terms]  # unzip
            res = self._resolve(terms, self.Id)
            self._write(res)
            alt_res = self._replaceSupStrNames(res, alt_terms)
            return alt_res
|
Search terms against GNR. If prelim = False, search other datasources \
for alternative names (i.e. synonyms) with which to search main datasource.\
Return JSON object.
|
train
|
https://github.com/DomBennett/TaxonNamesResolver/blob/a2556cc0f8b7442d83990715c92fdf6f787e1f41/taxon_names_resolver/gnr_tools.py#L67-L97
|
[
"def _parseNames(self, jobj):\n # return a list of tuples (term, name) from second search\n # TODO(07/06/2013): record DSs used\n alt_terms = []\n for record in jobj:\n if 'results' not in list(record.keys()):\n pass\n else:\n term = record['supplied_name_string']\n results = record['results']\n for result in results:\n r_name = result['canonical_form']\n if r_name == term or r_name is None:\n continue\n alt_terms.append((term, r_name))\n alt_terms = list(set(alt_terms))\n return alt_terms\n",
"def _replaceSupStrNames(self, jobj, alt_terms):\n # replace sup name in jobj with original terms\n for record in jobj:\n sup_name = record['supplied_name_string']\n # find original name in alt_terms\n term = [i for i, each in enumerate(alt_terms) if each[1] ==\n sup_name]\n # pop from alt_terms and rename json use 0 to\n # avoid the possibility of having the same term with >1 r_names\n term = alt_terms.pop(term[0])[0]\n record['supplied_name_string'] = term\n return jobj\n",
"def _resolve(self, terms, ds_id):\n # Query server in chunks\n chunk_size = 100\n res = []\n lower = 0\n while lower < len(terms):\n upper = min(len(terms), lower + chunk_size)\n self.logger.info('Querying [{0}] to [{1}] of [{2}]'.\n format(lower, upper, len(terms)))\n query = self._query(terms[lower:upper], ds_id)\n res.append(query)\n lower = upper\n res = [record for search in res for record in search['data']]\n return(res)\n",
"def _write(self, jobj):\n directory = os.path.join(os.getcwd(), 'resolved_names')\n filename = \"{0}_raw_results.json\".format(self.write_counter)\n jobj_file = os.path.join(directory, filename)\n with open(jobj_file, 'w') as outfile:\n json.dump(jobj, outfile)\n self.write_counter += 1\n"
] |
class GnrResolver(object):
    """GNR resolver class: search the GNR (Global Names Resolver)."""
    def __init__(self, logger, datasource='NCBI'):
        self.logger = logger
        ds = GnrDataSources(logger)
        # counter used to number the raw-results JSON files written out
        self.write_counter = 1
        # id(s) of the primary datasource, and of every other datasource
        self.Id = ds.byName(datasource)
        self.otherIds = ds.byName(datasource, invert=True)
        self.waittime = 600  # wait ten minutes if server fail
        self.max_check = 6  # search for up to an hour
    def _parseNames(self, jobj):
        # return a list of tuples (term, name) from second search
        # TODO(07/06/2013): record DSs used
        alt_terms = []
        for record in jobj:
            if 'results' not in list(record.keys()):
                pass
            else:
                term = record['supplied_name_string']
                results = record['results']
                for result in results:
                    r_name = result['canonical_form']
                    # skip results identical to the query, or with no name
                    if r_name == term or r_name is None:
                        continue
                    alt_terms.append((term, r_name))
        # de-duplicate (ordering is not preserved by set())
        alt_terms = list(set(alt_terms))
        return alt_terms
    def _replaceSupStrNames(self, jobj, alt_terms):
        # replace supplied name in jobj with the original query terms
        for record in jobj:
            sup_name = record['supplied_name_string']
            # find original name in alt_terms
            term = [i for i, each in enumerate(alt_terms) if each[1] ==
                    sup_name]
            # pop from alt_terms and rename json entry; use index 0 to
            # avoid the possibility of having the same term with >1 r_names
            term = alt_terms.pop(term[0])[0]
            record['supplied_name_string'] = term
        return jobj
    def _resolve(self, terms, ds_id):
        # Query server in chunks of 100 names
        chunk_size = 100
        res = []
        lower = 0
        while lower < len(terms):
            upper = min(len(terms), lower + chunk_size)
            self.logger.info('Querying [{0}] to [{1}] of [{2}]'.
                             format(lower, upper, len(terms)))
            query = self._query(terms[lower:upper], ds_id)
            res.append(query)
            lower = upper
        # flatten the per-chunk responses into one list of records
        res = [record for search in res for record in search['data']]
        return(res)
    def _query(self, terms, data_source_ids):
        # build and fetch a GNR name_resolvers.json request
        ds_ids = [str(id) for id in data_source_ids]
        terms = [urllib.parse.quote(six.text_type(t).encode('utf8')) for t in terms]
        url = ('http://resolver.globalnames.org/name_resolvers.json?' +
               'data_source_ids=' + '|'.join(ds_ids) + '&' +
               'resolve_once=false&' + 'names=' + '|'.join(terms))
        return safeReadJSON(url, self.logger)
    def _write(self, jobj):
        # dump raw JSON response to resolved_names/<n>_raw_results.json
        directory = os.path.join(os.getcwd(), 'resolved_names')
        filename = "{0}_raw_results.json".format(self.write_counter)
        jobj_file = os.path.join(directory, filename)
        with open(jobj_file, 'w') as outfile:
            json.dump(jobj, outfile)
        self.write_counter += 1
|
DomBennett/TaxonNamesResolver
|
TaxonNamesResolver.py
|
parseArgs
|
python
|
def parseArgs():
    """Read command-line arguments; returns the parsed argparse Namespace."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-names", "-n", help=".txt file of taxonomic names")
    parser.add_argument("-datasource", "-d", help="taxonomic datasource by \
which names will be resolved (default NCBI)")
    parser.add_argument("-taxonid", "-t", help="parent taxonomic ID")
    parser.add_argument("--verbose", help="increase output verbosity",
                        action="store_true")
    parser.add_argument('--details', help='display information about the \
program', action='store_true')
    return parser.parse_args()
|
Read arguments
|
train
|
https://github.com/DomBennett/TaxonNamesResolver/blob/a2556cc0f8b7442d83990715c92fdf6f787e1f41/TaxonNamesResolver.py#L30-L41
| null |
#! /usr/bin/env python
# D.J. Bennett
# 16/05/2014
# Import
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import argparse
import logging
import platform
from datetime import datetime
from taxon_names_resolver import Resolver
from taxon_names_resolver import __version__ as version
from taxon_names_resolver import __doc__ as details
description = """
----------------------------------------------------------------------
TaxonNamesResolver Version {0}, Copyright (C) Bennett 2014
----------------------------------------------------------------------
This program comes with ABSOLUTELY NO WARRANTY. This is free software,
and you are welcome to redistribute it under certain conditions.
For more details, type `TaxonNamesResolver.py --details`.
----------------------------------------------------------------------
""".format(version)
# FUNCTIONS
def logSysInfo():
    """Write system info (date, host, platform, Python version) to log file"""
    logger.info('#' * 70)
    logger.info(datetime.today().strftime("%A, %d %B %Y %I:%M%p"))
    logger.info('Running on [{0}] [{1}]'.format(platform.node(),
                                                platform.platform()))
    logger.info('Python [{0}]'.format(sys.version))
    logger.info('#' * 70 + '\n')
def logEndTime():
    """Write completion banner and end time to log"""
    logger.info('\n' + '#' * 70)
    logger.info('Complete')
    logger.info(datetime.today().strftime("%A, %d %B %Y %I:%M%p"))
    logger.info('#' * 70 + '\n')
# MAIN
if __name__ == '__main__':
    args = parseArgs()
    # --details: print program info and exit
    if args.details:
        print('\nThis is TaxonNamesResolver, version: [{0}]'.format(version))
        print(details)
        sys.exit()
    # a names file is required and must exist
    if not args.names:
        print('No names file provided!')
        print('Type `TaxonNamesResolver.py -h` for help.')
        sys.exit()
    if not os.path.isfile(args.names):
        print('[{0}] could not be found!'.format(args.names))
        sys.exit()
    print('\n' + description + '\n')
    # default to NCBI when no datasource is given
    if args.datasource:
        datasource = args.datasource
    else:
        datasource = 'NCBI'
    # simple logging, no levels, duplicate to console if verbose
    logfile = 'log.txt'
    logger = logging.getLogger('')
    logger.setLevel(logging.INFO)
    loghandler = logging.FileHandler(logfile, 'a')
    loghandler.setFormatter(logging.Formatter('%(message)s'))
    logger.addHandler(loghandler)
    if args.verbose:
        console = logging.StreamHandler()
        console.setFormatter(logging.Formatter('%(message)s'))
        logger.addHandler(console)
    # log system info
    logSysInfo()
    # run the resolver; results go to the 'resolved_names' folder
    resolver = Resolver(args.names, datasource, args.taxonid)
    resolver.main()
    resolver.write()
    logEndTime()
    if not args.verbose:
        print('\nComplete\n')
|
DomBennett/TaxonNamesResolver
|
TaxonNamesResolver.py
|
logSysInfo
|
python
|
def logSysInfo():
    """Write system info (date, host, platform, Python version) to log file"""
    logger.info('#' * 70)
    logger.info(datetime.today().strftime("%A, %d %B %Y %I:%M%p"))
    logger.info('Running on [{0}] [{1}]'.format(platform.node(),
                                                platform.platform()))
    logger.info('Python [{0}]'.format(sys.version))
    logger.info('#' * 70 + '\n')
|
Write system info to log file
|
train
|
https://github.com/DomBennett/TaxonNamesResolver/blob/a2556cc0f8b7442d83990715c92fdf6f787e1f41/TaxonNamesResolver.py#L44-L51
| null |
#! /usr/bin/env python
# D.J. Bennett
# 16/05/2014
# Import
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import argparse
import logging
import platform
from datetime import datetime
from taxon_names_resolver import Resolver
from taxon_names_resolver import __version__ as version
from taxon_names_resolver import __doc__ as details
description = """
----------------------------------------------------------------------
TaxonNamesResolver Version {0}, Copyright (C) Bennett 2014
----------------------------------------------------------------------
This program comes with ABSOLUTELY NO WARRANTY. This is free software,
and you are welcome to redistribute it under certain conditions.
For more details, type `TaxonNamesResolver.py --details`.
----------------------------------------------------------------------
""".format(version)
# FUNCTIONS
def parseArgs():
    """Read command-line arguments; returns the parsed argparse Namespace."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-names", "-n", help=".txt file of taxonomic names")
    parser.add_argument("-datasource", "-d", help="taxonomic datasource by \
which names will be resolved (default NCBI)")
    parser.add_argument("-taxonid", "-t", help="parent taxonomic ID")
    parser.add_argument("--verbose", help="increase output verbosity",
                        action="store_true")
    parser.add_argument('--details', help='display information about the \
program', action='store_true')
    return parser.parse_args()
def logEndTime():
    """Write completion banner and end time to log"""
    logger.info('\n' + '#' * 70)
    logger.info('Complete')
    logger.info(datetime.today().strftime("%A, %d %B %Y %I:%M%p"))
    logger.info('#' * 70 + '\n')
# MAIN
if __name__ == '__main__':
    args = parseArgs()
    # --details: print program info and exit
    if args.details:
        print('\nThis is TaxonNamesResolver, version: [{0}]'.format(version))
        print(details)
        sys.exit()
    # a names file is required and must exist
    if not args.names:
        print('No names file provided!')
        print('Type `TaxonNamesResolver.py -h` for help.')
        sys.exit()
    if not os.path.isfile(args.names):
        print('[{0}] could not be found!'.format(args.names))
        sys.exit()
    print('\n' + description + '\n')
    # default to NCBI when no datasource is given
    if args.datasource:
        datasource = args.datasource
    else:
        datasource = 'NCBI'
    # simple logging, no levels, duplicate to console if verbose
    logfile = 'log.txt'
    logger = logging.getLogger('')
    logger.setLevel(logging.INFO)
    loghandler = logging.FileHandler(logfile, 'a')
    loghandler.setFormatter(logging.Formatter('%(message)s'))
    logger.addHandler(loghandler)
    if args.verbose:
        console = logging.StreamHandler()
        console.setFormatter(logging.Formatter('%(message)s'))
        logger.addHandler(console)
    # log system info
    logSysInfo()
    # run the resolver; results go to the 'resolved_names' folder
    resolver = Resolver(args.names, datasource, args.taxonid)
    resolver.main()
    resolver.write()
    logEndTime()
    if not args.verbose:
        print('\nComplete\n')
|
DomBennett/TaxonNamesResolver
|
TaxonNamesResolver.py
|
logEndTime
|
python
|
def logEndTime():
    """Write completion banner and end time to log"""
    logger.info('\n' + '#' * 70)
    logger.info('Complete')
    logger.info(datetime.today().strftime("%A, %d %B %Y %I:%M%p"))
    logger.info('#' * 70 + '\n')
|
Write end info to log
|
train
|
https://github.com/DomBennett/TaxonNamesResolver/blob/a2556cc0f8b7442d83990715c92fdf6f787e1f41/TaxonNamesResolver.py#L54-L59
| null |
#! /usr/bin/env python
# D.J. Bennett
# 16/05/2014
# Import
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import argparse
import logging
import platform
from datetime import datetime
from taxon_names_resolver import Resolver
from taxon_names_resolver import __version__ as version
from taxon_names_resolver import __doc__ as details
description = """
----------------------------------------------------------------------
TaxonNamesResolver Version {0}, Copyright (C) Bennett 2014
----------------------------------------------------------------------
This program comes with ABSOLUTELY NO WARRANTY. This is free software,
and you are welcome to redistribute it under certain conditions.
For more details, type `TaxonNamesResolver.py --details`.
----------------------------------------------------------------------
""".format(version)
# FUNCTIONS
def parseArgs():
    """Read command-line arguments; returns the parsed argparse Namespace."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-names", "-n", help=".txt file of taxonomic names")
    parser.add_argument("-datasource", "-d", help="taxonomic datasource by \
which names will be resolved (default NCBI)")
    parser.add_argument("-taxonid", "-t", help="parent taxonomic ID")
    parser.add_argument("--verbose", help="increase output verbosity",
                        action="store_true")
    parser.add_argument('--details', help='display information about the \
program', action='store_true')
    return parser.parse_args()
def logSysInfo():
    """Write system info (date, host, platform, Python version) to log file"""
    logger.info('#' * 70)
    logger.info(datetime.today().strftime("%A, %d %B %Y %I:%M%p"))
    logger.info('Running on [{0}] [{1}]'.format(platform.node(),
                                                platform.platform()))
    logger.info('Python [{0}]'.format(sys.version))
    logger.info('#' * 70 + '\n')
# MAIN
if __name__ == '__main__':
    args = parseArgs()
    # --details: print program info and exit
    if args.details:
        print('\nThis is TaxonNamesResolver, version: [{0}]'.format(version))
        print(details)
        sys.exit()
    # a names file is required and must exist
    if not args.names:
        print('No names file provided!')
        print('Type `TaxonNamesResolver.py -h` for help.')
        sys.exit()
    if not os.path.isfile(args.names):
        print('[{0}] could not be found!'.format(args.names))
        sys.exit()
    print('\n' + description + '\n')
    # default to NCBI when no datasource is given
    if args.datasource:
        datasource = args.datasource
    else:
        datasource = 'NCBI'
    # simple logging, no levels, duplicate to console if verbose
    logfile = 'log.txt'
    logger = logging.getLogger('')
    logger.setLevel(logging.INFO)
    loghandler = logging.FileHandler(logfile, 'a')
    loghandler.setFormatter(logging.Formatter('%(message)s'))
    logger.addHandler(loghandler)
    if args.verbose:
        console = logging.StreamHandler()
        console.setFormatter(logging.Formatter('%(message)s'))
        logger.addHandler(console)
    # log system info
    logSysInfo()
    # run the resolver; results go to the 'resolved_names' folder
    resolver = Resolver(args.names, datasource, args.taxonid)
    resolver.main()
    resolver.write()
    logEndTime()
    if not args.verbose:
        print('\nComplete\n')
|
DomBennett/TaxonNamesResolver
|
taxon_names_resolver/resolver.py
|
Resolver._check
|
python
|
def _check(self, terms):
    '''Check terms do not contain characters that cannot be URL-encoded.

    Logs the offending term and raises EncodingError on failure;
    returns None when every term encodes cleanly.
    '''
    for t in terms:
        try:
            _ = urllib.parse.quote(six.text_type(t).encode('utf8'))
        # FIX: narrowed from a bare `except:`, which would also swallow
        # KeyboardInterrupt/SystemExit and mask unrelated failures.
        except Exception:
            self.logger.error('Unknown character in [{0}]!'.format(t))
            self.logger.error('.... remove character and try again.')
            raise EncodingError
|
Check terms do not contain unknown characters
|
train
|
https://github.com/DomBennett/TaxonNamesResolver/blob/a2556cc0f8b7442d83990715c92fdf6f787e1f41/taxon_names_resolver/resolver.py#L72-L80
| null |
class Resolver(object):
    """Taxon Names Resolver class: automatically resolves taxon names
    through GNR. All output is written to the 'resolved_names' folder.
    See https://github.com/DomBennett/TaxonNamesResolver for details."""
    def __init__(self, input_file=None, datasource='NCBI', taxon_id=None,
                 terms=None, lowrank=False, logger=logging.getLogger('')):
        # add logger
        self.logger = logger
        # organising dirs: all output goes to <cwd>/resolved_names
        self.directory = os.getcwd()
        self.outdir = os.path.join(self.directory, 'resolved_names')
        if not os.path.exists(self.outdir):
            os.makedirs(self.outdir)
        if input_file:
            input_file = os.path.join(self.directory, input_file)
            # reading in terms: one name per line, blanks dropped
            terms = []
            with open(input_file) as names:
                for name in names:
                    terms.append(name.strip())
            terms = [term for term in terms if not term == '']
        else:
            if not terms:
                self.logger.info("No terms provided")
        # de-duplicate (ordering is not preserved)
        terms = list(set(terms))
        self.logger.info('Found [{0}] taxon names to search in input file... '.
                         format(len(terms)))
        self.logger.info('... of which [{0}] are unique.'.format(len(terms)))
        # init dep classes; _check raises if a term cannot be URL-encoded
        self._check(terms)
        self.terms = terms
        self._res = GnrResolver(logger=self.logger, datasource=datasource)
        self.primary_datasource = datasource
        self._store = GnrStore(terms, tax_group=taxon_id, logger=self.logger)
        # field names from the GNR API: http://resolver.globalnames.org/api
        self.key_terms = ['query_name', 'classification_path',
                          'data_source_title', 'match_type', 'score',
                          'classification_path_ranks', 'name_string',
                          'canonical_form', 'classification_path_ids',
                          'prescore', 'data_source_id', 'taxon_id', 'gni_uuid']
        self.lowrank = lowrank  # return lowest ranked match
        # self.tnr_obj = []  # this will hold all output
    def main(self):
        """Search and sieve query names.

        Runs up to four search passes: (1) primary datasource, (2) other
        datasources, (3) genus-only names on the primary datasource,
        (4) genus-only names on the other datasources. Names still
        without records after that remain unresolved.
        """
        # TODO: Break up, too complex
        primary_bool = True
        no_records = True
        nsearch = 1
        search_terms = self.terms
        original_names = []
        while True:
            if primary_bool:
                self.logger.info('Searching [{0}] ...'.format(
                    self.primary_datasource))
            else:
                self.logger.info('Searching other datasources ...')
            res = self._res.search(search_terms, prelim=primary_bool)
            if nsearch > 2 and res:
                # genus-only passes: restore each record's original
                # (binomial) query name before storing
                for each_res, original_name in zip(res, original_names):
                    each_res['supplied_name_string'] = original_name
            self._store.add(res)
            # Check for returns without records
            no_records = self._count(nrecords=1)
            if no_records:
                if nsearch == 1:
                    primary_bool = False
                elif nsearch == 2:
                    original_names = no_records
                    # fall back to genus names (first word of each term)
                    no_records = [e.split()[0] for e in no_records]
                    primary_bool = True
                elif nsearch == 3:
                    original_names = no_records
                    no_records = [e.split()[0] for e in no_records]
                    primary_bool = False
                else:
                    break
            else:
                break
            nsearch += 1
            search_terms = no_records
        # Check for multiple records per name and reduce to the best one
        multi_records = self._count(greater=True, nrecords=1)
        if multi_records:
            self.logger.info('Choosing best records to return ...')
            res = self._sieve(multi_records)
            self._store.replace(res)
    # def extract(self, what):  # depends on tnr
    #    lkeys = ['qnames', 'rnames', 'taxonids', 'ranks']
    #    i = [i for i, each in enumerate(lkeys) if what is each][0]
    #    res = [each[i] for each in self.tnr_obj]
    #    return res
    def _readInJson(self):
        # load the preliminary search results written to the out dir
        jobj_file = os.path.join(self.outdir, 'prelim_search_results.json')
        with open(jobj_file, 'r') as infile:
            jobj = json.load(infile)
        return jobj
    def _count(self, greater=False, nrecords=0):
        # return a list of all keys that have record counts greater
        # (greater=True) or less than nrecords; False if none match
        GnrStore = self._store
        assessed = []
        lens = [len(GnrStore[key]) for key in list(GnrStore.keys())]
        if greater:
            len_bools = [each > nrecords for each in lens]
        else:
            len_bools = [each < nrecords for each in lens]
        for i, key in enumerate(GnrStore.keys()):
            if len_bools[i]:
                assessed.append(key)
        if len(assessed) == 0:
            return False
        else:
            return assessed
    def _sieve(self, multiple_records):
        """Return a JSON-style list without multiple results per resolved
        name. Names with multiple records are reduced by keeping the
        highest score, then (if lowrank) the lowest taxonomic rank, then
        the first result returned."""
        # TODO: Break up, too complex
        GnrStore = self._store
        def writeAsJson(term, results):
            # package a term and its surviving results as a GNR record
            record = {'supplied_name_string': term}
            if len(results) > 0:
                record['results'] = results
            return record
        def boolResults(results, bool_li, rand=False):
            # keep results flagged 1 in bool_li; rand=True keeps just the
            # first result; an all-zero mask yields no record
            if rand:
                # choose first record (most likely best?)
                results = [results[0]]
            elif sum(bool_li) == 1:
                results = [results[bool_li.index(1)]]
            elif sum(bool_li) == 0:
                # return 'no_record'
                return []
            else:
                results = [result for i, result in enumerate(results) if
                           bool_li[i]]
            return results
        sieved = []
        ranks = ['species', 'genus', 'family', 'order', 'superorder', 'class',
                 'superclass', 'subphylum', 'phylum', 'kingdom',
                 'superkingdom']
        for term in multiple_records:
            results = GnrStore[term]
            while len(results) > 1:
                # choose result with best score
                scores = [result['score'] for result in results]
                bool_score = [1 if score == max(scores) else 0 for score in
                              scores]
                results = boolResults(results, bool_score)
                # choose result resolved to lowest taxonomic rank
                if self.lowrank:
                    res_ranks = [result['classification_path_ranks'].
                                 split('|') for result in results]
                    # calculate 'rank scores' for named and un-named ranks
                    nmd_rnks = []
                    unnmd_rnks = []
                    for rs in res_ranks:
                        nmd_rnks.append(min([j for j,e in enumerate(ranks) if
                                             e in rs]))
                        unnmd_rnk = [j for j,e in enumerate(rs) if
                                     e == ranks[nmd_rnks[-1]]][0]
                        unnmd_rnk -= len(rs)
                        unnmd_rnks.append(unnmd_rnk)
                    # calculate bool mask from the combined rank scores
                    unnmd_rnks = [e if nmd_rnks[j] == min(nmd_rnks) else 0 for
                                  j,e in enumerate(unnmd_rnks)]
                    bool_rank = [1 if e == min(unnmd_rnks) else 0 for e in
                                 unnmd_rnks]
                    results = boolResults(results, bool_rank)
                # still tied: fall back to the first result
                results = boolResults(results, [], rand=True)
            record = writeAsJson(term, results)
            sieved.append(record)
        return sieved
    def write(self):
        """Write csv file of resolved names and txt file of unresolved names.
        """
        csv_file = os.path.join(self.outdir, 'search_results.csv')
        txt_file = os.path.join(self.outdir, 'unresolved.txt')
        headers = self.key_terms
        unresolved = []
        with open(csv_file, 'w') as file:
            writer = csv.writer(file)
            writer.writerow(headers)
            for key in list(self._store.keys()):
                results = self._store[key]
                if len(results) == 0:
                    # no record survived any search pass
                    unresolved.append(key)
                else:
                    row = [key]
                    for key_term in headers[1:]:
                        element = results[0][key_term]
                        # GNR returns UTF-8, csv requires ascii
                        #
                        # *** Note ***
                        # According to all docs for csv versions >= 2.6, csv
                        # can handle either UTF-8 or ascii, just not Unicode.
                        # In py3, the following two lines result in csv printing
                        # the element with a bitstring. If GNR is actually
                        # returning UTF-8, it seems easiest to just drop these
                        # if 'encode' in dir(element):
                        #     element = element.encode('ascii')
                        row.append(element)
                    writer.writerow(row)
        if len(unresolved) > 0:
            with open(txt_file, 'w') as file:
                for name in unresolved:
                    file.write("{0}\n".format(name))
    def retrieve(self, key_term):
        """Return data for the specified key term for each resolved name,
        as a list. Valid terms (02/12/2013): 'query_name',
        'classification_path', 'data_source_title', 'match_type', 'score',
        'classification_path_ranks', 'name_string', 'canonical_form',
        'classification_path_ids', 'prescore', 'data_source_id',
        'taxon_id', 'gni_uuid'."""
        if key_term not in self.key_terms:
            raise IndexError('Term given is invalid! Check doc string for \
valid terms.')
        store = self._store
        retrieved = []
        for key in list(store.keys()):
            # take copy, so changes made to the returned list do not affect
            # store
            record = copy.deepcopy(store[key])
            if len(record) > 0:
                if key_term == 'query_name':
                    retrieved.append(key)
                else:
                    retrieved.append(record[0][key_term])
        # split '|'-delimited classification paths into lists,
        # dropping the empty leading element
        if re.search('path', key_term):
            retrieved = [[r2 for r2 in r1.split('|')[1:]] for r1 in retrieved]
        return retrieved
|
DomBennett/TaxonNamesResolver
|
taxon_names_resolver/resolver.py
|
Resolver.main
|
python
|
def main(self):
# TODO: Break up, too complex
primary_bool = True
no_records = True
nsearch = 1
search_terms = self.terms
original_names = []
while True:
if primary_bool:
self.logger.info('Searching [{0}] ...'.format(
self.primary_datasource))
else:
self.logger.info('Searching other datasources ...')
res = self._res.search(search_terms, prelim=primary_bool)
if nsearch > 2 and res:
# if second search failed, look up alternative names
for each_res, original_name in zip(res, original_names):
each_res['supplied_name_string'] = original_name
self._store.add(res)
# Check for returns without records
no_records = self._count(nrecords=1)
if no_records:
if nsearch == 1:
primary_bool = False
elif nsearch == 2:
original_names = no_records
# genus names
no_records = [e.split()[0] for e in no_records]
primary_bool = True
elif nsearch == 3:
original_names = no_records
no_records = [e.split()[0] for e in no_records]
primary_bool = False
else:
break
else:
break
nsearch += 1
search_terms = no_records
# Check for multiple records
multi_records = self._count(greater=True, nrecords=1)
if multi_records:
self.logger.info('Choosing best records to return ...')
res = self._sieve(multi_records)
self._store.replace(res)
|
Search and sieve query names.
|
train
|
https://github.com/DomBennett/TaxonNamesResolver/blob/a2556cc0f8b7442d83990715c92fdf6f787e1f41/taxon_names_resolver/resolver.py#L82-L127
|
[
" def search(self, terms, prelim=True):\n \"\"\"Search terms against GNR. If prelim = False, search other datasources \\\nfor alternative names (i.e. synonyms) with which to search main datasource.\\\nReturn JSON object.\"\"\"\n # TODO: There are now lots of additional data sources, make additional\n # searching optional (11/01/2017)\n if prelim: # preliminary search\n res = self._resolve(terms, self.Id)\n self._write(res)\n return res\n else: # search other DSs for alt names, search DS with these\n # quick fix: https://github.com/DomBennett/TaxonNamesResolver/issues/5\n # seems to be due to limit on number of ids in single request\n # switiching to a for loop for each data source\n # appending all results into single res\n res = []\n for ds_id in self.otherIds:\n tmp = self._resolve(terms, [ds_id])\n res.append(tmp[0])\n self._write(res)\n alt_terms = self._parseNames(res)\n if len(alt_terms) == 0:\n return False\n else:\n # search the main source again with alt_terms\n # replace names in json\n terms = [each[1] for each in alt_terms] # unzip\n res = self._resolve(terms, self.Id)\n self._write(res)\n alt_res = self._replaceSupStrNames(res, alt_terms)\n return alt_res\n",
"def add(self, jobj):\n if not isinstance(jobj, bool):\n for record in jobj:\n term = record['supplied_name_string']\n try:\n if 'results' in list(record.keys()):\n results = self._filter(record['results'])\n self[term].extend(results)\n except KeyError:\n self.logger.debug('JSON object contains terms not in self.logger')\n",
"def replace(self, jobj):\n for record in jobj:\n term = record['supplied_name_string']\n try:\n if 'results' in list(record.keys()):\n results = self._filter(record['results'])\n self[term] = results\n else:\n self[term] = []\n except KeyError:\n self.logger.debug('JSON object contains terms not in GnrStore')\n",
"def _count(self, greater=False, nrecords=0):\n # return a list of all keys that have records\n # greater or less than nrecords\n GnrStore = self._store\n assessed = []\n lens = [len(GnrStore[key]) for key in list(GnrStore.keys())]\n if greater:\n len_bools = [each > nrecords for each in lens]\n else:\n len_bools = [each < nrecords for each in lens]\n for i, key in enumerate(GnrStore.keys()):\n if len_bools[i]:\n assessed.append(key)\n if len(assessed) == 0:\n return False\n else:\n return assessed\n",
" def _sieve(self, multiple_records):\n \"\"\"Return json object without multiple returns per resolved name.\\\nNames with multiple records are reduced by finding the name in the clade of\\\ninterest, have the highest score, have the lowest taxonomic rank (if lowrank is\ntrue) and/or are the first item returned.\"\"\"\n # TODO: Break up, too complex\n GnrStore = self._store\n\n def writeAsJson(term, results):\n record = {'supplied_name_string': term}\n if len(results) > 0:\n record['results'] = results\n return record\n\n def boolResults(results, bool_li, rand=False):\n if rand:\n # choose first record (most likely best?)\n results = [results[0]]\n elif sum(bool_li) == 1:\n results = [results[bool_li.index(1)]]\n elif sum(bool_li) == 0:\n # return 'no_record'\n return []\n else:\n results = [result for i, result in enumerate(results) if\n bool_li[i]]\n return results\n\n sieved = []\n ranks = ['species', 'genus', 'family', 'order', 'superorder', 'class',\n 'superclass', 'subphylum', 'phylum', 'kingdom',\n 'superkingdom']\n for term in multiple_records:\n results = GnrStore[term]\n while len(results) > 1:\n # choose result with best score\n scores = [result['score'] for result in results]\n bool_score = [1 if score == max(scores) else 0 for score in\n scores]\n results = boolResults(results, bool_score)\n # choose result resolved to lowest taxonomic rank\n if self.lowrank:\n res_ranks = [result['classification_path_ranks'].\n split('|') for result in results]\n # calculate 'rank scores' for named and un-named ranks\n nmd_rnks = []\n unnmd_rnks = []\n for rs in res_ranks:\n nmd_rnks.append(min([j for j,e in enumerate(ranks) if\n e in rs]))\n unnmd_rnk = [j for j,e in enumerate(rs) if\n e == ranks[nmd_rnks[-1]]][0]\n unnmd_rnk -= len(rs)\n unnmd_rnks.append(unnmd_rnk)\n # calculate bool\n unnmd_rnks = [e if nmd_rnks[j] == min(nmd_rnks) else 0 for\n j,e in enumerate(unnmd_rnks)]\n bool_rank = [1 if e == min(unnmd_rnks) else 0 for e in\n unnmd_rnks]\n results = 
boolResults(results, bool_rank)\n results = boolResults(results, [], rand=True)\n record = writeAsJson(term, results)\n sieved.append(record)\n return sieved\n"
] |
class Resolver(object):
"""Taxon Names Resovler class : Automatically resolves taxon names \
through GNR. All output written in 'resolved_names' folder.
See https://github.com/DomBennett/TaxonNamesResolver for details."""
def __init__(self, input_file=None, datasource='NCBI', taxon_id=None,
terms=None, lowrank=False, logger=logging.getLogger('')):
# add logger
self.logger = logger
# organising dirs
self.directory = os.getcwd()
self.outdir = os.path.join(self.directory, 'resolved_names')
if not os.path.exists(self.outdir):
os.makedirs(self.outdir)
if input_file:
input_file = os.path.join(self.directory, input_file)
# reading in terms
terms = []
with open(input_file) as names:
for name in names:
terms.append(name.strip())
terms = [term for term in terms if not term == '']
else:
if not terms:
self.logger.info("No terms provided")
terms = list(set(terms))
self.logger.info('Found [{0}] taxon names to search in input file... '.
format(len(terms)))
self.logger.info('... of which [{0}] are unique.'.format(len(terms)))
# init dep classes
self._check(terms)
self.terms = terms
self._res = GnrResolver(logger=self.logger, datasource=datasource)
self.primary_datasource = datasource
self._store = GnrStore(terms, tax_group=taxon_id, logger=self.logger)
# http://resolver.globalnames.org/api
self.key_terms = ['query_name', 'classification_path',
'data_source_title', 'match_type', 'score',
'classification_path_ranks', 'name_string',
'canonical_form', 'classification_path_ids',
'prescore', 'data_source_id', 'taxon_id', 'gni_uuid']
self.lowrank = lowrank # return lowest ranked match
# self.tnr_obj = [] # this will hold all output
def _check(self, terms):
"""Check terms do not contain unknown characters"""
for t in terms:
try:
_ = urllib.parse.quote(six.text_type(t).encode('utf8'))
except:
self.logger.error('Unknown character in [{0}]!'.format(t))
self.logger.error('.... remove character and try again.')
raise EncodingError
# def extract(self, what): # depends on tnr
# lkeys = ['qnames', 'rnames', 'taxonids', 'ranks']
# i = [i for i, each in enumerate(lkeys) if what is each][0]
# res = [each[i] for each in self.tnr_obj]
# return res
def _readInJson(self):
jobj_file = os.path.join(self.outdir, 'prelim_search_results.json')
with open(jobj_file, 'r') as infile:
jobj = json.load(infile)
return jobj
def _count(self, greater=False, nrecords=0):
# return a list of all keys that have records
# greater or less than nrecords
GnrStore = self._store
assessed = []
lens = [len(GnrStore[key]) for key in list(GnrStore.keys())]
if greater:
len_bools = [each > nrecords for each in lens]
else:
len_bools = [each < nrecords for each in lens]
for i, key in enumerate(GnrStore.keys()):
if len_bools[i]:
assessed.append(key)
if len(assessed) == 0:
return False
else:
return assessed
def _sieve(self, multiple_records):
"""Return json object without multiple returns per resolved name.\
Names with multiple records are reduced by finding the name in the clade of\
interest, have the highest score, have the lowest taxonomic rank (if lowrank is
true) and/or are the first item returned."""
# TODO: Break up, too complex
GnrStore = self._store
def writeAsJson(term, results):
record = {'supplied_name_string': term}
if len(results) > 0:
record['results'] = results
return record
def boolResults(results, bool_li, rand=False):
if rand:
# choose first record (most likely best?)
results = [results[0]]
elif sum(bool_li) == 1:
results = [results[bool_li.index(1)]]
elif sum(bool_li) == 0:
# return 'no_record'
return []
else:
results = [result for i, result in enumerate(results) if
bool_li[i]]
return results
sieved = []
ranks = ['species', 'genus', 'family', 'order', 'superorder', 'class',
'superclass', 'subphylum', 'phylum', 'kingdom',
'superkingdom']
for term in multiple_records:
results = GnrStore[term]
while len(results) > 1:
# choose result with best score
scores = [result['score'] for result in results]
bool_score = [1 if score == max(scores) else 0 for score in
scores]
results = boolResults(results, bool_score)
# choose result resolved to lowest taxonomic rank
if self.lowrank:
res_ranks = [result['classification_path_ranks'].
split('|') for result in results]
# calculate 'rank scores' for named and un-named ranks
nmd_rnks = []
unnmd_rnks = []
for rs in res_ranks:
nmd_rnks.append(min([j for j,e in enumerate(ranks) if
e in rs]))
unnmd_rnk = [j for j,e in enumerate(rs) if
e == ranks[nmd_rnks[-1]]][0]
unnmd_rnk -= len(rs)
unnmd_rnks.append(unnmd_rnk)
# calculate bool
unnmd_rnks = [e if nmd_rnks[j] == min(nmd_rnks) else 0 for
j,e in enumerate(unnmd_rnks)]
bool_rank = [1 if e == min(unnmd_rnks) else 0 for e in
unnmd_rnks]
results = boolResults(results, bool_rank)
results = boolResults(results, [], rand=True)
record = writeAsJson(term, results)
sieved.append(record)
return sieved
def write(self):
"""Write csv file of resolved names and txt file of unresolved names.
"""
csv_file = os.path.join(self.outdir, 'search_results.csv')
txt_file = os.path.join(self.outdir, 'unresolved.txt')
headers = self.key_terms
unresolved = []
with open(csv_file, 'w') as file:
writer = csv.writer(file)
writer.writerow(headers)
for key in list(self._store.keys()):
results = self._store[key]
if len(results) == 0:
unresolved.append(key)
else:
row = [key]
for key_term in headers[1:]:
element = results[0][key_term]
# GNR returns UTF-8, csv requires ascii
#
# *** Note ***
# According to all docs for csv versions >= 2.6, csv
# can handle either UTF-8 or ascii, just not Unicode.
# In py3, the following two lines result in csv printing
# the element with a bitstring. If GNR is actually
# returning UTF-8, it seems easiest to just drop these
# if 'encode' in dir(element):
# element = element.encode('ascii')
row.append(element)
writer.writerow(row)
if len(unresolved) > 0:
with open(txt_file, 'w') as file:
for name in unresolved:
file.write("{0}\n".format(name))
def retrieve(self, key_term):
"""Return data for key term specified for each resolved name as a list.
Possible terms (02/12/2013): 'query_name', 'classification_path',
'data_source_title', 'match_type', 'score', 'classification_path_ranks',
'name_string', 'canonical_form',\
'classification_path_ids', 'prescore', 'data_source_id', 'taxon_id',
'gni_uuid'"""
if key_term not in self.key_terms:
raise IndexError('Term given is invalid! Check doc string for \
valid terms.')
store = self._store
retrieved = []
for key in list(store.keys()):
# take copy, so changes made to the returned list do not affect
# store
record = copy.deepcopy(store[key])
if len(record) > 0:
if key_term == 'query_name':
retrieved.append(key)
else:
retrieved.append(record[0][key_term])
if re.search('path', key_term):
retrieved = [[r2 for r2 in r1.split('|')[1:]] for r1 in retrieved]
return retrieved
|
DomBennett/TaxonNamesResolver
|
taxon_names_resolver/resolver.py
|
Resolver._sieve
|
python
|
def _sieve(self, multiple_records):
# TODO: Break up, too complex
GnrStore = self._store
def writeAsJson(term, results):
record = {'supplied_name_string': term}
if len(results) > 0:
record['results'] = results
return record
def boolResults(results, bool_li, rand=False):
if rand:
# choose first record (most likely best?)
results = [results[0]]
elif sum(bool_li) == 1:
results = [results[bool_li.index(1)]]
elif sum(bool_li) == 0:
# return 'no_record'
return []
else:
results = [result for i, result in enumerate(results) if
bool_li[i]]
return results
sieved = []
ranks = ['species', 'genus', 'family', 'order', 'superorder', 'class',
'superclass', 'subphylum', 'phylum', 'kingdom',
'superkingdom']
for term in multiple_records:
results = GnrStore[term]
while len(results) > 1:
# choose result with best score
scores = [result['score'] for result in results]
bool_score = [1 if score == max(scores) else 0 for score in
scores]
results = boolResults(results, bool_score)
# choose result resolved to lowest taxonomic rank
if self.lowrank:
res_ranks = [result['classification_path_ranks'].
split('|') for result in results]
# calculate 'rank scores' for named and un-named ranks
nmd_rnks = []
unnmd_rnks = []
for rs in res_ranks:
nmd_rnks.append(min([j for j,e in enumerate(ranks) if
e in rs]))
unnmd_rnk = [j for j,e in enumerate(rs) if
e == ranks[nmd_rnks[-1]]][0]
unnmd_rnk -= len(rs)
unnmd_rnks.append(unnmd_rnk)
# calculate bool
unnmd_rnks = [e if nmd_rnks[j] == min(nmd_rnks) else 0 for
j,e in enumerate(unnmd_rnks)]
bool_rank = [1 if e == min(unnmd_rnks) else 0 for e in
unnmd_rnks]
results = boolResults(results, bool_rank)
results = boolResults(results, [], rand=True)
record = writeAsJson(term, results)
sieved.append(record)
return sieved
|
Return json object without multiple returns per resolved name.\
Names with multiple records are reduced by finding the name in the clade of\
interest, have the highest score, have the lowest taxonomic rank (if lowrank is
true) and/or are the first item returned.
|
train
|
https://github.com/DomBennett/TaxonNamesResolver/blob/a2556cc0f8b7442d83990715c92fdf6f787e1f41/taxon_names_resolver/resolver.py#L159-L222
| null |
class Resolver(object):
"""Taxon Names Resovler class : Automatically resolves taxon names \
through GNR. All output written in 'resolved_names' folder.
See https://github.com/DomBennett/TaxonNamesResolver for details."""
def __init__(self, input_file=None, datasource='NCBI', taxon_id=None,
terms=None, lowrank=False, logger=logging.getLogger('')):
# add logger
self.logger = logger
# organising dirs
self.directory = os.getcwd()
self.outdir = os.path.join(self.directory, 'resolved_names')
if not os.path.exists(self.outdir):
os.makedirs(self.outdir)
if input_file:
input_file = os.path.join(self.directory, input_file)
# reading in terms
terms = []
with open(input_file) as names:
for name in names:
terms.append(name.strip())
terms = [term for term in terms if not term == '']
else:
if not terms:
self.logger.info("No terms provided")
terms = list(set(terms))
self.logger.info('Found [{0}] taxon names to search in input file... '.
format(len(terms)))
self.logger.info('... of which [{0}] are unique.'.format(len(terms)))
# init dep classes
self._check(terms)
self.terms = terms
self._res = GnrResolver(logger=self.logger, datasource=datasource)
self.primary_datasource = datasource
self._store = GnrStore(terms, tax_group=taxon_id, logger=self.logger)
# http://resolver.globalnames.org/api
self.key_terms = ['query_name', 'classification_path',
'data_source_title', 'match_type', 'score',
'classification_path_ranks', 'name_string',
'canonical_form', 'classification_path_ids',
'prescore', 'data_source_id', 'taxon_id', 'gni_uuid']
self.lowrank = lowrank # return lowest ranked match
# self.tnr_obj = [] # this will hold all output
def _check(self, terms):
"""Check terms do not contain unknown characters"""
for t in terms:
try:
_ = urllib.parse.quote(six.text_type(t).encode('utf8'))
except:
self.logger.error('Unknown character in [{0}]!'.format(t))
self.logger.error('.... remove character and try again.')
raise EncodingError
def main(self):
"""Search and sieve query names."""
# TODO: Break up, too complex
primary_bool = True
no_records = True
nsearch = 1
search_terms = self.terms
original_names = []
while True:
if primary_bool:
self.logger.info('Searching [{0}] ...'.format(
self.primary_datasource))
else:
self.logger.info('Searching other datasources ...')
res = self._res.search(search_terms, prelim=primary_bool)
if nsearch > 2 and res:
# if second search failed, look up alternative names
for each_res, original_name in zip(res, original_names):
each_res['supplied_name_string'] = original_name
self._store.add(res)
# Check for returns without records
no_records = self._count(nrecords=1)
if no_records:
if nsearch == 1:
primary_bool = False
elif nsearch == 2:
original_names = no_records
# genus names
no_records = [e.split()[0] for e in no_records]
primary_bool = True
elif nsearch == 3:
original_names = no_records
no_records = [e.split()[0] for e in no_records]
primary_bool = False
else:
break
else:
break
nsearch += 1
search_terms = no_records
# Check for multiple records
multi_records = self._count(greater=True, nrecords=1)
if multi_records:
self.logger.info('Choosing best records to return ...')
res = self._sieve(multi_records)
self._store.replace(res)
# def extract(self, what): # depends on tnr
# lkeys = ['qnames', 'rnames', 'taxonids', 'ranks']
# i = [i for i, each in enumerate(lkeys) if what is each][0]
# res = [each[i] for each in self.tnr_obj]
# return res
def _readInJson(self):
jobj_file = os.path.join(self.outdir, 'prelim_search_results.json')
with open(jobj_file, 'r') as infile:
jobj = json.load(infile)
return jobj
def _count(self, greater=False, nrecords=0):
# return a list of all keys that have records
# greater or less than nrecords
GnrStore = self._store
assessed = []
lens = [len(GnrStore[key]) for key in list(GnrStore.keys())]
if greater:
len_bools = [each > nrecords for each in lens]
else:
len_bools = [each < nrecords for each in lens]
for i, key in enumerate(GnrStore.keys()):
if len_bools[i]:
assessed.append(key)
if len(assessed) == 0:
return False
else:
return assessed
def write(self):
"""Write csv file of resolved names and txt file of unresolved names.
"""
csv_file = os.path.join(self.outdir, 'search_results.csv')
txt_file = os.path.join(self.outdir, 'unresolved.txt')
headers = self.key_terms
unresolved = []
with open(csv_file, 'w') as file:
writer = csv.writer(file)
writer.writerow(headers)
for key in list(self._store.keys()):
results = self._store[key]
if len(results) == 0:
unresolved.append(key)
else:
row = [key]
for key_term in headers[1:]:
element = results[0][key_term]
# GNR returns UTF-8, csv requires ascii
#
# *** Note ***
# According to all docs for csv versions >= 2.6, csv
# can handle either UTF-8 or ascii, just not Unicode.
# In py3, the following two lines result in csv printing
# the element with a bitstring. If GNR is actually
# returning UTF-8, it seems easiest to just drop these
# if 'encode' in dir(element):
# element = element.encode('ascii')
row.append(element)
writer.writerow(row)
if len(unresolved) > 0:
with open(txt_file, 'w') as file:
for name in unresolved:
file.write("{0}\n".format(name))
def retrieve(self, key_term):
"""Return data for key term specified for each resolved name as a list.
Possible terms (02/12/2013): 'query_name', 'classification_path',
'data_source_title', 'match_type', 'score', 'classification_path_ranks',
'name_string', 'canonical_form',\
'classification_path_ids', 'prescore', 'data_source_id', 'taxon_id',
'gni_uuid'"""
if key_term not in self.key_terms:
raise IndexError('Term given is invalid! Check doc string for \
valid terms.')
store = self._store
retrieved = []
for key in list(store.keys()):
# take copy, so changes made to the returned list do not affect
# store
record = copy.deepcopy(store[key])
if len(record) > 0:
if key_term == 'query_name':
retrieved.append(key)
else:
retrieved.append(record[0][key_term])
if re.search('path', key_term):
retrieved = [[r2 for r2 in r1.split('|')[1:]] for r1 in retrieved]
return retrieved
|
DomBennett/TaxonNamesResolver
|
taxon_names_resolver/resolver.py
|
Resolver.write
|
python
|
def write(self):
csv_file = os.path.join(self.outdir, 'search_results.csv')
txt_file = os.path.join(self.outdir, 'unresolved.txt')
headers = self.key_terms
unresolved = []
with open(csv_file, 'w') as file:
writer = csv.writer(file)
writer.writerow(headers)
for key in list(self._store.keys()):
results = self._store[key]
if len(results) == 0:
unresolved.append(key)
else:
row = [key]
for key_term in headers[1:]:
element = results[0][key_term]
# GNR returns UTF-8, csv requires ascii
#
# *** Note ***
# According to all docs for csv versions >= 2.6, csv
# can handle either UTF-8 or ascii, just not Unicode.
# In py3, the following two lines result in csv printing
# the element with a bitstring. If GNR is actually
# returning UTF-8, it seems easiest to just drop these
# if 'encode' in dir(element):
# element = element.encode('ascii')
row.append(element)
writer.writerow(row)
if len(unresolved) > 0:
with open(txt_file, 'w') as file:
for name in unresolved:
file.write("{0}\n".format(name))
|
Write csv file of resolved names and txt file of unresolved names.
|
train
|
https://github.com/DomBennett/TaxonNamesResolver/blob/a2556cc0f8b7442d83990715c92fdf6f787e1f41/taxon_names_resolver/resolver.py#L224-L258
| null |
class Resolver(object):
"""Taxon Names Resovler class : Automatically resolves taxon names \
through GNR. All output written in 'resolved_names' folder.
See https://github.com/DomBennett/TaxonNamesResolver for details."""
def __init__(self, input_file=None, datasource='NCBI', taxon_id=None,
terms=None, lowrank=False, logger=logging.getLogger('')):
# add logger
self.logger = logger
# organising dirs
self.directory = os.getcwd()
self.outdir = os.path.join(self.directory, 'resolved_names')
if not os.path.exists(self.outdir):
os.makedirs(self.outdir)
if input_file:
input_file = os.path.join(self.directory, input_file)
# reading in terms
terms = []
with open(input_file) as names:
for name in names:
terms.append(name.strip())
terms = [term for term in terms if not term == '']
else:
if not terms:
self.logger.info("No terms provided")
terms = list(set(terms))
self.logger.info('Found [{0}] taxon names to search in input file... '.
format(len(terms)))
self.logger.info('... of which [{0}] are unique.'.format(len(terms)))
# init dep classes
self._check(terms)
self.terms = terms
self._res = GnrResolver(logger=self.logger, datasource=datasource)
self.primary_datasource = datasource
self._store = GnrStore(terms, tax_group=taxon_id, logger=self.logger)
# http://resolver.globalnames.org/api
self.key_terms = ['query_name', 'classification_path',
'data_source_title', 'match_type', 'score',
'classification_path_ranks', 'name_string',
'canonical_form', 'classification_path_ids',
'prescore', 'data_source_id', 'taxon_id', 'gni_uuid']
self.lowrank = lowrank # return lowest ranked match
# self.tnr_obj = [] # this will hold all output
def _check(self, terms):
"""Check terms do not contain unknown characters"""
for t in terms:
try:
_ = urllib.parse.quote(six.text_type(t).encode('utf8'))
except:
self.logger.error('Unknown character in [{0}]!'.format(t))
self.logger.error('.... remove character and try again.')
raise EncodingError
def main(self):
"""Search and sieve query names."""
# TODO: Break up, too complex
primary_bool = True
no_records = True
nsearch = 1
search_terms = self.terms
original_names = []
while True:
if primary_bool:
self.logger.info('Searching [{0}] ...'.format(
self.primary_datasource))
else:
self.logger.info('Searching other datasources ...')
res = self._res.search(search_terms, prelim=primary_bool)
if nsearch > 2 and res:
# if second search failed, look up alternative names
for each_res, original_name in zip(res, original_names):
each_res['supplied_name_string'] = original_name
self._store.add(res)
# Check for returns without records
no_records = self._count(nrecords=1)
if no_records:
if nsearch == 1:
primary_bool = False
elif nsearch == 2:
original_names = no_records
# genus names
no_records = [e.split()[0] for e in no_records]
primary_bool = True
elif nsearch == 3:
original_names = no_records
no_records = [e.split()[0] for e in no_records]
primary_bool = False
else:
break
else:
break
nsearch += 1
search_terms = no_records
# Check for multiple records
multi_records = self._count(greater=True, nrecords=1)
if multi_records:
self.logger.info('Choosing best records to return ...')
res = self._sieve(multi_records)
self._store.replace(res)
# def extract(self, what): # depends on tnr
# lkeys = ['qnames', 'rnames', 'taxonids', 'ranks']
# i = [i for i, each in enumerate(lkeys) if what is each][0]
# res = [each[i] for each in self.tnr_obj]
# return res
def _readInJson(self):
jobj_file = os.path.join(self.outdir, 'prelim_search_results.json')
with open(jobj_file, 'r') as infile:
jobj = json.load(infile)
return jobj
def _count(self, greater=False, nrecords=0):
# return a list of all keys that have records
# greater or less than nrecords
GnrStore = self._store
assessed = []
lens = [len(GnrStore[key]) for key in list(GnrStore.keys())]
if greater:
len_bools = [each > nrecords for each in lens]
else:
len_bools = [each < nrecords for each in lens]
for i, key in enumerate(GnrStore.keys()):
if len_bools[i]:
assessed.append(key)
if len(assessed) == 0:
return False
else:
return assessed
def _sieve(self, multiple_records):
"""Return json object without multiple returns per resolved name.\
Names with multiple records are reduced by finding the name in the clade of\
interest, have the highest score, have the lowest taxonomic rank (if lowrank is
true) and/or are the first item returned."""
# TODO: Break up, too complex
GnrStore = self._store
def writeAsJson(term, results):
record = {'supplied_name_string': term}
if len(results) > 0:
record['results'] = results
return record
def boolResults(results, bool_li, rand=False):
if rand:
# choose first record (most likely best?)
results = [results[0]]
elif sum(bool_li) == 1:
results = [results[bool_li.index(1)]]
elif sum(bool_li) == 0:
# return 'no_record'
return []
else:
results = [result for i, result in enumerate(results) if
bool_li[i]]
return results
sieved = []
ranks = ['species', 'genus', 'family', 'order', 'superorder', 'class',
'superclass', 'subphylum', 'phylum', 'kingdom',
'superkingdom']
for term in multiple_records:
results = GnrStore[term]
while len(results) > 1:
# choose result with best score
scores = [result['score'] for result in results]
bool_score = [1 if score == max(scores) else 0 for score in
scores]
results = boolResults(results, bool_score)
# choose result resolved to lowest taxonomic rank
if self.lowrank:
res_ranks = [result['classification_path_ranks'].
split('|') for result in results]
# calculate 'rank scores' for named and un-named ranks
nmd_rnks = []
unnmd_rnks = []
for rs in res_ranks:
nmd_rnks.append(min([j for j,e in enumerate(ranks) if
e in rs]))
unnmd_rnk = [j for j,e in enumerate(rs) if
e == ranks[nmd_rnks[-1]]][0]
unnmd_rnk -= len(rs)
unnmd_rnks.append(unnmd_rnk)
# calculate bool
unnmd_rnks = [e if nmd_rnks[j] == min(nmd_rnks) else 0 for
j,e in enumerate(unnmd_rnks)]
bool_rank = [1 if e == min(unnmd_rnks) else 0 for e in
unnmd_rnks]
results = boolResults(results, bool_rank)
results = boolResults(results, [], rand=True)
record = writeAsJson(term, results)
sieved.append(record)
return sieved
def retrieve(self, key_term):
"""Return data for key term specified for each resolved name as a list.
Possible terms (02/12/2013): 'query_name', 'classification_path',
'data_source_title', 'match_type', 'score', 'classification_path_ranks',
'name_string', 'canonical_form',\
'classification_path_ids', 'prescore', 'data_source_id', 'taxon_id',
'gni_uuid'"""
if key_term not in self.key_terms:
raise IndexError('Term given is invalid! Check doc string for \
valid terms.')
store = self._store
retrieved = []
for key in list(store.keys()):
# take copy, so changes made to the returned list do not affect
# store
record = copy.deepcopy(store[key])
if len(record) > 0:
if key_term == 'query_name':
retrieved.append(key)
else:
retrieved.append(record[0][key_term])
if re.search('path', key_term):
retrieved = [[r2 for r2 in r1.split('|')[1:]] for r1 in retrieved]
return retrieved
|
DomBennett/TaxonNamesResolver
|
taxon_names_resolver/resolver.py
|
Resolver.retrieve
|
python
|
def retrieve(self, key_term):
"""Return data for key term specified for each resolved name as a list.
Possible terms (02/12/2013): 'query_name', 'classification_path',
'data_source_title', 'match_type', 'score', 'classification_path_ranks',
'name_string', 'canonical_form',\
'classification_path_ids', 'prescore', 'data_source_id', 'taxon_id',
'gni_uuid'"""
if key_term not in self.key_terms:
raise IndexError('Term given is invalid! Check doc string for \
valid terms.')
store = self._store
retrieved = []
for key in list(store.keys()):
# take copy, so changes made to the returned list do not affect
# store
record = copy.deepcopy(store[key])
if len(record) > 0:
if key_term == 'query_name':
retrieved.append(key)
else:
retrieved.append(record[0][key_term])
if re.search('path', key_term):
retrieved = [[r2 for r2 in r1.split('|')[1:]] for r1 in retrieved]
return retrieved
|
Return data for key term specified for each resolved name as a list.
Possible terms (02/12/2013): 'query_name', 'classification_path',
'data_source_title', 'match_type', 'score', 'classification_path_ranks',
'name_string', 'canonical_form',\
'classification_path_ids', 'prescore', 'data_source_id', 'taxon_id',
'gni_uuid
|
train
|
https://github.com/DomBennett/TaxonNamesResolver/blob/a2556cc0f8b7442d83990715c92fdf6f787e1f41/taxon_names_resolver/resolver.py#L260-L283
| null |
class Resolver(object):
    """Taxon Names Resovler class : Automatically resolves taxon names \
through GNR. All output written in 'resolved_names' folder.
See https://github.com/DomBennett/TaxonNamesResolver for details."""
    def __init__(self, input_file=None, datasource='NCBI', taxon_id=None,
                 terms=None, lowrank=False, logger=logging.getLogger('')):
        """Set up output dir, read/clean search terms and init GNR helpers.

        :param input_file: optional path (relative to cwd) of a file with
            one taxon name per line; overrides ``terms`` when given
        :param datasource: primary GNR datasource name (default 'NCBI')
        :param taxon_id: optional taxonomic group id used to constrain hits
        :param terms: list of taxon names, used when no input_file is given
        :param lowrank: if True, prefer the lowest-ranked match when sieving
        :param logger: logger instance for progress/error messages
        """
        # add logger
        self.logger = logger
        # organising dirs
        self.directory = os.getcwd()
        self.outdir = os.path.join(self.directory, 'resolved_names')
        if not os.path.exists(self.outdir):
            os.makedirs(self.outdir)
        if input_file:
            input_file = os.path.join(self.directory, input_file)
            # reading in terms
            terms = []
            with open(input_file) as names:
                for name in names:
                    terms.append(name.strip())
            # drop blank lines
            terms = [term for term in terms if not term == '']
        else:
            if not terms:
                self.logger.info("No terms provided")
        # de-duplicate (order is not preserved)
        terms = list(set(terms))
        self.logger.info('Found [{0}] taxon names to search in input file... '.
                         format(len(terms)))
        self.logger.info('... of which [{0}] are unique.'.format(len(terms)))
        # init dep classes
        self._check(terms)
        self.terms = terms
        self._res = GnrResolver(logger=self.logger, datasource=datasource)
        self.primary_datasource = datasource
        self._store = GnrStore(terms, tax_group=taxon_id, logger=self.logger)
        # Fields returned per record by the GNR API,
        # see http://resolver.globalnames.org/api
        self.key_terms = ['query_name', 'classification_path',
                          'data_source_title', 'match_type', 'score',
                          'classification_path_ranks', 'name_string',
                          'canonical_form', 'classification_path_ids',
                          'prescore', 'data_source_id', 'taxon_id', 'gni_uuid']
        self.lowrank = lowrank  # return lowest ranked match
        # self.tnr_obj = [] # this will hold all output
    def _check(self, terms):
        """Check terms do not contain unknown characters"""
        for t in terms:
            try:
                # URL-quoting is how terms reach the GNR HTTP API; failure
                # here means the name cannot be searched at all
                _ = urllib.parse.quote(six.text_type(t).encode('utf8'))
            except:
                self.logger.error('Unknown character in [{0}]!'.format(t))
                self.logger.error('.... remove character and try again.')
                raise EncodingError
    def main(self):
        """Search and sieve query names."""
        # TODO: Break up, too complex
        # Up to four search passes (nsearch 1..4):
        #   1. full names against the primary datasource
        #   2. full names against all other datasources
        #   3. genus names (first word) against the primary datasource
        #   4. genus names against all other datasources
        primary_bool = True
        no_records = True
        nsearch = 1
        search_terms = self.terms
        original_names = []
        while True:
            if primary_bool:
                self.logger.info('Searching [{0}] ...'.format(
                    self.primary_datasource))
            else:
                self.logger.info('Searching other datasources ...')
            res = self._res.search(search_terms, prelim=primary_bool)
            if nsearch > 2 and res:
                # if second search failed, look up alternative names
                # (re-label genus-level hits with the originally queried name)
                for each_res, original_name in zip(res, original_names):
                    each_res['supplied_name_string'] = original_name
            self._store.add(res)
            # Check for returns without records
            no_records = self._count(nrecords=1)
            if no_records:
                if nsearch == 1:
                    primary_bool = False
                elif nsearch == 2:
                    original_names = no_records
                    # genus names
                    no_records = [e.split()[0] for e in no_records]
                    primary_bool = True
                elif nsearch == 3:
                    original_names = no_records
                    # NOTE(review): at this stage no_records are full names
                    # again (keys of the store), so split()[0] re-derives the
                    # genus — confirm this matches the intended fallback.
                    no_records = [e.split()[0] for e in no_records]
                    primary_bool = False
                else:
                    break
            else:
                break
            nsearch += 1
            search_terms = no_records
        # Check for multiple records
        multi_records = self._count(greater=True, nrecords=1)
        if multi_records:
            self.logger.info('Choosing best records to return ...')
            res = self._sieve(multi_records)
            self._store.replace(res)
    # def extract(self, what): # depends on tnr
    # lkeys = ['qnames', 'rnames', 'taxonids', 'ranks']
    # i = [i for i, each in enumerate(lkeys) if what is each][0]
    # res = [each[i] for each in self.tnr_obj]
    # return res
    def _readInJson(self):
        """Load the preliminary search results JSON from the output dir."""
        jobj_file = os.path.join(self.outdir, 'prelim_search_results.json')
        with open(jobj_file, 'r') as infile:
            jobj = json.load(infile)
        return jobj
    def _count(self, greater=False, nrecords=0):
        """Return the store keys whose record count is < nrecords (default),
        or > nrecords when ``greater`` is True; False when none match."""
        # return a list of all keys that have records
        # greater or less than nrecords
        GnrStore = self._store
        assessed = []
        lens = [len(GnrStore[key]) for key in list(GnrStore.keys())]
        if greater:
            len_bools = [each > nrecords for each in lens]
        else:
            len_bools = [each < nrecords for each in lens]
        # NOTE(review): relies on GnrStore.keys() iterating in the same order
        # twice — confirm GnrStore preserves key order.
        for i, key in enumerate(GnrStore.keys()):
            if len_bools[i]:
                assessed.append(key)
        if len(assessed) == 0:
            return False
        else:
            return assessed
    def _sieve(self, multiple_records):
        """Return json object without multiple returns per resolved name.\
Names with multiple records are reduced by finding the name in the clade of\
interest, have the highest score, have the lowest taxonomic rank (if lowrank is
true) and/or are the first item returned."""
        # TODO: Break up, too complex
        GnrStore = self._store
        def writeAsJson(term, results):
            # wrap the surviving results in a GNR-shaped record
            record = {'supplied_name_string': term}
            if len(results) > 0:
                record['results'] = results
            return record
        def boolResults(results, bool_li, rand=False):
            # keep results whose flag is truthy; rand=True keeps only the
            # first result; an all-zero mask means "no record"
            if rand:
                # choose first record (most likely best?)
                results = [results[0]]
            elif sum(bool_li) == 1:
                results = [results[bool_li.index(1)]]
            elif sum(bool_li) == 0:
                # return 'no_record'
                return []
            else:
                results = [result for i, result in enumerate(results) if
                           bool_li[i]]
            return results
        sieved = []
        ranks = ['species', 'genus', 'family', 'order', 'superorder', 'class',
                 'superclass', 'subphylum', 'phylum', 'kingdom',
                 'superkingdom']
        for term in multiple_records:
            results = GnrStore[term]
            while len(results) > 1:
                # choose result with best score
                scores = [result['score'] for result in results]
                bool_score = [1 if score == max(scores) else 0 for score in
                              scores]
                results = boolResults(results, bool_score)
                # choose result resolved to lowest taxonomic rank
                if self.lowrank:
                    res_ranks = [result['classification_path_ranks'].
                                 split('|') for result in results]
                    # calculate 'rank scores' for named and un-named ranks
                    nmd_rnks = []
                    unnmd_rnks = []
                    for rs in res_ranks:
                        nmd_rnks.append(min([j for j,e in enumerate(ranks) if
                                             e in rs]))
                        unnmd_rnk = [j for j,e in enumerate(rs) if
                                     e == ranks[nmd_rnks[-1]]][0]
                        unnmd_rnk -= len(rs)
                        unnmd_rnks.append(unnmd_rnk)
                    # calculate bool
                    unnmd_rnks = [e if nmd_rnks[j] == min(nmd_rnks) else 0 for
                                  j,e in enumerate(unnmd_rnks)]
                    bool_rank = [1 if e == min(unnmd_rnks) else 0 for e in
                                 unnmd_rnks]
                    results = boolResults(results, bool_rank)
                # tie-break: fall back to the first remaining record
                results = boolResults(results, [], rand=True)
            record = writeAsJson(term, results)
            sieved.append(record)
        return sieved
    def write(self):
        """Write csv file of resolved names and txt file of unresolved names.
        """
        csv_file = os.path.join(self.outdir, 'search_results.csv')
        txt_file = os.path.join(self.outdir, 'unresolved.txt')
        headers = self.key_terms
        unresolved = []
        with open(csv_file, 'w') as file:
            writer = csv.writer(file)
            writer.writerow(headers)
            for key in list(self._store.keys()):
                results = self._store[key]
                if len(results) == 0:
                    unresolved.append(key)
                else:
                    # only the first (best, post-sieve) record is written
                    row = [key]
                    for key_term in headers[1:]:
                        element = results[0][key_term]
                        # GNR returns UTF-8, csv requires ascii
                        #
                        # *** Note ***
                        # According to all docs for csv versions >= 2.6, csv
                        # can handle either UTF-8 or ascii, just not Unicode.
                        # In py3, the following two lines result in csv printing
                        # the element with a bitstring. If GNR is actually
                        # returning UTF-8, it seems easiest to just drop these
                        # if 'encode' in dir(element):
                        # element = element.encode('ascii')
                        row.append(element)
                    writer.writerow(row)
        if len(unresolved) > 0:
            with open(txt_file, 'w') as file:
                for name in unresolved:
                    file.write("{0}\n".format(name))
|
planetlabs/es_fluent
|
es_fluent/filters/core.py
|
Generic.add_filter
|
python
|
def add_filter(self, filter_or_string, *args, **kwargs):
    """Append a filter (built from a name or instance) and return self."""
    new_filter = build_filter(filter_or_string, *args, **kwargs)
    self.filters.append(new_filter)
    return self
|
Appends a filter.
|
train
|
https://github.com/planetlabs/es_fluent/blob/74f8db3a1bf9aa1d54512cf2d5e0ec58ee2f4b1c/es_fluent/filters/core.py#L27-L33
|
[
"def build_filter(filter_or_string, *args, **kwargs):\n \"\"\"\n Overloaded filter construction. If ``filter_or_string`` is a string\n we look up it's corresponding class in the filter registry and return it.\n Otherwise, assume ``filter_or_string`` is an instance of a filter.\n\n :return: :class:`~es_fluent.filters.Filter`\n \"\"\"\n if isinstance(filter_or_string, basestring):\n # Names that start with `~` indicate a negated filter.\n if filter_or_string.startswith('~'):\n filter_name = filter_or_string[1:]\n return ~FILTER_REGISTRY[filter_name](*args, **kwargs)\n else:\n filter_name = filter_or_string\n return FILTER_REGISTRY[filter_name](*args, **kwargs)\n else:\n return filter_or_string\n"
] |
class Generic(Filter):
    """
    Contains a generic list of filters. Serialized as a dictionary.
    """
    def __init__(self):
        # nested filters, kept in insertion order
        self.filters = []
    def is_empty(self):
        """
        :return: ``True`` if this filter has nested clauses ``False``.
        """
        # vacuously True when there are no filters (all([]) is True)
        return all(_filter.is_empty() for _filter in self.filters)
    def and_filter(self, filter_or_string, *args, **kwargs):
        """
        Adds a list of :class:`~es_fluent.filters.core.And` clauses, automatically
        generating :class:`~es_fluent.filters.core.And` filter if it does not
        exist.
        """
        and_filter = self.find_filter(And)
        if and_filter is None:
            # lazily create the single shared And clause on first use
            and_filter = And()
            self.filters.append(and_filter)
        and_filter.add_filter(build_filter(
            filter_or_string, *args, **kwargs))
        return and_filter
    def or_filter(self, filter_or_string, *args, **kwargs):
        """
        Adds a list of :class:`~es_fluent.filters.core.Or` clauses, automatically
        generating the an :class:`~es_fluent.filters.core.Or` filter if it does not
        exist.
        """
        or_filter = self.find_filter(Or)
        if or_filter is None:
            # lazily create the single shared Or clause on first use
            or_filter = Or()
            self.filters.append(or_filter)
        or_filter.add_filter(build_filter(
            filter_or_string, *args, **kwargs
        ))
        return or_filter
    def find_filter(self, filter_cls):
        """
        Find or create a filter instance of the provided ``filter_cls``. If it
        is found, use remaining arguments to augment the filter otherwise
        create a new instance of the desired type and add it to the
        current :class:`~es_fluent.builder.QueryBuilder` accordingly.
        """
        # NOTE(review): despite the docstring, this only *finds* — it never
        # creates; callers such as and_filter handle creation themselves.
        for filter_instance in self.filters:
            if isinstance(filter_instance, filter_cls):
                return filter_instance
        return None
|
planetlabs/es_fluent
|
es_fluent/filters/core.py
|
Generic.and_filter
|
python
|
def and_filter(self, filter_or_string, *args, **kwargs):
    """Add a clause to this filter's ``And`` group, creating the group on
    first use, and return the ``And`` filter."""
    group = self.find_filter(And)
    if group is None:
        group = And()
        self.filters.append(group)
    new_clause = build_filter(filter_or_string, *args, **kwargs)
    group.add_filter(new_clause)
    return group
|
Adds a list of :class:`~es_fluent.filters.core.And` clauses, automatically
generating :class:`~es_fluent.filters.core.And` filter if it does not
exist.
|
train
|
https://github.com/planetlabs/es_fluent/blob/74f8db3a1bf9aa1d54512cf2d5e0ec58ee2f4b1c/es_fluent/filters/core.py#L35-L50
|
[
"def build_filter(filter_or_string, *args, **kwargs):\n \"\"\"\n Overloaded filter construction. If ``filter_or_string`` is a string\n we look up it's corresponding class in the filter registry and return it.\n Otherwise, assume ``filter_or_string`` is an instance of a filter.\n\n :return: :class:`~es_fluent.filters.Filter`\n \"\"\"\n if isinstance(filter_or_string, basestring):\n # Names that start with `~` indicate a negated filter.\n if filter_or_string.startswith('~'):\n filter_name = filter_or_string[1:]\n return ~FILTER_REGISTRY[filter_name](*args, **kwargs)\n else:\n filter_name = filter_or_string\n return FILTER_REGISTRY[filter_name](*args, **kwargs)\n else:\n return filter_or_string\n",
"def add_filter(self, filter_or_string, *args, **kwargs):\n \"\"\"\n Appends a filter.\n \"\"\"\n self.filters.append(build_filter(filter_or_string, *args, **kwargs))\n\n return self\n",
"def find_filter(self, filter_cls):\n \"\"\"\n Find or create a filter instance of the provided ``filter_cls``. If it\n is found, use remaining arguments to augment the filter otherwise\n create a new instance of the desired type and add it to the\n current :class:`~es_fluent.builder.QueryBuilder` accordingly.\n \"\"\"\n for filter_instance in self.filters:\n if isinstance(filter_instance, filter_cls):\n return filter_instance\n\n return None\n"
] |
class Generic(Filter):
    """
    Contains a generic list of filters. Serialized as a dictionary.
    """
    def __init__(self):
        # nested filters, kept in insertion order
        self.filters = []
    def is_empty(self):
        """
        :return: ``True`` if this filter has nested clauses ``False``.
        """
        # vacuously True when there are no filters (all([]) is True)
        return all(_filter.is_empty() for _filter in self.filters)
    def add_filter(self, filter_or_string, *args, **kwargs):
        """
        Appends a filter.
        """
        # build_filter accepts either a registered name or a Filter instance
        self.filters.append(build_filter(filter_or_string, *args, **kwargs))
        return self
    def or_filter(self, filter_or_string, *args, **kwargs):
        """
        Adds a list of :class:`~es_fluent.filters.core.Or` clauses, automatically
        generating the an :class:`~es_fluent.filters.core.Or` filter if it does not
        exist.
        """
        or_filter = self.find_filter(Or)
        if or_filter is None:
            # lazily create the single shared Or clause on first use
            or_filter = Or()
            self.filters.append(or_filter)
        or_filter.add_filter(build_filter(
            filter_or_string, *args, **kwargs
        ))
        return or_filter
    def find_filter(self, filter_cls):
        """
        Find or create a filter instance of the provided ``filter_cls``. If it
        is found, use remaining arguments to augment the filter otherwise
        create a new instance of the desired type and add it to the
        current :class:`~es_fluent.builder.QueryBuilder` accordingly.
        """
        # NOTE(review): despite the docstring, this only *finds* — it never
        # creates; callers such as or_filter handle creation themselves.
        for filter_instance in self.filters:
            if isinstance(filter_instance, filter_cls):
                return filter_instance
        return None
|
planetlabs/es_fluent
|
es_fluent/filters/core.py
|
Generic.or_filter
|
python
|
def or_filter(self, filter_or_string, *args, **kwargs):
    """Add a clause to this filter's ``Or`` group, creating the group on
    first use, and return the ``Or`` filter."""
    group = self.find_filter(Or)
    if group is None:
        group = Or()
        self.filters.append(group)
    new_clause = build_filter(filter_or_string, *args, **kwargs)
    group.add_filter(new_clause)
    return group
|
Adds a list of :class:`~es_fluent.filters.core.Or` clauses, automatically
generating the an :class:`~es_fluent.filters.core.Or` filter if it does not
exist.
|
train
|
https://github.com/planetlabs/es_fluent/blob/74f8db3a1bf9aa1d54512cf2d5e0ec58ee2f4b1c/es_fluent/filters/core.py#L52-L68
|
[
"def build_filter(filter_or_string, *args, **kwargs):\n \"\"\"\n Overloaded filter construction. If ``filter_or_string`` is a string\n we look up it's corresponding class in the filter registry and return it.\n Otherwise, assume ``filter_or_string`` is an instance of a filter.\n\n :return: :class:`~es_fluent.filters.Filter`\n \"\"\"\n if isinstance(filter_or_string, basestring):\n # Names that start with `~` indicate a negated filter.\n if filter_or_string.startswith('~'):\n filter_name = filter_or_string[1:]\n return ~FILTER_REGISTRY[filter_name](*args, **kwargs)\n else:\n filter_name = filter_or_string\n return FILTER_REGISTRY[filter_name](*args, **kwargs)\n else:\n return filter_or_string\n",
"def add_filter(self, filter_or_string, *args, **kwargs):\n \"\"\"\n Appends a filter.\n \"\"\"\n self.filters.append(build_filter(filter_or_string, *args, **kwargs))\n\n return self\n",
"def find_filter(self, filter_cls):\n \"\"\"\n Find or create a filter instance of the provided ``filter_cls``. If it\n is found, use remaining arguments to augment the filter otherwise\n create a new instance of the desired type and add it to the\n current :class:`~es_fluent.builder.QueryBuilder` accordingly.\n \"\"\"\n for filter_instance in self.filters:\n if isinstance(filter_instance, filter_cls):\n return filter_instance\n\n return None\n"
] |
class Generic(Filter):
    """
    Contains a generic list of filters. Serialized as a dictionary.
    """
    def __init__(self):
        # nested filters, kept in insertion order
        self.filters = []
    def is_empty(self):
        """
        :return: ``True`` if this filter has nested clauses ``False``.
        """
        # vacuously True when there are no filters (all([]) is True)
        return all(_filter.is_empty() for _filter in self.filters)
    def add_filter(self, filter_or_string, *args, **kwargs):
        """
        Appends a filter.
        """
        # build_filter accepts either a registered name or a Filter instance
        self.filters.append(build_filter(filter_or_string, *args, **kwargs))
        return self
    def and_filter(self, filter_or_string, *args, **kwargs):
        """
        Adds a list of :class:`~es_fluent.filters.core.And` clauses, automatically
        generating :class:`~es_fluent.filters.core.And` filter if it does not
        exist.
        """
        and_filter = self.find_filter(And)
        if and_filter is None:
            # lazily create the single shared And clause on first use
            and_filter = And()
            self.filters.append(and_filter)
        and_filter.add_filter(build_filter(
            filter_or_string, *args, **kwargs))
        return and_filter
    def find_filter(self, filter_cls):
        """
        Find or create a filter instance of the provided ``filter_cls``. If it
        is found, use remaining arguments to augment the filter otherwise
        create a new instance of the desired type and add it to the
        current :class:`~es_fluent.builder.QueryBuilder` accordingly.
        """
        # NOTE(review): despite the docstring, this only *finds* — it never
        # creates; callers such as and_filter handle creation themselves.
        for filter_instance in self.filters:
            if isinstance(filter_instance, filter_cls):
                return filter_instance
        return None
|
planetlabs/es_fluent
|
es_fluent/filters/core.py
|
Generic.find_filter
|
python
|
def find_filter(self, filter_cls):
    """Return the first nested filter that is an instance of ``filter_cls``,
    or ``None`` when no such filter exists."""
    matches = (f for f in self.filters if isinstance(f, filter_cls))
    return next(matches, None)
|
Find or create a filter instance of the provided ``filter_cls``. If it
is found, use remaining arguments to augment the filter otherwise
create a new instance of the desired type and add it to the
current :class:`~es_fluent.builder.QueryBuilder` accordingly.
|
train
|
https://github.com/planetlabs/es_fluent/blob/74f8db3a1bf9aa1d54512cf2d5e0ec58ee2f4b1c/es_fluent/filters/core.py#L70-L81
| null |
class Generic(Filter):
    """
    Contains a generic list of filters. Serialized as a dictionary.
    """
    def __init__(self):
        # nested filters, kept in insertion order
        self.filters = []
    def is_empty(self):
        """
        :return: ``True`` if this filter has nested clauses ``False``.
        """
        # vacuously True when there are no filters (all([]) is True)
        return all(_filter.is_empty() for _filter in self.filters)
    def add_filter(self, filter_or_string, *args, **kwargs):
        """
        Appends a filter.
        """
        # build_filter accepts either a registered name or a Filter instance
        self.filters.append(build_filter(filter_or_string, *args, **kwargs))
        return self
    def and_filter(self, filter_or_string, *args, **kwargs):
        """
        Adds a list of :class:`~es_fluent.filters.core.And` clauses, automatically
        generating :class:`~es_fluent.filters.core.And` filter if it does not
        exist.
        """
        and_filter = self.find_filter(And)
        if and_filter is None:
            # lazily create the single shared And clause on first use
            and_filter = And()
            self.filters.append(and_filter)
        and_filter.add_filter(build_filter(
            filter_or_string, *args, **kwargs))
        return and_filter
    def or_filter(self, filter_or_string, *args, **kwargs):
        """
        Adds a list of :class:`~es_fluent.filters.core.Or` clauses, automatically
        generating the an :class:`~es_fluent.filters.core.Or` filter if it does not
        exist.
        """
        or_filter = self.find_filter(Or)
        if or_filter is None:
            # lazily create the single shared Or clause on first use
            or_filter = Or()
            self.filters.append(or_filter)
        or_filter.add_filter(build_filter(
            filter_or_string, *args, **kwargs
        ))
        return or_filter
|
planetlabs/es_fluent
|
es_fluent/filters/core.py
|
Dict.to_query
|
python
|
def to_query(self):
    """Merge the queries of every non-empty nested filter into one dict.

    Later filters overwrite earlier ones on key collision; no isolation is
    attempted (nest in And/Or for that).
    """
    merged = {}
    active = [f for f in self.filters if not f.is_empty()]
    for f in active:
        merged.update(f.to_query())
    return merged
|
Iterates over all filters and converts them to an Elastic HTTP API
suitable query.
Note: each :class:`~es_fluent.filters.Filter` is free to set it's own
filter dictionary. ESFluent does not attempt to guard against filters
that may clobber one another. If you wish to ensure that filters are
isolated, nest them inside of a boolean filter such as
:class:`~es_fluent.filters.core.And` or
:class:`~es_fluent.filters.core.Or`.
|
train
|
https://github.com/planetlabs/es_fluent/blob/74f8db3a1bf9aa1d54512cf2d5e0ec58ee2f4b1c/es_fluent/filters/core.py#L93-L112
| null |
class Dict(Generic):
    """
    A generic dictionary of filters. In a top level ES query we may have::
        { "filtered": {"filter": {"and": {...}, "or": {...}, "exists": {...} }
    A Dict instance models the dictionary inside of "filtered.filter".
    """
    def is_empty(self):
        """
        :return:
            ``True`` when no nested filters have been added, else ``False``.
        """
        # unlike Generic, emptiness is purely structural: any filter at all,
        # even one that is itself empty, counts as content
        return not self.filters
|
planetlabs/es_fluent
|
es_fluent/filters/geometry.py
|
prepare_geojson
|
python
|
def prepare_geojson(geojson):
    """Return an Elastic-friendly copy of incoming GeoJSON.

    Transformations applied:
    1. A ``Feature`` is unwrapped to its ``geometry`` (dropping any stray
       ``properties`` key).
    2. A ``FeatureCollection`` is re-cast as a ``GeometryCollection``.

    The input mapping is deep-copied and never mutated.

    :param geojson: a GeoJSON mapping (Feature, FeatureCollection or geometry)
    :return: a new, Elastic-friendly GeoJSON mapping
    """
    # TODO CW orientation.
    geojson = deepcopy(geojson)
    if geojson["type"] == "Feature":
        geojson = geojson["geometry"]
        # Bug fix: 'properties' is a dict *key*, not an attribute, so the
        # previous hasattr() check could never be true and the del was dead
        # code. Use a membership test instead.
        if 'properties' in geojson:
            del geojson['properties']
    if geojson["type"] == "FeatureCollection":
        geojson["type"] = "GeometryCollection"
        geojson["geometries"] = [
            feature["geometry"] for feature in geojson["features"]
        ]
        del geojson["features"]
    return geojson
|
Modifies incoming GeoJSON to make it Elastic friendly. This means:
1. CW orientation of polygons.
2. Re-casting of Features and FeatureCollections to Geometry and
GeometryCollections.
|
train
|
https://github.com/planetlabs/es_fluent/blob/74f8db3a1bf9aa1d54512cf2d5e0ec58ee2f4b1c/es_fluent/filters/geometry.py#L9-L32
| null |
"""
Geometry related filters require additional dependencies. Hence they're broken
out into their own module.
"""
from copy import deepcopy
from .core import Terminal
class GeoJSON(Terminal):
    """
    Manages querying by GeoJSON. Automatically converts incoming GeoJSON
    to elasticsearch friendly geometry. This generally means::
        #. CW orientation of polygons.
        #. Re-casting of Features and FeatureCollections to Geometry and
        GeometryCollections.
    """
    name = 'geometry'
    def __init__(self, name, geojson):
        # `name` is the document field the geo_shape query targets;
        # the raw GeoJSON is normalised up-front by prepare_geojson
        self.name = name
        self.geojson = prepare_geojson(geojson)
    def to_query(self):
        """
        Returns a json-serializable representation.
        """
        # emits an Elasticsearch geo_shape query with an inline shape
        return {
            "geo_shape": {
                self.name: {
                    "shape": self.geojson
                }
            }
        }
class IndexedShape(Terminal):
    """
    Searches by a previously indexed Geometry.
    """
    name = "indexed_geometry"
    def __init__(self, name, shape_id, index_name, doc_type, path):
        """
        :param string name: The field to match against the target shape.
        :param string shape_id: The id of the indexed shape within the index.
        :param string index_name: The name of the index containing our shape.
        :param string doc_type: The type of document within index_name.
        :param string path:
            The location of geometry field within the indexed doc.
        """
        self.name = name
        self.shape_id = shape_id
        self.index_name = index_name
        self.doc_type = doc_type
        self.path = path
    def to_query(self):
        """
        Returns a json-serializable representation.
        """
        # emits a geo_shape query that references a pre-indexed shape
        # instead of embedding the geometry inline
        return {
            "geo_shape": {
                self.name: {
                    "indexed_shape": {
                        "index": self.index_name,
                        "type": self.doc_type,
                        "id": self.shape_id,
                        "path": self.path
                    }
                }
            }
        }
|
planetlabs/es_fluent
|
es_fluent/filters/geometry.py
|
IndexedShape.to_query
|
python
|
def to_query(self):
    """Build the json-serializable geo_shape query that references a
    previously indexed shape rather than an inline geometry."""
    indexed_shape = {
        "index": self.index_name,
        "type": self.doc_type,
        "id": self.shape_id,
        "path": self.path,
    }
    return {"geo_shape": {self.name: {"indexed_shape": indexed_shape}}}
|
Returns a json-serializable representation.
|
train
|
https://github.com/planetlabs/es_fluent/blob/74f8db3a1bf9aa1d54512cf2d5e0ec58ee2f4b1c/es_fluent/filters/geometry.py#L84-L99
| null |
class IndexedShape(Terminal):
    """
    Searches by a previously indexed Geometry.
    """
    name = "indexed_geometry"
    def __init__(self, name, shape_id, index_name, doc_type, path):
        """
        :param string name: The field to match against the target shape.
        :param string shape_id: The id of the indexed shape within the index.
        :param string index_name: The name of the index containing our shape.
        :param string doc_type: The type of document within index_name.
        :param string path:
            The location of geometry field within the indexed doc.
        """
        # all five locator fields are stored verbatim; they are only read
        # when the query is serialized
        self.name = name
        self.shape_id = shape_id
        self.index_name = index_name
        self.doc_type = doc_type
        self.path = path
|
planetlabs/es_fluent
|
es_fluent/builder.py
|
QueryBuilder.and_filter
|
python
|
def and_filter(self, filter_or_string, *args, **kwargs):
    """Delegate to the root filter to build an ``And`` clause.

    :return: :class:`~es_fluent.builder.QueryBuilder`
    """
    root = self.root_filter
    root.and_filter(filter_or_string, *args, **kwargs)
    return self
|
Convenience method to delegate to the root_filter to generate an
:class:`~es_fluent.filters.core.And` clause.
:return: :class:`~es_fluent.builder.QueryBuilder`
|
train
|
https://github.com/planetlabs/es_fluent/blob/74f8db3a1bf9aa1d54512cf2d5e0ec58ee2f4b1c/es_fluent/builder.py#L37-L45
|
[
"def and_filter(self, filter_or_string, *args, **kwargs):\n \"\"\"\n Adds a list of :class:`~es_fluent.filters.core.And` clauses, automatically\n generating :class:`~es_fluent.filters.core.And` filter if it does not\n exist.\n \"\"\"\n and_filter = self.find_filter(And)\n\n if and_filter is None:\n and_filter = And()\n self.filters.append(and_filter)\n\n and_filter.add_filter(build_filter(\n filter_or_string, *args, **kwargs))\n\n return and_filter\n"
] |
class QueryBuilder(object):
    """Fluent builder that assembles an Elasticsearch query body from a root
    filter, source/script field selections, sort criteria and a size limit."""
    def __init__(self):
        self.root_filter = Dict()            # top-level filter container
        self.script_fields = ScriptFields()  # computed (scripted) fields
        self.fields = Fields()               # explicit source-field selection
        self.sorts = []                      # list of {field: direction} dicts
        self.source = True                   # include _source in results?
        self._size = None                    # None => let ES choose the size
    @property
    def size(self):
        """
        Sets current size limit of the ES response, which limits the number of
        documents returned. By default this is unset and the number of
        documents returned is up to ES.
        :return:
            The current size limit.
        """
        return self._size
    @size.setter
    def size(self, size):
        """
        Sets the size of the ES response.
        :param size: The number of documents to limit the response to.
        """
        self._size = size
    def or_filter(self, filter_or_string, *args, **kwargs):
        """
        Convenience method to delegate to the root_filter to generate an `or`
        clause.
        :return: :class:`~es_fluent.builder.QueryBuilder`
        """
        self.root_filter.or_filter(filter_or_string, *args, **kwargs)
        return self
    def add_filter(self, filter_or_string, *args, **kwargs):
        """
        Adds a filter to the query builder's filters.
        :return: :class:`~es_fluent.builder.QueryBuilder`
        """
        self.root_filter.add_filter(filter_or_string, *args, **kwargs)
        return self
    def add_field(self, field_instance):
        """
        Adds a field to the query builder. The default behavior is
        to return all fields. Explicitly adding a single field will
        result in only that source field being returned.
        :return: :class:`~es_fluent.builder.QueryBuilder`
        """
        self.fields.add_field(field_instance)
        return self
    def add_script_field(self, field_instance):
        """
        Add a script field to the query. The `field_instance` should be
        an instance of `es_fluent.script_fields`.
        :return: :class:`~es_fluent.builder.QueryBuilder`
        """
        self.script_fields.add_field(field_instance)
        return self
    def find_filter(self, filter_cls):
        """
        Finds an existing filter using a filter class `filter_cls`. If not
        found, None is returned. Only the query's top-level filter is
        searched; there is no recursion.
        :param: ``filter_cls``
            The the :class:`~es_fluent.filters.Filter` class
            to find.
        """
        return self.root_filter.find_filter(filter_cls)
    def to_query(self):
        """Serialize the accumulated builder state into an ES query dict."""
        result = {}
        if not self.root_filter.is_empty():
            result['filter'] = self.root_filter.to_query()
        if not self.script_fields.is_empty():
            result['script_fields'] = self.script_fields.to_query()
        # Bug fix: the condition used to be inverted (`if not ...`), which
        # emitted 'fields' only when the selection was empty.
        fields_query = self.fields.to_query()
        if fields_query:
            result['fields'] = fields_query
        # We don't bother with representing sort as an object.
        if len(self.sorts):
            result['sort'] = self.sorts
        if self._size is not None:
            result['size'] = self._size
        result['_source'] = self.source
        return result
    def disable_source(self):
        """
        Don't include ``_source`` document in results.
        :return: :class:`~es_fluent.builder.QueryBuilder`
        """
        self.source = False
        return self
    def enable_source(self):
        """
        Include ``_source`` document in results.
        :return: :class:`~es_fluent.builder.QueryBuilder`
        """
        self.source = True
        # Consistency fix: return self for chaining, like disable_source.
        return self
    def sort(self, field, direction="asc"):
        """
        Adds sort criteria.
        :raises ValueError: if ``field`` is not a string or ``direction`` is
            not one of ``asc``/``desc``.
        """
        # Bug fix: `basestring` is Python 2 only and raised NameError here
        # on Python 3.
        if not isinstance(field, str):
            raise ValueError("Field should be a string")
        if direction not in ["asc", "desc"]:
            raise ValueError("Sort direction should be `asc` or `desc`")
        self.sorts.append({field: direction})
    def remove_sort(self, field_name):
        """
        Clears sorting criteria affecting ``field_name``.
        """
        # Bug fix: sorts are {field: direction} dicts, so the old
        # tuple-unpacking comprehension raised at runtime, compared strings
        # with `is not`, and rebuilt entries under the literal key 'field'.
        self.sorts = [s for s in self.sorts if field_name not in s]
    def sort_reset(self):
        """
        Resets sorting criteria.
        """
        self.sorts = []
|
planetlabs/es_fluent
|
es_fluent/builder.py
|
QueryBuilder.or_filter
|
python
|
def or_filter(self, filter_or_string, *args, **kwargs):
    """Delegate to the root filter to build an ``or`` clause.

    :return: :class:`~es_fluent.builder.QueryBuilder`
    """
    root = self.root_filter
    root.or_filter(filter_or_string, *args, **kwargs)
    return self
|
Convenience method to delegate to the root_filter to generate an `or`
clause.
:return: :class:`~es_fluent.builder.QueryBuilder`
|
train
|
https://github.com/planetlabs/es_fluent/blob/74f8db3a1bf9aa1d54512cf2d5e0ec58ee2f4b1c/es_fluent/builder.py#L47-L55
|
[
"def or_filter(self, filter_or_string, *args, **kwargs):\n \"\"\"\n Adds a list of :class:`~es_fluent.filters.core.Or` clauses, automatically\n generating the an :class:`~es_fluent.filters.core.Or` filter if it does not\n exist.\n \"\"\"\n or_filter = self.find_filter(Or)\n\n if or_filter is None:\n or_filter = Or()\n self.filters.append(or_filter)\n\n or_filter.add_filter(build_filter(\n filter_or_string, *args, **kwargs\n ))\n\n return or_filter\n"
] |
class QueryBuilder(object):
    # Fluent builder assembling an ES query body from a root filter, field
    # selections, sort criteria and a size limit.
    def __init__(self):
        self.root_filter = Dict()            # top-level filter container
        self.script_fields = ScriptFields()  # computed (scripted) fields
        self.fields = Fields()               # explicit source-field selection
        self.sorts = []                      # list of {field: direction} dicts
        self.source = True                   # include _source in results?
        self._size = None                    # None => let ES choose the size
    @property
    def size(self):
        """
        Sets current size limit of the ES response, which limits the number of
        documents returned. By default this is unset and the number of
        documents returned is up to ES.
        :return:
            The current size limit.
        """
        return self._size
    @size.setter
    def size(self, size):
        """
        Sets the size of the ES response.
        :param size: The number of documents to limit the response to.
        """
        self._size = size
    def and_filter(self, filter_or_string, *args, **kwargs):
        """
        Convenience method to delegate to the root_filter to generate an
        :class:`~es_fluent.filters.core.And` clause.
        :return: :class:`~es_fluent.builder.QueryBuilder`
        """
        self.root_filter.and_filter(filter_or_string, *args, **kwargs)
        return self
    def add_filter(self, filter_or_string, *args, **kwargs):
        """
        Adds a filter to the query builder's filters.
        :return: :class:`~es_fluent.builder.QueryBuilder`
        """
        self.root_filter.add_filter(filter_or_string, *args, **kwargs)
        return self
    def add_field(self, field_instance):
        """
        Adds a field to the query builder. The default behavior is
        to return all fields. Explicitly adding a single field will
        result in only that source field being returned.
        :return: :class:`~es_fluent.builder.QueryBuilder`
        """
        self.fields.add_field(field_instance)
        return self
    def add_script_field(self, field_instance):
        """
        Add a script field to the query. The `field_instance` should be
        an instance of `es_fluent.script_fields`.
        :return: :class:`~es_fluent.builder.QueryBuilder`
        """
        self.script_fields.add_field(field_instance)
        return self
    def find_filter(self, filter_cls):
        """
        Finds an existing filter using a filter class `filter_cls`. If not
        found, None is returned.
        This method is useful in cases where one wants to modify and extend
        and existing clause, a common example might be an
        :class:`~es_fluent.filters.core.And` filter. The method only looks in the
        query's top-level filter and does not recurse.
        :param: ``filter_cls``
            The the :class:`~es_fluent.filters.Filter` class
            to find.
        """
        return self.root_filter.find_filter(filter_cls)
    def to_query(self):
        # Serialize accumulated state into an ES query dict.
        result = {}
        if not self.root_filter.is_empty():
            result['filter'] = self.root_filter.to_query()
        if not self.script_fields.is_empty():
            result['script_fields'] = self.script_fields.to_query()
        # NOTE(review): this condition looks inverted — 'fields' is only
        # emitted when to_query() is falsy (i.e. empty); confirm intent.
        if not self.fields.to_query():
            result['fields'] = self.fields.to_query()
        # We don't bother with reprensenting sort as an object.
        if len(self.sorts):
            result['sort'] = self.sorts
        if self._size is not None:
            result['size'] = self._size
        result['_source'] = self.source
        return result
    def disable_source(self):
        """
        Don't include ``_source`` document in results.
        :return: :class:`~es_fluent.builder.QueryBuilder`
        """
        self.source = False
        return self
    def enable_source(self):
        """
        Include ``_source`` document in results.
        :return: :class:`~es_fluent.builder.QueryBuilder`
        """
        # NOTE(review): unlike disable_source, this does not return self,
        # which breaks method chaining.
        self.source = True
    def sort(self, field, direction="asc"):
        """
        Adds sort criteria.
        """
        # NOTE(review): `basestring` is Python 2 only; on Python 3 this line
        # raises NameError instead of the intended ValueError.
        if not isinstance(field, basestring):
            raise ValueError("Field should be a string")
        if direction not in ["asc", "desc"]:
            raise ValueError("Sort direction should be `asc` or `desc`")
        self.sorts.append({field: direction})
    def remove_sort(self, field_name):
        """
        Clears sorting criteria affecting ``field_name``.
        """
        # NOTE(review): self.sorts holds {field: direction} dicts, so this
        # tuple unpacking raises at runtime; `is not` also compares string
        # identity, and dict(field=value) rebuilds the literal key 'field'.
        self.sorts = [dict(field=value) for field, value in self.sorts if field
                      is not field_name]
    def sort_reset(self):
        """
        Resets sorting criteria.
        """
        self.sorts = []
|
planetlabs/es_fluent
|
es_fluent/builder.py
|
QueryBuilder.add_filter
|
python
|
def add_filter(self, filter_or_string, *args, **kwargs):
self.root_filter.add_filter(filter_or_string, *args, **kwargs)
return self
|
Adds a filter to the query builder's filters.
:return: :class:`~es_fluent.builder.QueryBuilder`
|
train
|
https://github.com/planetlabs/es_fluent/blob/74f8db3a1bf9aa1d54512cf2d5e0ec58ee2f4b1c/es_fluent/builder.py#L57-L64
|
[
"def add_filter(self, filter_or_string, *args, **kwargs):\n \"\"\"\n Appends a filter.\n \"\"\"\n self.filters.append(build_filter(filter_or_string, *args, **kwargs))\n\n return self\n"
] |
class QueryBuilder(object):
    """
    Fluent builder for Elasticsearch queries.

    Collects a root filter, plain fields, script fields, sort criteria
    and size/source options, and serializes them with :meth:`to_query`.
    Mutating methods return ``self`` so calls can be chained.
    """

    def __init__(self):
        self.root_filter = Dict()            # top-level filter clause
        self.script_fields = ScriptFields()  # computed (scripted) fields
        self.fields = Fields()               # explicit response fields
        self.sorts = []                      # list of {field: direction}
        self.source = True                   # include _source by default
        self._size = None                    # None => let ES decide

    @property
    def size(self):
        """
        Current size limit of the ES response, which caps the number of
        documents returned. By default this is unset (``None``) and the
        number of documents returned is up to ES.

        :return: the current size limit.
        """
        return self._size

    @size.setter
    def size(self, size):
        """
        Set the size of the ES response.

        :param size: the number of documents to limit the response to.
        """
        self._size = size

    def and_filter(self, filter_or_string, *args, **kwargs):
        """
        Convenience method that delegates to the root filter to generate
        an :class:`~es_fluent.filters.core.And` clause.

        :return: :class:`~es_fluent.builder.QueryBuilder`
        """
        self.root_filter.and_filter(filter_or_string, *args, **kwargs)
        return self

    def or_filter(self, filter_or_string, *args, **kwargs):
        """
        Convenience method that delegates to the root filter to generate
        an `or` clause.

        :return: :class:`~es_fluent.builder.QueryBuilder`
        """
        self.root_filter.or_filter(filter_or_string, *args, **kwargs)
        return self

    def add_field(self, field_instance):
        """
        Add a field to the query builder. The default behavior is to
        return all fields; explicitly adding a field restricts the
        response to the added fields only.

        :return: :class:`~es_fluent.builder.QueryBuilder`
        """
        self.fields.add_field(field_instance)
        return self

    def add_script_field(self, field_instance):
        """
        Add a script field to the query. ``field_instance`` should be
        an instance of ``es_fluent.script_fields``.

        :return: :class:`~es_fluent.builder.QueryBuilder`
        """
        self.script_fields.add_field(field_instance)
        return self

    def find_filter(self, filter_cls):
        """
        Find an existing filter using a filter class ``filter_cls``.
        If not found, ``None`` is returned.

        Useful when one wants to modify or extend an existing clause,
        a common example being an
        :class:`~es_fluent.filters.core.And` filter. The method only
        looks in the query's top-level filter and does not recurse.

        :param filter_cls: the :class:`~es_fluent.filters.Filter`
            class to find.
        """
        return self.root_filter.find_filter(filter_cls)

    def to_query(self):
        """
        Serialize the builder into a plain ``dict`` suitable for
        sending to Elasticsearch. Empty components are omitted;
        ``_source`` is always present.
        """
        result = {}

        if not self.root_filter.is_empty():
            result['filter'] = self.root_filter.to_query()

        if not self.script_fields.is_empty():
            result['script_fields'] = self.script_fields.to_query()

        # BUG FIX: the original guard was inverted (`if not ...`), so
        # 'fields' was emitted only when the field list was empty.
        fields_query = self.fields.to_query()
        if fields_query:
            result['fields'] = fields_query

        # We don't bother with representing sort as an object.
        if len(self.sorts):
            result['sort'] = self.sorts

        if self._size is not None:
            result['size'] = self._size

        result['_source'] = self.source

        return result

    def disable_source(self):
        """
        Don't include the ``_source`` document in results.

        :return: :class:`~es_fluent.builder.QueryBuilder`
        """
        self.source = False
        return self

    def enable_source(self):
        """
        Include the ``_source`` document in results.

        :return: :class:`~es_fluent.builder.QueryBuilder`
        """
        self.source = True
        # BUG FIX: the original omitted the fluent return that
        # disable_source() provides.
        return self

    def sort(self, field, direction="asc"):
        """
        Append a sort criterion.

        :param field: name of the field to sort on.
        :param direction: ``"asc"`` or ``"desc"``.
        :raises ValueError: on a non-string field or bad direction.
        :return: :class:`~es_fluent.builder.QueryBuilder`
        """
        # BUG FIX: ``basestring`` does not exist on Python 3; ``str``
        # keeps the same intent.
        if not isinstance(field, str):
            raise ValueError("Field should be a string")
        if direction not in ["asc", "desc"]:
            raise ValueError("Sort direction should be `asc` or `desc`")
        self.sorts.append({field: direction})
        # Return self for chaining, consistent with the other methods.
        return self

    def remove_sort(self, field_name):
        """
        Remove all sorting criteria affecting ``field_name``.
        """
        # BUG FIX: the original unpacked each one-key sort dict into
        # ``field, value`` (a ValueError), rebuilt entries under the
        # literal key "field", and compared names with ``is not``.
        # Filter by key membership instead.
        self.sorts = [entry for entry in self.sorts
                      if field_name not in entry]

    def sort_reset(self):
        """
        Reset all sorting criteria.
        """
        self.sorts = []
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.